Diffstat (limited to 'builtin')
-rw-r--r--   builtin/apply.c                   1
-rw-r--r--   builtin/blame.c                   6
-rw-r--r--   builtin/cat-file.c               29
-rw-r--r--   builtin/clone.c                  17
-rw-r--r--   builtin/diff-tree.c               3
-rw-r--r--   builtin/fast-export.c             1
-rw-r--r--   builtin/fast-import.c             2
-rw-r--r--   builtin/fetch-pack.c              8
-rw-r--r--   builtin/fetch.c                  48
-rw-r--r--   builtin/fsmonitor--daemon.c    1479
-rw-r--r--   builtin/gc.c                      1
-rw-r--r--   builtin/index-pack.c              4
-rw-r--r--   builtin/log.c                    32
-rw-r--r--   builtin/ls-tree.c               366
-rw-r--r--   builtin/mailsplit.c               3
-rw-r--r--   builtin/multi-pack-index.c       45
-rw-r--r--   builtin/name-rev.c               92
-rw-r--r--   builtin/pack-objects.c           84
-rw-r--r--   builtin/rebase.c                 63
-rw-r--r--   builtin/reflog.c                146
-rw-r--r--   builtin/repack.c                 12
-rw-r--r--   builtin/reset.c                  13
-rw-r--r--   builtin/rev-list.c               29
-rw-r--r--   builtin/sparse-checkout.c         1
-rw-r--r--   builtin/stash.c                   6
-rw-r--r--   builtin/submodule--helper.c     795
-rw-r--r--   builtin/update-index.c            7
-rw-r--r--   builtin/worktree.c               41
28 files changed, 2659 insertions, 675 deletions
diff --git a/builtin/apply.c b/builtin/apply.c
index 3f099b9605..555219de40 100644
--- a/builtin/apply.c
+++ b/builtin/apply.c
@@ -1,7 +1,6 @@
#include "cache.h"
#include "builtin.h"
#include "parse-options.h"
-#include "lockfile.h"
#include "apply.h"
static const char * const apply_usage[] = {
diff --git a/builtin/blame.c b/builtin/blame.c
index 8d15b68afc..e33372c56b 100644
--- a/builtin/blame.c
+++ b/builtin/blame.c
@@ -898,6 +898,7 @@ int cmd_blame(int argc, const char **argv, const char *prefix)
unsigned int range_i;
long anchor;
const int hexsz = the_hash_algo->hexsz;
+ long num_lines = 0;
setup_default_color_by_age();
git_config(git_blame_config, &output_option);
@@ -1129,7 +1130,10 @@ parse_done:
for (range_i = ranges.nr; range_i > 0; --range_i) {
const struct range *r = &ranges.ranges[range_i - 1];
ent = blame_entry_prepend(ent, r->start, r->end, o);
+ num_lines += (r->end - r->start);
}
+ if (!num_lines)
+ num_lines = sb.num_lines;
o->suspects = ent;
prio_queue_put(&sb.commits, o->commit);
@@ -1158,7 +1162,7 @@ parse_done:
sb.found_guilty_entry = &found_guilty_entry;
sb.found_guilty_entry_data = &pi;
if (show_progress)
- pi.progress = start_delayed_progress(_("Blaming lines"), sb.num_lines);
+ pi.progress = start_delayed_progress(_("Blaming lines"), num_lines);
assign_blame(&sb, opt);
diff --git a/builtin/cat-file.c b/builtin/cat-file.c
index 0663661c0b..50cf38999d 100644
--- a/builtin/cat-file.c
+++ b/builtin/cat-file.c
@@ -360,6 +360,13 @@ static void print_object_or_die(struct batch_options *opt, struct expand_data *d
}
}
+static void print_default_format(struct strbuf *scratch, struct expand_data *data)
+{
+ strbuf_addf(scratch, "%s %s %"PRIuMAX"\n", oid_to_hex(&data->oid),
+ type_name(data->type),
+ (uintmax_t)data->size);
+}
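For reference, the helper above emits the same header line as the old inline default format, "<oid> <type> <size>"; for a hypothetical 1234-byte blob the line would read roughly:

    <40-hex-object-id> blob 1234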
+
/*
* If "pack" is non-NULL, then "offset" is the byte offset within the pack from
* which the object may be accessed (though note that we may also rely on
@@ -391,8 +398,14 @@ static void batch_object_write(const char *obj_name,
}
strbuf_reset(scratch);
- strbuf_expand(scratch, opt->format, expand_format, data);
- strbuf_addch(scratch, '\n');
+
+ if (!opt->format) {
+ print_default_format(scratch, data);
+ } else {
+ strbuf_expand(scratch, opt->format, expand_format, data);
+ strbuf_addch(scratch, '\n');
+ }
+
batch_write(opt, scratch->buf, scratch->len);
if (opt->batch_mode == BATCH_MODE_CONTENTS) {
@@ -646,6 +659,8 @@ static void batch_objects_command(struct batch_options *opt,
strbuf_release(&input);
}
+#define DEFAULT_FORMAT "%(objectname) %(objecttype) %(objectsize)"
+
static int batch_objects(struct batch_options *opt)
{
struct strbuf input = STRBUF_INIT;
@@ -654,9 +669,6 @@ static int batch_objects(struct batch_options *opt)
int save_warning;
int retval = 0;
- if (!opt->format)
- opt->format = "%(objectname) %(objecttype) %(objectsize)";
-
/*
* Expand once with our special mark_query flag, which will prime the
* object_info to be handed to oid_object_info_extended for each
@@ -664,12 +676,17 @@ static int batch_objects(struct batch_options *opt)
*/
memset(&data, 0, sizeof(data));
data.mark_query = 1;
- strbuf_expand(&output, opt->format, expand_format, &data);
+ strbuf_expand(&output,
+ opt->format ? opt->format : DEFAULT_FORMAT,
+ expand_format,
+ &data);
data.mark_query = 0;
strbuf_release(&output);
if (opt->transform_mode)
data.split_on_whitespace = 1;
+ if (opt->format && !strcmp(opt->format, DEFAULT_FORMAT))
+ opt->format = NULL;
/*
* If we are printing out the object, then always fill in the type,
* since we will want to decide whether or not to stream.
diff --git a/builtin/clone.c b/builtin/clone.c
index 0aea177660..194d50f75f 100644
--- a/builtin/clone.c
+++ b/builtin/clone.c
@@ -33,6 +33,7 @@
#include "packfile.h"
#include "list-objects-filter-options.h"
#include "hook.h"
+#include "bundle.h"
/*
* Overall FIXMEs:
@@ -1105,8 +1106,10 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
* apply the remote name provided by --origin only after this second
* call to git_config, to ensure it overrides all config-based values.
*/
- if (option_origin != NULL)
+ if (option_origin != NULL) {
+ free(remote_name);
remote_name = xstrdup(option_origin);
+ }
if (remote_name == NULL)
remote_name = xstrdup("origin");
@@ -1172,6 +1175,18 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
warning(_("--local is ignored"));
transport->cloning = 1;
+ if (is_bundle) {
+ struct bundle_header header = BUNDLE_HEADER_INIT;
+ int fd = read_bundle_header(path, &header);
+ int has_filter = header.filter.choice != LOFC_DISABLED;
+
+ if (fd > 0)
+ close(fd);
+ bundle_header_release(&header);
+ if (has_filter)
+ die(_("cannot clone from filtered bundle"));
+ }
+
transport_set_option(transport, TRANS_OPT_KEEP, "yes");
if (reject_shallow)
diff --git a/builtin/diff-tree.c b/builtin/diff-tree.c
index 0e0ac1f167..116097a404 100644
--- a/builtin/diff-tree.c
+++ b/builtin/diff-tree.c
@@ -195,6 +195,7 @@ int cmd_diff_tree(int argc, const char **argv, const char *prefix)
int saved_dcctc = 0;
opt->diffopt.rotate_to_strict = 0;
+ opt->diffopt.no_free = 1;
if (opt->diffopt.detect_rename) {
if (!the_index.cache)
repo_read_index(the_repository);
@@ -217,6 +218,8 @@ int cmd_diff_tree(int argc, const char **argv, const char *prefix)
}
opt->diffopt.degraded_cc_to_c = saved_dcctc;
opt->diffopt.needed_rename_limit = saved_nrl;
+ opt->diffopt.no_free = 0;
+ diff_free(&opt->diffopt);
}
return diff_result_code(&opt->diffopt, 0);
diff --git a/builtin/fast-export.c b/builtin/fast-export.c
index a7d72697fb..1355b5a96d 100644
--- a/builtin/fast-export.c
+++ b/builtin/fast-export.c
@@ -1261,6 +1261,7 @@ int cmd_fast_export(int argc, const char **argv, const char *prefix)
revs.diffopt.format_callback = show_filemodify;
revs.diffopt.format_callback_data = &paths_of_changed_objects;
revs.diffopt.flags.recursive = 1;
+ revs.diffopt.no_free = 1;
while ((commit = get_revision(&revs)))
handle_commit(commit, &revs, &paths_of_changed_objects);
diff --git a/builtin/fast-import.c b/builtin/fast-import.c
index ad045c7c2d..28d3193c38 100644
--- a/builtin/fast-import.c
+++ b/builtin/fast-import.c
@@ -865,7 +865,7 @@ static void end_packfile(void)
struct tag *t;
close_pack_windows(pack_data);
- finalize_hashfile(pack_file, cur_pack_oid.hash, 0);
+ finalize_hashfile(pack_file, cur_pack_oid.hash, FSYNC_COMPONENT_PACK, 0);
fixup_pack_header_footer(pack_data->pack_fd, pack_data->hash,
pack_data->pack_name, object_count,
cur_pack_oid.hash, pack_size);
diff --git a/builtin/fetch-pack.c b/builtin/fetch-pack.c
index c2d96f4c89..f045bbbe94 100644
--- a/builtin/fetch-pack.c
+++ b/builtin/fetch-pack.c
@@ -153,11 +153,15 @@ int cmd_fetch_pack(int argc, const char **argv, const char *prefix)
args.from_promisor = 1;
continue;
}
- if (skip_prefix(arg, ("--" CL_ARG__FILTER "="), &arg)) {
+ if (!strcmp("--refetch", arg)) {
+ args.refetch = 1;
+ continue;
+ }
+ if (skip_prefix(arg, ("--filter="), &arg)) {
parse_list_objects_filter(&args.filter_options, arg);
continue;
}
- if (!strcmp(arg, ("--no-" CL_ARG__FILTER))) {
+ if (!strcmp(arg, ("--no-filter"))) {
list_objects_filter_set_no_filter(&args.filter_options);
continue;
}
diff --git a/builtin/fetch.c b/builtin/fetch.c
index 4d12c2fd4d..e3791f09ed 100644
--- a/builtin/fetch.c
+++ b/builtin/fetch.c
@@ -59,7 +59,7 @@ static int prune_tags = -1; /* unspecified */
static int all, append, dry_run, force, keep, multiple, update_head_ok;
static int write_fetch_head = 1;
-static int verbosity, deepen_relative, set_upstream;
+static int verbosity, deepen_relative, set_upstream, refetch;
static int progress = -1;
static int enable_auto_gc = 1;
static int tags = TAGS_DEFAULT, unshallow, update_shallow, deepen;
@@ -190,6 +190,9 @@ static struct option builtin_fetch_options[] = {
OPT_SET_INT_F(0, "unshallow", &unshallow,
N_("convert to a complete repository"),
1, PARSE_OPT_NONEG),
+ OPT_SET_INT_F(0, "refetch", &refetch,
+ N_("re-fetch without negotiating common commits"),
+ 1, PARSE_OPT_NONEG),
{ OPTION_STRING, 0, "submodule-prefix", &submodule_prefix, N_("dir"),
N_("prepend this to submodule path output"), PARSE_OPT_HIDDEN },
OPT_CALLBACK_F(0, "recurse-submodules-default",
@@ -1305,6 +1308,14 @@ static int check_exist_and_connected(struct ref *ref_map)
return -1;
/*
+ * Similarly, if we need to refetch, we always want to perform a full
+ * fetch ignoring existing objects.
+ */
+ if (refetch)
+ return -1;
+
+
+ /*
* check_connected() allows objects to merely be promised, but
* we need all direct targets to exist.
*/
@@ -1517,6 +1528,8 @@ static struct transport *prepare_transport(struct remote *remote, int deepen)
set_option(transport, TRANS_OPT_DEEPEN_RELATIVE, "yes");
if (update_shallow)
set_option(transport, TRANS_OPT_UPDATE_SHALLOW, "yes");
+ if (refetch)
+ set_option(transport, TRANS_OPT_REFETCH, "yes");
if (filter_options.choice) {
const char *spec =
expand_list_objects_filter_spec(&filter_options);
@@ -2258,13 +2271,13 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
max_children = fetch_parallel_config;
add_options_to_argv(&options);
- result = fetch_populated_submodules(the_repository,
- &options,
- submodule_prefix,
- recurse_submodules,
- recurse_submodules_default,
- verbosity < 0,
- max_children);
+ result = fetch_submodules(the_repository,
+ &options,
+ submodule_prefix,
+ recurse_submodules,
+ recurse_submodules_default,
+ verbosity < 0,
+ max_children);
strvec_clear(&options);
}
@@ -2293,8 +2306,25 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
NULL);
}
- if (enable_auto_gc)
+ if (enable_auto_gc) {
+ if (refetch) {
+ /*
+ * Hint auto-maintenance strongly to encourage repacking,
+ * but respect config settings disabling it.
+ */
+ int opt_val;
+ if (git_config_get_int("gc.autopacklimit", &opt_val))
+ opt_val = -1;
+ if (opt_val != 0)
+ git_config_push_parameter("gc.autoPackLimit=1");
+
+ if (git_config_get_int("maintenance.incremental-repack.auto", &opt_val))
+ opt_val = -1;
+ if (opt_val != 0)
+ git_config_push_parameter("maintenance.incremental-repack.auto=-1");
+ }
run_auto_maintenance(verbosity < 0);
+ }
cleanup:
string_list_clear(&list, 0);
diff --git a/builtin/fsmonitor--daemon.c b/builtin/fsmonitor--daemon.c
new file mode 100644
index 0000000000..46be55a461
--- /dev/null
+++ b/builtin/fsmonitor--daemon.c
@@ -0,0 +1,1479 @@
+#include "builtin.h"
+#include "config.h"
+#include "parse-options.h"
+#include "fsmonitor.h"
+#include "fsmonitor-ipc.h"
+#include "compat/fsmonitor/fsm-listen.h"
+#include "fsmonitor--daemon.h"
+#include "simple-ipc.h"
+#include "khash.h"
+#include "pkt-line.h"
+
+static const char * const builtin_fsmonitor__daemon_usage[] = {
+ N_("git fsmonitor--daemon start [<options>]"),
+ N_("git fsmonitor--daemon run [<options>]"),
+ N_("git fsmonitor--daemon stop"),
+ N_("git fsmonitor--daemon status"),
+ NULL
+};
+
+#ifdef HAVE_FSMONITOR_DAEMON_BACKEND
+/*
+ * Global state loaded from config.
+ */
+#define FSMONITOR__IPC_THREADS "fsmonitor.ipcthreads"
+static int fsmonitor__ipc_threads = 8;
+
+#define FSMONITOR__START_TIMEOUT "fsmonitor.starttimeout"
+static int fsmonitor__start_timeout_sec = 60;
+
+#define FSMONITOR__ANNOUNCE_STARTUP "fsmonitor.announcestartup"
+static int fsmonitor__announce_startup = 0;
+
+static int fsmonitor_config(const char *var, const char *value, void *cb)
+{
+ if (!strcmp(var, FSMONITOR__IPC_THREADS)) {
+ int i = git_config_int(var, value);
+ if (i < 1)
+ return error(_("value of '%s' out of range: %d"),
+ FSMONITOR__IPC_THREADS, i);
+ fsmonitor__ipc_threads = i;
+ return 0;
+ }
+
+ if (!strcmp(var, FSMONITOR__START_TIMEOUT)) {
+ int i = git_config_int(var, value);
+ if (i < 0)
+ return error(_("value of '%s' out of range: %d"),
+ FSMONITOR__START_TIMEOUT, i);
+ fsmonitor__start_timeout_sec = i;
+ return 0;
+ }
+
+ if (!strcmp(var, FSMONITOR__ANNOUNCE_STARTUP)) {
+ int is_bool;
+ int i = git_config_bool_or_int(var, value, &is_bool);
+ if (i < 0)
+ return error(_("value of '%s' not bool or int: %d"),
+ var, i);
+ fsmonitor__announce_startup = i;
+ return 0;
+ }
+
+ return git_default_config(var, value, cb);
+}
+
+/*
+ * Acting as a CLIENT.
+ *
+ * Send a "quit" command to the `git-fsmonitor--daemon` (if running)
+ * and wait for it to shutdown.
+ */
+static int do_as_client__send_stop(void)
+{
+ struct strbuf answer = STRBUF_INIT;
+ int ret;
+
+ ret = fsmonitor_ipc__send_command("quit", &answer);
+
+ /* The quit command does not return any response data. */
+ strbuf_release(&answer);
+
+ if (ret)
+ return ret;
+
+ trace2_region_enter("fsm_client", "polling-for-daemon-exit", NULL);
+ while (fsmonitor_ipc__get_state() == IPC_STATE__LISTENING)
+ sleep_millisec(50);
+ trace2_region_leave("fsm_client", "polling-for-daemon-exit", NULL);
+
+ return 0;
+}
+
+static int do_as_client__status(void)
+{
+ enum ipc_active_state state = fsmonitor_ipc__get_state();
+
+ switch (state) {
+ case IPC_STATE__LISTENING:
+ printf(_("fsmonitor-daemon is watching '%s'\n"),
+ the_repository->worktree);
+ return 0;
+
+ default:
+ printf(_("fsmonitor-daemon is not watching '%s'\n"),
+ the_repository->worktree);
+ return 1;
+ }
+}
+
+enum fsmonitor_cookie_item_result {
+ FCIR_ERROR = -1, /* could not create cookie file ? */
+ FCIR_INIT,
+ FCIR_SEEN,
+ FCIR_ABORT,
+};
+
+struct fsmonitor_cookie_item {
+ struct hashmap_entry entry;
+ char *name;
+ enum fsmonitor_cookie_item_result result;
+};
+
+static int cookies_cmp(const void *data, const struct hashmap_entry *he1,
+ const struct hashmap_entry *he2, const void *keydata)
+{
+ const struct fsmonitor_cookie_item *a =
+ container_of(he1, const struct fsmonitor_cookie_item, entry);
+ const struct fsmonitor_cookie_item *b =
+ container_of(he2, const struct fsmonitor_cookie_item, entry);
+
+ return strcmp(a->name, keydata ? keydata : b->name);
+}
+
+static enum fsmonitor_cookie_item_result with_lock__wait_for_cookie(
+ struct fsmonitor_daemon_state *state)
+{
+ /* assert current thread holding state->main_lock */
+
+ int fd;
+ struct fsmonitor_cookie_item *cookie;
+ struct strbuf cookie_pathname = STRBUF_INIT;
+ struct strbuf cookie_filename = STRBUF_INIT;
+ enum fsmonitor_cookie_item_result result;
+ int my_cookie_seq;
+
+ CALLOC_ARRAY(cookie, 1);
+
+ my_cookie_seq = state->cookie_seq++;
+
+ strbuf_addf(&cookie_filename, "%i-%i", getpid(), my_cookie_seq);
+
+ strbuf_addbuf(&cookie_pathname, &state->path_cookie_prefix);
+ strbuf_addbuf(&cookie_pathname, &cookie_filename);
+
+ cookie->name = strbuf_detach(&cookie_filename, NULL);
+ cookie->result = FCIR_INIT;
+ hashmap_entry_init(&cookie->entry, strhash(cookie->name));
+
+ hashmap_add(&state->cookies, &cookie->entry);
+
+ trace_printf_key(&trace_fsmonitor, "cookie-wait: '%s' '%s'",
+ cookie->name, cookie_pathname.buf);
+
+ /*
+ * Create the cookie file on disk and then wait for a notification
+ * that the listener thread has seen it.
+ */
+ fd = open(cookie_pathname.buf, O_WRONLY | O_CREAT | O_EXCL, 0600);
+ if (fd < 0) {
+ error_errno(_("could not create fsmonitor cookie '%s'"),
+ cookie->name);
+
+ cookie->result = FCIR_ERROR;
+ goto done;
+ }
+
+ /*
+ * Technically, close() and unlink() can fail, but we don't
+ * care here. We only created the file to trigger a watch
+ * event from the FS so we know when we're up to date.
+ */
+ close(fd);
+ unlink(cookie_pathname.buf);
+
+ /*
+ * Technically, this is an infinite wait (well, unless another
+ * thread sends us an abort). I'd like to change this to
+ * use `pthread_cond_timedwait()` and return an error/timeout
+ * and let the caller do the trivial response thing, but we
+ * don't have that routine in our thread-utils.
+ *
+ * After extensive beta testing I'm not really worried about
+ * this. Also note that the above open() and unlink() calls
+ * will cause at least two FS events on that path, so the odds
+ * of getting stuck are pretty slim.
+ */
+ while (cookie->result == FCIR_INIT)
+ pthread_cond_wait(&state->cookies_cond,
+ &state->main_lock);
+
+done:
+ hashmap_remove(&state->cookies, &cookie->entry, NULL);
+
+ result = cookie->result;
+
+ free(cookie->name);
+ free(cookie);
+ strbuf_release(&cookie_pathname);
+
+ return result;
+}
+
+/*
+ * Mark these cookies as _SEEN and wake up the corresponding client threads.
+ */
+static void with_lock__mark_cookies_seen(struct fsmonitor_daemon_state *state,
+ const struct string_list *cookie_names)
+{
+ /* assert current thread holding state->main_lock */
+
+ int k;
+ int nr_seen = 0;
+
+ for (k = 0; k < cookie_names->nr; k++) {
+ struct fsmonitor_cookie_item key;
+ struct fsmonitor_cookie_item *cookie;
+
+ key.name = cookie_names->items[k].string;
+ hashmap_entry_init(&key.entry, strhash(key.name));
+
+ cookie = hashmap_get_entry(&state->cookies, &key, entry, NULL);
+ if (cookie) {
+ trace_printf_key(&trace_fsmonitor, "cookie-seen: '%s'",
+ cookie->name);
+ cookie->result = FCIR_SEEN;
+ nr_seen++;
+ }
+ }
+
+ if (nr_seen)
+ pthread_cond_broadcast(&state->cookies_cond);
+}
+
+/*
+ * Set _ABORT on all pending cookies and wake up all client threads.
+ */
+static void with_lock__abort_all_cookies(struct fsmonitor_daemon_state *state)
+{
+ /* assert current thread holding state->main_lock */
+
+ struct hashmap_iter iter;
+ struct fsmonitor_cookie_item *cookie;
+ int nr_aborted = 0;
+
+ hashmap_for_each_entry(&state->cookies, &iter, cookie, entry) {
+ trace_printf_key(&trace_fsmonitor, "cookie-abort: '%s'",
+ cookie->name);
+ cookie->result = FCIR_ABORT;
+ nr_aborted++;
+ }
+
+ if (nr_aborted)
+ pthread_cond_broadcast(&state->cookies_cond);
+}
+
+/*
+ * Requests to and from a FSMonitor Protocol V2 provider use an opaque
+ * "token" as a virtual timestamp. Clients can request a summary of all
+ * created/deleted/modified files relative to a token. In the response,
+ * clients receive a new token for the next (relative) request.
+ *
+ *
+ * Token Format
+ * ============
+ *
+ * The contents of the token are private and provider-specific.
+ *
+ * For the built-in fsmonitor--daemon, we define a token as follows:
+ *
+ * "builtin" ":" <token_id> ":" <sequence_nr>
+ *
+ * The "builtin" prefix is used as a namespace to avoid conflicts
+ * with other providers (such as Watchman).
+ *
+ * The <token_id> is an arbitrary OPAQUE string, such as a GUID,
+ * UUID, or {timestamp,pid}. It is used to group all filesystem
+ * events that happened while the daemon was monitoring (and in-sync
+ * with the filesystem).
+ *
+ * Unlike FSMonitor Protocol V1, it is not defined as a timestamp
+ * and does not define less-than/greater-than relationships.
+ * (There are too many race conditions to rely on file system
+ * event timestamps.)
+ *
+ * The <sequence_nr> is a simple integer incremented whenever the
+ * daemon needs to make its state public. For example, if 1000 file
+ * system events come in, but no clients have requested the data,
+ * the daemon can continue to accumulate file changes in the same
+ * bin and does not need to advance the sequence number. However,
+ * as soon as a client does arrive, the daemon needs to start a new
+ * bin and increment the sequence number.
+ *
+ * The sequence number serves as the boundary between 2 sets
+ * of bins -- the older ones that the client has already seen
+ * and the newer ones that it hasn't.
+ *
+ * When a new <token_id> is created, the <sequence_nr> is reset to
+ * zero.
+ *
+ *
+ * About Token Ids
+ * ===============
+ *
+ * A new token_id is created:
+ *
+ * [1] each time the daemon is started.
+ *
+ * [2] any time that the daemon must re-sync with the filesystem
+ * (such as when the kernel drops or we miss events on a very
+ * active volume).
+ *
+ * [3] in response to a client "flush" command (for dropped event
+ * testing).
+ *
+ * When a new token_id is created, the daemon is free to discard all
+ * cached filesystem events associated with any previous token_ids.
+ * Events associated with a non-current token_id will never be sent
+ * to a client. A token_id change implicitly means that the daemon
+ * has a gap in its event history.
+ *
+ * Therefore, clients that present a token with a stale (non-current)
+ * token_id will always be given a trivial response.
+ */
+struct fsmonitor_token_data {
+ struct strbuf token_id;
+ struct fsmonitor_batch *batch_head;
+ struct fsmonitor_batch *batch_tail;
+ uint64_t client_ref_count;
+};
+
+struct fsmonitor_batch {
+ struct fsmonitor_batch *next;
+ uint64_t batch_seq_nr;
+ const char **interned_paths;
+ size_t nr, alloc;
+ time_t pinned_time;
+};
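To make the token grammar above concrete, here is a minimal sketch of one client/daemon exchange; the token_id value is invented for illustration and follows the "<flush_count>.<pid>.<timestamp>Z" layout produced by fsmonitor_new_token_data() below:

    /*
     * Illustration only (invented values):
     *
     *   client request : "builtin:0.4242.20220315T101500.000000Z:4"
     *   daemon response: "builtin:0.4242.20220315T101500.000000Z:9"
     *                    followed by the NUL-terminated paths recorded in
     *                    batches 5..9, i.e. everything newer than seq_nr 4.
     */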
+
+static struct fsmonitor_token_data *fsmonitor_new_token_data(void)
+{
+ static int test_env_value = -1;
+ static uint64_t flush_count = 0;
+ struct fsmonitor_token_data *token;
+ struct fsmonitor_batch *batch;
+
+ CALLOC_ARRAY(token, 1);
+ batch = fsmonitor_batch__new();
+
+ strbuf_init(&token->token_id, 0);
+ token->batch_head = batch;
+ token->batch_tail = batch;
+ token->client_ref_count = 0;
+
+ if (test_env_value < 0)
+ test_env_value = git_env_bool("GIT_TEST_FSMONITOR_TOKEN", 0);
+
+ if (!test_env_value) {
+ struct timeval tv;
+ struct tm tm;
+ time_t secs;
+
+ gettimeofday(&tv, NULL);
+ secs = tv.tv_sec;
+ gmtime_r(&secs, &tm);
+
+ strbuf_addf(&token->token_id,
+ "%"PRIu64".%d.%4d%02d%02dT%02d%02d%02d.%06ldZ",
+ flush_count++,
+ getpid(),
+ tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
+ tm.tm_hour, tm.tm_min, tm.tm_sec,
+ (long)tv.tv_usec);
+ } else {
+ strbuf_addf(&token->token_id, "test_%08x", test_env_value++);
+ }
+
+ /*
+ * We created a new <token_id> and are starting a new series
+ * of tokens with a zero <seq_nr>.
+ *
+ * Since clients cannot guess our new (non test) <token_id>
+ * they will always receive a trivial response (because of the
+ * mismatch on the <token_id>). The trivial response will
+ * tell them our new <token_id> so that subsequent requests
+ * will be relative to our new series. (And when sending that
+ * response, we pin the current head of the batch list.)
+ *
+ * Even if the client correctly guesses the <token_id>, their
+ * request of "builtin:<token_id>:0" asks for all changes MORE
+ * RECENT than batch/bin 0.
+ *
+ * This implies that it is a waste to accumulate paths in the
+ * initial batch/bin (because they will never be transmitted).
+ *
+ * So the daemon could be running for days and watching the
+ * file system, but doesn't need to actually accumulate any
+ * paths UNTIL we need to set a reference point for a later
+ * relative request.
+ *
+ * However, it is very useful for testing to always have a
+ * reference point set. Pin batch 0 to force early file system
+ * events to accumulate.
+ */
+ if (test_env_value)
+ batch->pinned_time = time(NULL);
+
+ return token;
+}
+
+struct fsmonitor_batch *fsmonitor_batch__new(void)
+{
+ struct fsmonitor_batch *batch;
+
+ CALLOC_ARRAY(batch, 1);
+
+ return batch;
+}
+
+void fsmonitor_batch__free_list(struct fsmonitor_batch *batch)
+{
+ while (batch) {
+ struct fsmonitor_batch *next = batch->next;
+
+ /*
+ * The actual strings within the array of this batch
+ * are interned, so we don't own them. We only own
+ * the array.
+ */
+ free(batch->interned_paths);
+ free(batch);
+
+ batch = next;
+ }
+}
+
+void fsmonitor_batch__add_path(struct fsmonitor_batch *batch,
+ const char *path)
+{
+ const char *interned_path = strintern(path);
+
+ trace_printf_key(&trace_fsmonitor, "event: %s", interned_path);
+
+ ALLOC_GROW(batch->interned_paths, batch->nr + 1, batch->alloc);
+ batch->interned_paths[batch->nr++] = interned_path;
+}
+
+static void fsmonitor_batch__combine(struct fsmonitor_batch *batch_dest,
+ const struct fsmonitor_batch *batch_src)
+{
+ size_t k;
+
+ ALLOC_GROW(batch_dest->interned_paths,
+ batch_dest->nr + batch_src->nr + 1,
+ batch_dest->alloc);
+
+ for (k = 0; k < batch_src->nr; k++)
+ batch_dest->interned_paths[batch_dest->nr++] =
+ batch_src->interned_paths[k];
+}
+
+/*
+ * To keep the batch list from growing unbounded in response to filesystem
+ * activity, we try to truncate old batches from the end of the list as
+ * they become irrelevant.
+ *
+ * We assume that the .git/index will be updated with the most recent token
+ * any time the index is updated. And future commands will only ask for
+ * recent changes *since* that new token. So as tokens advance into the
+ * future, older batch items will never be requested/needed. So we can
+ * truncate them without loss of functionality.
+ *
+ * However, multiple commands may be talking to the daemon concurrently
+ * or perform a slow command, so a little "token skew" is possible.
+ * Therefore, we want this to be a little bit lazy and have a generous
+ * delay.
+ *
+ * The current reader thread walked backwards in time from `token->batch_head`
+ * back to `batch_marker` somewhere in the middle of the batch list.
+ *
+ * Let's walk backwards in time from that marker an arbitrary delay
+ * and truncate the list there. Note that these timestamps are completely
+ * artificial (based on when we pinned the batch item) and not on any
+ * filesystem activity.
+ *
+ * Return the obsolete portion of the list after we have removed it from
+ * the official list so that the caller can free it after leaving the lock.
+ */
+#define MY_TIME_DELAY_SECONDS (5 * 60) /* seconds */
+
+static struct fsmonitor_batch *with_lock__truncate_old_batches(
+ struct fsmonitor_daemon_state *state,
+ const struct fsmonitor_batch *batch_marker)
+{
+ /* assert current thread holding state->main_lock */
+
+ const struct fsmonitor_batch *batch;
+ struct fsmonitor_batch *remainder;
+
+ if (!batch_marker)
+ return NULL;
+
+ trace_printf_key(&trace_fsmonitor, "Truncate: mark (%"PRIu64",%"PRIu64")",
+ batch_marker->batch_seq_nr,
+ (uint64_t)batch_marker->pinned_time);
+
+ for (batch = batch_marker; batch; batch = batch->next) {
+ time_t t;
+
+ if (!batch->pinned_time) /* an overflow batch */
+ continue;
+
+ t = batch->pinned_time + MY_TIME_DELAY_SECONDS;
+ if (t > batch_marker->pinned_time) /* too close to marker */
+ continue;
+
+ goto truncate_past_here;
+ }
+
+ return NULL;
+
+truncate_past_here:
+ state->current_token_data->batch_tail = (struct fsmonitor_batch *)batch;
+
+ remainder = ((struct fsmonitor_batch *)batch)->next;
+ ((struct fsmonitor_batch *)batch)->next = NULL;
+
+ return remainder;
+}
+
+static void fsmonitor_free_token_data(struct fsmonitor_token_data *token)
+{
+ if (!token)
+ return;
+
+ assert(token->client_ref_count == 0);
+
+ strbuf_release(&token->token_id);
+
+ fsmonitor_batch__free_list(token->batch_head);
+
+ free(token);
+}
+
+/*
+ * Flush all of our cached data about the filesystem. Call this if we
+ * lose sync with the filesystem and miss some notification events.
+ *
+ * [1] If we are missing events, then we no longer have a complete
+ * history of the directory (relative to our current start token).
+ * We should create a new token and start fresh (as if we just
+ * booted up).
+ *
+ * [2] Some of those lost events may have been for cookie files. We
+ * should assume the worst and abort them rather than letting them starve.
+ *
+ * If there are no concurrent threads reading the current token data
+ * series, we can free it now. Otherwise, let the last reader free
+ * it.
+ *
+ * Either way, the old token data series is no longer associated with
+ * our state data.
+ */
+static void with_lock__do_force_resync(struct fsmonitor_daemon_state *state)
+{
+ /* assert current thread holding state->main_lock */
+
+ struct fsmonitor_token_data *free_me = NULL;
+ struct fsmonitor_token_data *new_one = NULL;
+
+ new_one = fsmonitor_new_token_data();
+
+ if (state->current_token_data->client_ref_count == 0)
+ free_me = state->current_token_data;
+ state->current_token_data = new_one;
+
+ fsmonitor_free_token_data(free_me);
+
+ with_lock__abort_all_cookies(state);
+}
+
+void fsmonitor_force_resync(struct fsmonitor_daemon_state *state)
+{
+ pthread_mutex_lock(&state->main_lock);
+ with_lock__do_force_resync(state);
+ pthread_mutex_unlock(&state->main_lock);
+}
+
+/*
+ * Format an opaque token string to send to the client.
+ */
+static void with_lock__format_response_token(
+ struct strbuf *response_token,
+ const struct strbuf *response_token_id,
+ const struct fsmonitor_batch *batch)
+{
+ /* assert current thread holding state->main_lock */
+
+ strbuf_reset(response_token);
+ strbuf_addf(response_token, "builtin:%s:%"PRIu64,
+ response_token_id->buf, batch->batch_seq_nr);
+}
+
+/*
+ * Parse an opaque token from the client.
+ * Returns -1 on error.
+ */
+static int fsmonitor_parse_client_token(const char *buf_token,
+ struct strbuf *requested_token_id,
+ uint64_t *seq_nr)
+{
+ const char *p;
+ char *p_end;
+
+ strbuf_reset(requested_token_id);
+ *seq_nr = 0;
+
+ if (!skip_prefix(buf_token, "builtin:", &p))
+ return -1;
+
+ while (*p && *p != ':')
+ strbuf_addch(requested_token_id, *p++);
+ if (!*p++)
+ return -1;
+
+ *seq_nr = (uint64_t)strtoumax(p, &p_end, 10);
+ if (*p_end)
+ return -1;
+
+ return 0;
+}
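A minimal usage sketch of the parser above (the token value is invented for illustration):

    struct strbuf id = STRBUF_INIT;
    uint64_t seq;

    /* "builtin:<token_id>:<seq_nr>" splits into its two halves */
    if (!fsmonitor_parse_client_token("builtin:0.4242.20220315T101500.000000Z:17",
                                      &id, &seq))
        trace_printf_key(&trace_fsmonitor, "token_id '%s' seq_nr %"PRIu64,
                         id.buf, seq); /* the id string and 17 */
    strbuf_release(&id);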
+
+KHASH_INIT(str, const char *, int, 0, kh_str_hash_func, kh_str_hash_equal)
+
+static int do_handle_client(struct fsmonitor_daemon_state *state,
+ const char *command,
+ ipc_server_reply_cb *reply,
+ struct ipc_server_reply_data *reply_data)
+{
+ struct fsmonitor_token_data *token_data = NULL;
+ struct strbuf response_token = STRBUF_INIT;
+ struct strbuf requested_token_id = STRBUF_INIT;
+ struct strbuf payload = STRBUF_INIT;
+ uint64_t requested_oldest_seq_nr = 0;
+ uint64_t total_response_len = 0;
+ const char *p;
+ const struct fsmonitor_batch *batch_head;
+ const struct fsmonitor_batch *batch;
+ struct fsmonitor_batch *remainder = NULL;
+ intmax_t count = 0, duplicates = 0;
+ kh_str_t *shown;
+ int hash_ret;
+ int do_trivial = 0;
+ int do_flush = 0;
+ int do_cookie = 0;
+ enum fsmonitor_cookie_item_result cookie_result;
+
+ /*
+ * We expect `command` to be of the form:
+ *
+ * <command> := quit NUL
+ * | flush NUL
+ * | <V1-time-since-epoch-ns> NUL
+ * | <V2-opaque-fsmonitor-token> NUL
+ */
+
+ if (!strcmp(command, "quit")) {
+ /*
+ * A client has requested over the socket/pipe that the
+ * daemon shutdown.
+ *
+ * Tell the IPC thread pool to shutdown (which completes
+ * the await in the main thread (which can stop the
+ * fsmonitor listener thread)).
+ *
+ * There is no reply to the client.
+ */
+ return SIMPLE_IPC_QUIT;
+
+ } else if (!strcmp(command, "flush")) {
+ /*
+ * Flush all of our cached data and generate a new token
+ * just like if we lost sync with the filesystem.
+ *
+ * Then send a trivial response using the new token.
+ */
+ do_flush = 1;
+ do_trivial = 1;
+
+ } else if (!skip_prefix(command, "builtin:", &p)) {
+ /* assume V1 timestamp or garbage */
+
+ char *p_end;
+
+ strtoumax(command, &p_end, 10);
+ trace_printf_key(&trace_fsmonitor,
+ ((*p_end) ?
+ "fsmonitor: invalid command line '%s'" :
+ "fsmonitor: unsupported V1 protocol '%s'"),
+ command);
+ do_trivial = 1;
+
+ } else {
+ /* We have "builtin:*" */
+ if (fsmonitor_parse_client_token(command, &requested_token_id,
+ &requested_oldest_seq_nr)) {
+ trace_printf_key(&trace_fsmonitor,
+ "fsmonitor: invalid V2 protocol token '%s'",
+ command);
+ do_trivial = 1;
+
+ } else {
+ /*
+ * We have a V2 valid token:
+ * "builtin:<token_id>:<seq_nr>"
+ */
+ do_cookie = 1;
+ }
+ }
+
+ pthread_mutex_lock(&state->main_lock);
+
+ if (!state->current_token_data)
+ BUG("fsmonitor state does not have a current token");
+
+ /*
+ * Write a cookie file inside the directory being watched in
+ * an effort to flush out existing filesystem events that we
+ * actually care about. Suspend this client thread until we
+ * see the filesystem events for this cookie file.
+ *
+ * Creating the cookie lets us guarantee that our FS listener
+ * thread has drained the kernel queue and we are caught up
+ * with the kernel.
+ *
+ * If we cannot create the cookie (or otherwise guarantee that
+ * we are caught up), we send a trivial response. We have to
+ * assume that there might be some very, very recent activity
+ * on the FS still in flight.
+ */
+ if (do_cookie) {
+ cookie_result = with_lock__wait_for_cookie(state);
+ if (cookie_result != FCIR_SEEN) {
+ error(_("fsmonitor: cookie_result '%d' != SEEN"),
+ cookie_result);
+ do_trivial = 1;
+ }
+ }
+
+ if (do_flush)
+ with_lock__do_force_resync(state);
+
+ /*
+ * We mark the current head of the batch list as "pinned" so
+ * that the listener thread will treat this item as read-only
+ * (and prevent any more paths from being added to it) from
+ * now on.
+ */
+ token_data = state->current_token_data;
+ batch_head = token_data->batch_head;
+ ((struct fsmonitor_batch *)batch_head)->pinned_time = time(NULL);
+
+ /*
+ * FSMonitor Protocol V2 requires that we send a response header
+ * with a "new current token" and then all of the paths that changed
+ * since the "requested token". We send the seq_nr of the just-pinned
+ * head batch so that future requests from a client will be relative
+ * to it.
+ */
+ with_lock__format_response_token(&response_token,
+ &token_data->token_id, batch_head);
+
+ reply(reply_data, response_token.buf, response_token.len + 1);
+ total_response_len += response_token.len + 1;
+
+ trace2_data_string("fsmonitor", the_repository, "response/token",
+ response_token.buf);
+ trace_printf_key(&trace_fsmonitor, "response token: %s",
+ response_token.buf);
+
+ if (!do_trivial) {
+ if (strcmp(requested_token_id.buf, token_data->token_id.buf)) {
+ /*
+ * The client last spoke to a different daemon
+ * instance -OR- the daemon had to resync with
+ * the filesystem (and lost events), so reject.
+ */
+ trace2_data_string("fsmonitor", the_repository,
+ "response/token", "different");
+ do_trivial = 1;
+
+ } else if (requested_oldest_seq_nr <
+ token_data->batch_tail->batch_seq_nr) {
+ /*
+ * The client wants older events than we have for
+ * this token_id. This means that the end of our
+ * batch list was truncated and we cannot give the
+ * client a complete snapshot relative to their
+ * request.
+ */
+ trace_printf_key(&trace_fsmonitor,
+ "client requested truncated data");
+ do_trivial = 1;
+ }
+ }
+
+ if (do_trivial) {
+ pthread_mutex_unlock(&state->main_lock);
+
+ reply(reply_data, "/", 2);
+
+ trace2_data_intmax("fsmonitor", the_repository,
+ "response/trivial", 1);
+
+ goto cleanup;
+ }
+
+ /*
+ * We're going to hold onto a pointer to the current
+ * token-data while we walk the list of batches of files.
+ * During this time, we will NOT be under the lock.
+ * So we ref-count it.
+ *
+ * This allows the listener thread to continue prepending
+ * new batches of items to the token-data (which we'll ignore).
+ *
+ * AND it allows the listener thread to do a token-reset
+ * (and install a new `current_token_data`).
+ */
+ token_data->client_ref_count++;
+
+ pthread_mutex_unlock(&state->main_lock);
+
+ /*
+ * The client request is relative to the token that they sent,
+ * so walk the batch list backwards from the current head back
+ * to the batch (sequence number) they named.
+ *
+ * We use khash to de-dup the list of pathnames.
+ *
+ * NEEDSWORK: each batch contains a list of interned strings,
+ * so we only need to do pointer comparisons here to build the
+ * hash table. Currently, we're still comparing the string
+ * values.
+ */
+ shown = kh_init_str();
+ for (batch = batch_head;
+ batch && batch->batch_seq_nr > requested_oldest_seq_nr;
+ batch = batch->next) {
+ size_t k;
+
+ for (k = 0; k < batch->nr; k++) {
+ const char *s = batch->interned_paths[k];
+ size_t s_len;
+
+ if (kh_get_str(shown, s) != kh_end(shown))
+ duplicates++;
+ else {
+ kh_put_str(shown, s, &hash_ret);
+
+ trace_printf_key(&trace_fsmonitor,
+ "send[%"PRIuMAX"]: %s",
+ count, s);
+
+ /* Each path gets written with a trailing NUL */
+ s_len = strlen(s) + 1;
+
+ if (payload.len + s_len >=
+ LARGE_PACKET_DATA_MAX) {
+ reply(reply_data, payload.buf,
+ payload.len);
+ total_response_len += payload.len;
+ strbuf_reset(&payload);
+ }
+
+ strbuf_add(&payload, s, s_len);
+ count++;
+ }
+ }
+ }
+
+ if (payload.len) {
+ reply(reply_data, payload.buf, payload.len);
+ total_response_len += payload.len;
+ }
+
+ kh_release_str(shown);
+
+ pthread_mutex_lock(&state->main_lock);
+
+ if (token_data->client_ref_count > 0)
+ token_data->client_ref_count--;
+
+ if (token_data->client_ref_count == 0) {
+ if (token_data != state->current_token_data) {
+ /*
+ * The listener thread did a token-reset while we were
+ * walking the batch list. Therefore, this token is
+ * stale and can be discarded completely. If we are
+ * the last reader thread using this token, we own
+ * that work.
+ */
+ fsmonitor_free_token_data(token_data);
+ } else if (batch) {
+ /*
+ * We are holding the lock and are the only
+ * reader of the ref-counted portion of the
+ * list, so we get the honor of seeing if the
+ * list can be truncated to save memory.
+ *
+ * The main loop did not walk to the end of the
+ * list, so this batch is the first item in the
+ * batch-list that is older than the requested
+ * end-point sequence number. See if the tail
+ * end of the list is obsolete.
+ */
+ remainder = with_lock__truncate_old_batches(state,
+ batch);
+ }
+ }
+
+ pthread_mutex_unlock(&state->main_lock);
+
+ if (remainder)
+ fsmonitor_batch__free_list(remainder);
+
+ trace2_data_intmax("fsmonitor", the_repository, "response/length", total_response_len);
+ trace2_data_intmax("fsmonitor", the_repository, "response/count/files", count);
+ trace2_data_intmax("fsmonitor", the_repository, "response/count/duplicates", duplicates);
+
+cleanup:
+ strbuf_release(&response_token);
+ strbuf_release(&requested_token_id);
+ strbuf_release(&payload);
+
+ return 0;
+}
+
+static ipc_server_application_cb handle_client;
+
+static int handle_client(void *data,
+ const char *command, size_t command_len,
+ ipc_server_reply_cb *reply,
+ struct ipc_server_reply_data *reply_data)
+{
+ struct fsmonitor_daemon_state *state = data;
+ int result;
+
+ /*
+ * The Simple IPC API now supports {char*, len} arguments, but
+ * FSMonitor always uses proper null-terminated strings, so
+ * we can ignore the command_len argument. (Trust, but verify.)
+ */
+ if (command_len != strlen(command))
+ BUG("FSMonitor assumes text messages");
+
+ trace_printf_key(&trace_fsmonitor, "requested token: %s", command);
+
+ trace2_region_enter("fsmonitor", "handle_client", the_repository);
+ trace2_data_string("fsmonitor", the_repository, "request", command);
+
+ result = do_handle_client(state, command, reply, reply_data);
+
+ trace2_region_leave("fsmonitor", "handle_client", the_repository);
+
+ return result;
+}
+
+#define FSMONITOR_DIR "fsmonitor--daemon"
+#define FSMONITOR_COOKIE_DIR "cookies"
+#define FSMONITOR_COOKIE_PREFIX (FSMONITOR_DIR "/" FSMONITOR_COOKIE_DIR "/")
+
+enum fsmonitor_path_type fsmonitor_classify_path_workdir_relative(
+ const char *rel)
+{
+ if (fspathncmp(rel, ".git", 4))
+ return IS_WORKDIR_PATH;
+ rel += 4;
+
+ if (!*rel)
+ return IS_DOT_GIT;
+ if (*rel != '/')
+ return IS_WORKDIR_PATH; /* e.g. .gitignore */
+ rel++;
+
+ if (!fspathncmp(rel, FSMONITOR_COOKIE_PREFIX,
+ strlen(FSMONITOR_COOKIE_PREFIX)))
+ return IS_INSIDE_DOT_GIT_WITH_COOKIE_PREFIX;
+
+ return IS_INSIDE_DOT_GIT;
+}
+
+enum fsmonitor_path_type fsmonitor_classify_path_gitdir_relative(
+ const char *rel)
+{
+ if (!fspathncmp(rel, FSMONITOR_COOKIE_PREFIX,
+ strlen(FSMONITOR_COOKIE_PREFIX)))
+ return IS_INSIDE_GITDIR_WITH_COOKIE_PREFIX;
+
+ return IS_INSIDE_GITDIR;
+}
+
+static enum fsmonitor_path_type try_classify_workdir_abs_path(
+ struct fsmonitor_daemon_state *state,
+ const char *path)
+{
+ const char *rel;
+
+ if (fspathncmp(path, state->path_worktree_watch.buf,
+ state->path_worktree_watch.len))
+ return IS_OUTSIDE_CONE;
+
+ rel = path + state->path_worktree_watch.len;
+
+ if (!*rel)
+ return IS_WORKDIR_PATH; /* it is the root dir exactly */
+ if (*rel != '/')
+ return IS_OUTSIDE_CONE;
+ rel++;
+
+ return fsmonitor_classify_path_workdir_relative(rel);
+}
+
+enum fsmonitor_path_type fsmonitor_classify_path_absolute(
+ struct fsmonitor_daemon_state *state,
+ const char *path)
+{
+ const char *rel;
+ enum fsmonitor_path_type t;
+
+ t = try_classify_workdir_abs_path(state, path);
+ if (state->nr_paths_watching == 1)
+ return t;
+ if (t != IS_OUTSIDE_CONE)
+ return t;
+
+ if (fspathncmp(path, state->path_gitdir_watch.buf,
+ state->path_gitdir_watch.len))
+ return IS_OUTSIDE_CONE;
+
+ rel = path + state->path_gitdir_watch.len;
+
+ if (!*rel)
+ return IS_GITDIR; /* it is the <gitdir> exactly */
+ if (*rel != '/')
+ return IS_OUTSIDE_CONE;
+ rel++;
+
+ return fsmonitor_classify_path_gitdir_relative(rel);
+}
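For orientation, a sketch of how a few sample paths would be classified by the helpers above, assuming the worktree root is "/repo" and ".git" is an ordinary directory (so only the worktree is watched):

    /*
     * Illustration only:
     *
     *   "/repo/src/main.c"                          -> IS_WORKDIR_PATH
     *   "/repo/.gitignore"                          -> IS_WORKDIR_PATH
     *   "/repo/.git"                                -> IS_DOT_GIT
     *   "/repo/.git/index"                          -> IS_INSIDE_DOT_GIT
     *   "/repo/.git/fsmonitor--daemon/cookies/1-1"  -> IS_INSIDE_DOT_GIT_WITH_COOKIE_PREFIX
     *   "/tmp/elsewhere"                            -> IS_OUTSIDE_CONE
     */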
+
+/*
+ * We try to combine small batches at the front of the batch-list to avoid
+ * having a long list. This hopefully makes it a little easier when we want
+ * to truncate and maintain the list. However, we don't want the paths array
+ * to just keep growing and growing with realloc, so we insert an arbitrary
+ * limit.
+ */
+#define MY_COMBINE_LIMIT (1024)
+
+void fsmonitor_publish(struct fsmonitor_daemon_state *state,
+ struct fsmonitor_batch *batch,
+ const struct string_list *cookie_names)
+{
+ if (!batch && !cookie_names->nr)
+ return;
+
+ pthread_mutex_lock(&state->main_lock);
+
+ if (batch) {
+ struct fsmonitor_batch *head;
+
+ head = state->current_token_data->batch_head;
+ if (!head) {
+ BUG("token does not have batch");
+ } else if (head->pinned_time) {
+ /*
+ * We cannot alter the current batch list
+ * because:
+ *
+ * [a] it is being transmitted to at least one
+ * client and the handle_client() thread has a
+ * ref-count, but not a lock on the batch list
+ * starting with this item.
+ *
+ * [b] it has been transmitted in the past to
+ * at least one client such that future
+ * requests are relative to this head batch.
+ *
+ * So, we can only prepend a new batch onto
+ * the front of the list.
+ */
+ batch->batch_seq_nr = head->batch_seq_nr + 1;
+ batch->next = head;
+ state->current_token_data->batch_head = batch;
+ } else if (!head->batch_seq_nr) {
+ /*
+ * Batch 0 is unpinned. See the note in
+ * `fsmonitor_new_token_data()` about why we
+ * don't need to accumulate these paths.
+ */
+ fsmonitor_batch__free_list(batch);
+ } else if (head->nr + batch->nr > MY_COMBINE_LIMIT) {
+ /*
+ * The head batch in the list has never been
+ * transmitted to a client, but folding the
+ * contents of the new batch onto it would
+ * exceed our arbitrary limit, so just prepend
+ * the new batch onto the list.
+ */
+ batch->batch_seq_nr = head->batch_seq_nr + 1;
+ batch->next = head;
+ state->current_token_data->batch_head = batch;
+ } else {
+ /*
+ * We are free to add the paths in the given
+ * batch onto the end of the current head batch.
+ */
+ fsmonitor_batch__combine(head, batch);
+ fsmonitor_batch__free_list(batch);
+ }
+ }
+
+ if (cookie_names->nr)
+ with_lock__mark_cookies_seen(state, cookie_names);
+
+ pthread_mutex_unlock(&state->main_lock);
+}
+
+static void *fsm_listen__thread_proc(void *_state)
+{
+ struct fsmonitor_daemon_state *state = _state;
+
+ trace2_thread_start("fsm-listen");
+
+ trace_printf_key(&trace_fsmonitor, "Watching: worktree '%s'",
+ state->path_worktree_watch.buf);
+ if (state->nr_paths_watching > 1)
+ trace_printf_key(&trace_fsmonitor, "Watching: gitdir '%s'",
+ state->path_gitdir_watch.buf);
+
+ fsm_listen__loop(state);
+
+ pthread_mutex_lock(&state->main_lock);
+ if (state->current_token_data &&
+ state->current_token_data->client_ref_count == 0)
+ fsmonitor_free_token_data(state->current_token_data);
+ state->current_token_data = NULL;
+ pthread_mutex_unlock(&state->main_lock);
+
+ trace2_thread_exit();
+ return NULL;
+}
+
+static int fsmonitor_run_daemon_1(struct fsmonitor_daemon_state *state)
+{
+ struct ipc_server_opts ipc_opts = {
+ .nr_threads = fsmonitor__ipc_threads,
+
+ /*
+ * We know that there are no other active threads yet,
+ * so we can let the IPC layer temporarily chdir() if
+ * it needs to when creating the server side of the
+ * Unix domain socket.
+ */
+ .uds_disallow_chdir = 0
+ };
+
+ /*
+ * Start the IPC thread pool before we start the file system
+ * event listener thread so that we have the IPC handle
+ * before we need it.
+ */
+ if (ipc_server_run_async(&state->ipc_server_data,
+ fsmonitor_ipc__get_path(), &ipc_opts,
+ handle_client, state))
+ return error_errno(
+ _("could not start IPC thread pool on '%s'"),
+ fsmonitor_ipc__get_path());
+
+ /*
+ * Start the fsmonitor listener thread to collect filesystem
+ * events.
+ */
+ if (pthread_create(&state->listener_thread, NULL,
+ fsm_listen__thread_proc, state) < 0) {
+ ipc_server_stop_async(state->ipc_server_data);
+ ipc_server_await(state->ipc_server_data);
+
+ return error(_("could not start fsmonitor listener thread"));
+ }
+
+ /*
+ * The daemon is now fully functional in background threads.
+ * Wait for the IPC thread pool to shutdown (whether by client
+ * request or from filesystem activity).
+ */
+ ipc_server_await(state->ipc_server_data);
+
+ /*
+ * The fsmonitor listener thread may have received a shutdown
+ * event from the IPC thread pool, but it doesn't hurt to tell
+ * it again. And wait for it to shutdown.
+ */
+ fsm_listen__stop_async(state);
+ pthread_join(state->listener_thread, NULL);
+
+ return state->error_code;
+}
+
+static int fsmonitor_run_daemon(void)
+{
+ struct fsmonitor_daemon_state state;
+ int err;
+
+ memset(&state, 0, sizeof(state));
+
+ hashmap_init(&state.cookies, cookies_cmp, NULL, 0);
+ pthread_mutex_init(&state.main_lock, NULL);
+ pthread_cond_init(&state.cookies_cond, NULL);
+ state.error_code = 0;
+ state.current_token_data = fsmonitor_new_token_data();
+
+ /* Prepare to (recursively) watch the <worktree-root> directory. */
+ strbuf_init(&state.path_worktree_watch, 0);
+ strbuf_addstr(&state.path_worktree_watch, absolute_path(get_git_work_tree()));
+ state.nr_paths_watching = 1;
+
+ /*
+ * We create and delete cookie files somewhere inside the .git
+ * directory to help us keep sync with the file system. If
+ * ".git" is not a directory, then <gitdir> is not inside the
+ * cone of <worktree-root>, so set up a second watch to watch
+ * the <gitdir> so that we get events for the cookie files.
+ */
+ strbuf_init(&state.path_gitdir_watch, 0);
+ strbuf_addbuf(&state.path_gitdir_watch, &state.path_worktree_watch);
+ strbuf_addstr(&state.path_gitdir_watch, "/.git");
+ if (!is_directory(state.path_gitdir_watch.buf)) {
+ strbuf_reset(&state.path_gitdir_watch);
+ strbuf_addstr(&state.path_gitdir_watch, absolute_path(get_git_dir()));
+ state.nr_paths_watching = 2;
+ }
+
+ /*
+ * We will write filesystem syncing cookie files into
+ * <gitdir>/<fsmonitor-dir>/<cookie-dir>/<pid>-<seq>.
+ *
+ * The extra layers of subdirectories here keep us from
+ * changing the mtime on ".git/" or ".git/foo/" when we create
+ * or delete cookie files.
+ *
+ * There have been problems with some IDEs that do a
+ * non-recursive watch of the ".git/" directory and run a
+ * series of commands any time something happens.
+ *
+ * For example, if we place our cookie files directly in
+ * ".git/" or ".git/foo/" then a `git status` (or similar
+ * command) from the IDE will cause a cookie file to be
+ * created in one of those dirs. This causes the mtime of
+ * those dirs to change. This triggers the IDE's watch
+ * notification. This triggers the IDE to run those commands
+ * again. And the process repeats and the machine never goes
+ * idle.
+ *
+ * Adding the extra layers of subdirectories prevents the
+ * mtime of ".git/" and ".git/foo" from changing when a
+ * cookie file is created.
+ */
+ strbuf_init(&state.path_cookie_prefix, 0);
+ strbuf_addbuf(&state.path_cookie_prefix, &state.path_gitdir_watch);
+
+ strbuf_addch(&state.path_cookie_prefix, '/');
+ strbuf_addstr(&state.path_cookie_prefix, FSMONITOR_DIR);
+ mkdir(state.path_cookie_prefix.buf, 0777);
+
+ strbuf_addch(&state.path_cookie_prefix, '/');
+ strbuf_addstr(&state.path_cookie_prefix, FSMONITOR_COOKIE_DIR);
+ mkdir(state.path_cookie_prefix.buf, 0777);
+
+ strbuf_addch(&state.path_cookie_prefix, '/');
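With an ordinary repository layout, the prefix built above looks like the following (worktree path invented for illustration):

    /*
     * state.path_cookie_prefix = "/repo/.git/fsmonitor--daemon/cookies/"
     * individual cookie files  = "/repo/.git/fsmonitor--daemon/cookies/<pid>-<seq>"
     */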
+
+ /*
+ * Confirm that we can create platform-specific resources for the
+ * filesystem listener before we bother starting all the threads.
+ */
+ if (fsm_listen__ctor(&state)) {
+ err = error(_("could not initialize listener thread"));
+ goto done;
+ }
+
+ err = fsmonitor_run_daemon_1(&state);
+
+done:
+ pthread_cond_destroy(&state.cookies_cond);
+ pthread_mutex_destroy(&state.main_lock);
+ fsm_listen__dtor(&state);
+
+ ipc_server_free(state.ipc_server_data);
+
+ strbuf_release(&state.path_worktree_watch);
+ strbuf_release(&state.path_gitdir_watch);
+ strbuf_release(&state.path_cookie_prefix);
+
+ return err;
+}
+
+static int try_to_run_foreground_daemon(int detach_console)
+{
+ /*
+ * Technically, we don't need to probe for an existing daemon
+ * process, since we could just call `fsmonitor_run_daemon()`
+ * and let it fail if the pipe/socket is busy.
+ *
+ * However, this method gives us a nicer error message for a
+ * common error case.
+ */
+ if (fsmonitor_ipc__get_state() == IPC_STATE__LISTENING)
+ die(_("fsmonitor--daemon is already running '%s'"),
+ the_repository->worktree);
+
+ if (fsmonitor__announce_startup) {
+ fprintf(stderr, _("running fsmonitor-daemon in '%s'\n"),
+ the_repository->worktree);
+ fflush(stderr);
+ }
+
+#ifdef GIT_WINDOWS_NATIVE
+ if (detach_console)
+ FreeConsole();
+#endif
+
+ return !!fsmonitor_run_daemon();
+}
+
+static start_bg_wait_cb bg_wait_cb;
+
+static int bg_wait_cb(const struct child_process *cp, void *cb_data)
+{
+ enum ipc_active_state s = fsmonitor_ipc__get_state();
+
+ switch (s) {
+ case IPC_STATE__LISTENING:
+ /* child is "ready" */
+ return 0;
+
+ case IPC_STATE__NOT_LISTENING:
+ case IPC_STATE__PATH_NOT_FOUND:
+ /* give child more time */
+ return 1;
+
+ default:
+ case IPC_STATE__INVALID_PATH:
+ case IPC_STATE__OTHER_ERROR:
+ /* all the time in the world won't help */
+ return -1;
+ }
+}
+
+static int try_to_start_background_daemon(void)
+{
+ struct child_process cp = CHILD_PROCESS_INIT;
+ enum start_bg_result sbgr;
+
+ /*
+ * Before we try to create a background daemon process, see
+ * if a daemon process is already listening. This makes it
+ * easier for us to report an already-listening error to the
+ * console, since our spawn/daemon can only report the success
+ * of creating the background process (and not whether it
+ * immediately exited).
+ */
+ if (fsmonitor_ipc__get_state() == IPC_STATE__LISTENING)
+ die(_("fsmonitor--daemon is already running '%s'"),
+ the_repository->worktree);
+
+ if (fsmonitor__announce_startup) {
+ fprintf(stderr, _("starting fsmonitor-daemon in '%s'\n"),
+ the_repository->worktree);
+ fflush(stderr);
+ }
+
+ cp.git_cmd = 1;
+
+ strvec_push(&cp.args, "fsmonitor--daemon");
+ strvec_push(&cp.args, "run");
+ strvec_push(&cp.args, "--detach");
+ strvec_pushf(&cp.args, "--ipc-threads=%d", fsmonitor__ipc_threads);
+
+ cp.no_stdin = 1;
+ cp.no_stdout = 1;
+ cp.no_stderr = 1;
+
+ sbgr = start_bg_command(&cp, bg_wait_cb, NULL,
+ fsmonitor__start_timeout_sec);
+
+ switch (sbgr) {
+ case SBGR_READY:
+ return 0;
+
+ default:
+ case SBGR_ERROR:
+ case SBGR_CB_ERROR:
+ return error(_("daemon failed to start"));
+
+ case SBGR_TIMEOUT:
+ return error(_("daemon not online yet"));
+
+ case SBGR_DIED:
+ return error(_("daemon terminated"));
+ }
+}
+
+int cmd_fsmonitor__daemon(int argc, const char **argv, const char *prefix)
+{
+ const char *subcmd;
+ int detach_console = 0;
+
+ struct option options[] = {
+ OPT_BOOL(0, "detach", &detach_console, N_("detach from console")),
+ OPT_INTEGER(0, "ipc-threads",
+ &fsmonitor__ipc_threads,
+ N_("use <n> ipc worker threads")),
+ OPT_INTEGER(0, "start-timeout",
+ &fsmonitor__start_timeout_sec,
+ N_("max seconds to wait for background daemon startup")),
+
+ OPT_END()
+ };
+
+ git_config(fsmonitor_config, NULL);
+
+ argc = parse_options(argc, argv, prefix, options,
+ builtin_fsmonitor__daemon_usage, 0);
+ if (argc != 1)
+ usage_with_options(builtin_fsmonitor__daemon_usage, options);
+ subcmd = argv[0];
+
+ if (fsmonitor__ipc_threads < 1)
+ die(_("invalid 'ipc-threads' value (%d)"),
+ fsmonitor__ipc_threads);
+
+ if (!strcmp(subcmd, "start"))
+ return !!try_to_start_background_daemon();
+
+ if (!strcmp(subcmd, "run"))
+ return !!try_to_run_foreground_daemon(detach_console);
+
+ if (!strcmp(subcmd, "stop"))
+ return !!do_as_client__send_stop();
+
+ if (!strcmp(subcmd, "status"))
+ return !!do_as_client__status();
+
+ die(_("Unhandled subcommand '%s'"), subcmd);
+}
+
+#else
+int cmd_fsmonitor__daemon(int argc, const char **argv, const char *prefix)
+{
+ struct option options[] = {
+ OPT_END()
+ };
+
+ if (argc == 2 && !strcmp(argv[1], "-h"))
+ usage_with_options(builtin_fsmonitor__daemon_usage, options);
+
+ die(_("fsmonitor--daemon not supported on this platform"));
+}
+#endif
diff --git a/builtin/gc.c b/builtin/gc.c
index ffaf0daf5d..b335cffa33 100644
--- a/builtin/gc.c
+++ b/builtin/gc.c
@@ -30,7 +30,6 @@
#include "promisor-remote.h"
#include "refs.h"
#include "remote.h"
-#include "object-store.h"
#include "exec-cmd.h"
#include "hook.h"
diff --git a/builtin/index-pack.c b/builtin/index-pack.c
index 8a5a644744..680b66b063 100644
--- a/builtin/index-pack.c
+++ b/builtin/index-pack.c
@@ -1291,7 +1291,7 @@ static void conclude_pack(int fix_thin_pack, const char *curr_pack, unsigned cha
nr_objects - nr_objects_initial);
stop_progress_msg(&progress, msg.buf);
strbuf_release(&msg);
- finalize_hashfile(f, tail_hash, 0);
+ finalize_hashfile(f, tail_hash, FSYNC_COMPONENT_PACK, 0);
hashcpy(read_hash, pack_hash);
fixup_pack_header_footer(output_fd, pack_hash,
curr_pack, nr_objects,
@@ -1513,7 +1513,7 @@ static void final(const char *final_pack_name, const char *curr_pack_name,
if (!from_stdin) {
close(input_fd);
} else {
- fsync_or_die(output_fd, curr_pack_name);
+ fsync_component_or_die(FSYNC_COMPONENT_PACK, output_fd, curr_pack_name);
err = close(output_fd);
if (err)
die_errno(_("error while closing pack file"));
diff --git a/builtin/log.c b/builtin/log.c
index c211d66d1d..3ac479bec3 100644
--- a/builtin/log.c
+++ b/builtin/log.c
@@ -417,7 +417,7 @@ static void finish_early_output(struct rev_info *rev)
show_early_header(rev, "done", n);
}
-static int cmd_log_walk(struct rev_info *rev)
+static int cmd_log_walk_no_free(struct rev_info *rev)
{
struct commit *commit;
int saved_nrl = 0;
@@ -444,7 +444,6 @@ static int cmd_log_walk(struct rev_info *rev)
* and HAS_CHANGES being accumulated in rev->diffopt, so be careful to
* retain that state information if replacing rev->diffopt in this loop
*/
- rev->diffopt.no_free = 1;
while ((commit = get_revision(rev)) != NULL) {
if (!log_tree_commit(rev, commit) && rev->max_count >= 0)
/*
@@ -469,8 +468,6 @@ static int cmd_log_walk(struct rev_info *rev)
}
rev->diffopt.degraded_cc_to_c = saved_dcctc;
rev->diffopt.needed_rename_limit = saved_nrl;
- rev->diffopt.no_free = 0;
- diff_free(&rev->diffopt);
if (rev->remerge_diff) {
tmp_objdir_destroy(rev->remerge_objdir);
@@ -484,6 +481,17 @@ static int cmd_log_walk(struct rev_info *rev)
return diff_result_code(&rev->diffopt, 0);
}
+static int cmd_log_walk(struct rev_info *rev)
+{
+ int retval;
+
+ rev->diffopt.no_free = 1;
+ retval = cmd_log_walk_no_free(rev);
+ rev->diffopt.no_free = 0;
+ diff_free(&rev->diffopt);
+ return retval;
+}
+
static int git_log_config(const char *var, const char *value, void *cb)
{
const char *slot_name;
@@ -680,6 +688,7 @@ int cmd_show(int argc, const char **argv, const char *prefix)
count = rev.pending.nr;
objects = rev.pending.objects;
+ rev.diffopt.no_free = 1;
for (i = 0; i < count && !ret; i++) {
struct object *o = objects[i].item;
const char *name = objects[i].name;
@@ -725,12 +734,16 @@ int cmd_show(int argc, const char **argv, const char *prefix)
rev.pending.nr = rev.pending.alloc = 0;
rev.pending.objects = NULL;
add_object_array(o, name, &rev.pending);
- ret = cmd_log_walk(&rev);
+ ret = cmd_log_walk_no_free(&rev);
break;
default:
ret = error(_("unknown type: %d"), o->type);
}
}
+
+ rev.diffopt.no_free = 0;
+ diff_free(&rev.diffopt);
+
free(objects);
return ret;
}
@@ -1883,6 +1896,7 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
rev.diff = 1;
rev.max_parents = 1;
rev.diffopt.flags.recursive = 1;
+ rev.diffopt.no_free = 1;
rev.subject_prefix = fmt_patch_subject_prefix;
memset(&s_r_opt, 0, sizeof(s_r_opt));
s_r_opt.def = "HEAD";
@@ -2008,13 +2022,7 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
if (use_stdout) {
setup_pager();
- } else if (rev.diffopt.close_file) {
- /*
- * The diff code parsed --output; it has already opened the
- * file, but we must instruct it not to close after each diff.
- */
- rev.diffopt.no_free = 1;
- } else {
+ } else if (!rev.diffopt.close_file) {
int saved;
if (!output_directory)
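
A minimal standalone sketch (not part of the patch) of the pattern behind the cmd_log_walk()/cmd_log_walk_no_free() split above: the no_free flag suppresses per-iteration freeing for the duration of the inner walk, and the wrapper clears it and releases exactly once afterwards. All names below are illustrative, not git APIs.

    #include <stdio.h>

    struct opts {
        int no_free;        /* while set, release() is a no-op */
        int allocations;
    };

    static void release(struct opts *o)
    {
        if (o->no_free)
            return;
        o->allocations = 0;
        puts("released");
    }

    static int walk_no_free(struct opts *o)
    {
        int i;
        for (i = 0; i < 3; i++) {
            o->allocations++;
            release(o);     /* deferred while no_free is set */
        }
        return 0;
    }

    static int walk(struct opts *o)
    {
        int ret;

        o->no_free = 1;
        ret = walk_no_free(o);
        o->no_free = 0;
        release(o);         /* single, final release */
        return ret;
    }

    int main(void)
    {
        struct opts o = { 0 };
        return walk(&o);
    }
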
diff --git a/builtin/ls-tree.c b/builtin/ls-tree.c
index 6cb554cbb0..e279be8bb6 100644
--- a/builtin/ls-tree.c
+++ b/builtin/ls-tree.c
@@ -16,22 +16,102 @@
static int line_termination = '\n';
#define LS_RECURSIVE 1
-#define LS_TREE_ONLY 2
-#define LS_SHOW_TREES 4
-#define LS_NAME_ONLY 8
-#define LS_SHOW_SIZE 16
+#define LS_TREE_ONLY (1 << 1)
+#define LS_SHOW_TREES (1 << 2)
static int abbrev;
static int ls_options;
static struct pathspec pathspec;
static int chomp_prefix;
static const char *ls_tree_prefix;
+static const char *format;
+struct show_tree_data {
+ unsigned mode;
+ enum object_type type;
+ const struct object_id *oid;
+ const char *pathname;
+ struct strbuf *base;
+};
static const char * const ls_tree_usage[] = {
N_("git ls-tree [<options>] <tree-ish> [<path>...]"),
NULL
};
-static int show_recursive(const char *base, int baselen, const char *pathname)
+static enum ls_tree_cmdmode {
+ MODE_DEFAULT = 0,
+ MODE_LONG,
+ MODE_NAME_ONLY,
+ MODE_NAME_STATUS,
+ MODE_OBJECT_ONLY,
+} cmdmode;
+
+static void expand_objectsize(struct strbuf *line, const struct object_id *oid,
+ const enum object_type type, unsigned int padded)
+{
+ if (type == OBJ_BLOB) {
+ unsigned long size;
+ if (oid_object_info(the_repository, oid, &size) < 0)
+ die(_("could not get object info about '%s'"),
+ oid_to_hex(oid));
+ if (padded)
+ strbuf_addf(line, "%7"PRIuMAX, (uintmax_t)size);
+ else
+ strbuf_addf(line, "%"PRIuMAX, (uintmax_t)size);
+ } else if (padded) {
+ strbuf_addf(line, "%7s", "-");
+ } else {
+ strbuf_addstr(line, "-");
+ }
+}
+
+static size_t expand_show_tree(struct strbuf *sb, const char *start,
+ void *context)
+{
+ struct show_tree_data *data = context;
+ const char *end;
+ const char *p;
+ unsigned int errlen;
+ size_t len = strbuf_expand_literal_cb(sb, start, NULL);
+
+ if (len)
+ return len;
+ if (*start != '(')
+ die(_("bad ls-tree format: element '%s' does not start with '('"), start);
+
+ end = strchr(start + 1, ')');
+ if (!end)
+ die(_("bad ls-tree format: element '%s' does not end in ')'"), start);
+
+ len = end - start + 1;
+ if (skip_prefix(start, "(objectmode)", &p)) {
+ strbuf_addf(sb, "%06o", data->mode);
+ } else if (skip_prefix(start, "(objecttype)", &p)) {
+ strbuf_addstr(sb, type_name(data->type));
+ } else if (skip_prefix(start, "(objectsize:padded)", &p)) {
+ expand_objectsize(sb, data->oid, data->type, 1);
+ } else if (skip_prefix(start, "(objectsize)", &p)) {
+ expand_objectsize(sb, data->oid, data->type, 0);
+ } else if (skip_prefix(start, "(objectname)", &p)) {
+ strbuf_add_unique_abbrev(sb, data->oid, abbrev);
+ } else if (skip_prefix(start, "(path)", &p)) {
+ const char *name = data->base->buf;
+ const char *prefix = chomp_prefix ? ls_tree_prefix : NULL;
+ struct strbuf quoted = STRBUF_INIT;
+ struct strbuf sbuf = STRBUF_INIT;
+ strbuf_addstr(data->base, data->pathname);
+ name = relative_path(data->base->buf, prefix, &sbuf);
+ quote_c_style(name, &quoted, NULL, 0);
+ strbuf_addbuf(sb, &quoted);
+ strbuf_release(&sbuf);
+ strbuf_release(&quoted);
+ } else {
+ errlen = (unsigned long)len;
+ die(_("bad ls-tree format: %%%.*s"), errlen, start);
+ }
+ return len;
+}
+
+static int show_recursive(const char *base, size_t baselen, const char *pathname)
{
int i;
@@ -43,7 +123,7 @@ static int show_recursive(const char *base, int baselen, const char *pathname)
for (i = 0; i < pathspec.nr; i++) {
const char *spec = pathspec.items[i].match;
- int len, speclen;
+ size_t len, speclen;
if (strncmp(base, spec, baselen))
continue;
@@ -61,69 +141,197 @@ static int show_recursive(const char *base, int baselen, const char *pathname)
return 0;
}
-static int show_tree(const struct object_id *oid, struct strbuf *base,
- const char *pathname, unsigned mode, void *context)
+static int show_tree_fmt(const struct object_id *oid, struct strbuf *base,
+ const char *pathname, unsigned mode, void *context)
{
- int retval = 0;
- int baselen;
- const char *type = blob_type;
-
- if (S_ISGITLINK(mode)) {
- /*
- * Maybe we want to have some recursive version here?
- *
- * Something similar to this incomplete example:
- *
- if (show_subprojects(base, baselen, pathname))
- retval = READ_TREE_RECURSIVE;
- *
- */
- type = commit_type;
- } else if (S_ISDIR(mode)) {
- if (show_recursive(base->buf, base->len, pathname)) {
- retval = READ_TREE_RECURSIVE;
- if (!(ls_options & LS_SHOW_TREES))
- return retval;
- }
- type = tree_type;
- }
- else if (ls_options & LS_TREE_ONLY)
+ size_t baselen;
+ int recurse = 0;
+ struct strbuf sb = STRBUF_INIT;
+ enum object_type type = object_type(mode);
+
+ struct show_tree_data data = {
+ .mode = mode,
+ .type = type,
+ .oid = oid,
+ .pathname = pathname,
+ .base = base,
+ };
+
+ if (type == OBJ_TREE && show_recursive(base->buf, base->len, pathname))
+ recurse = READ_TREE_RECURSIVE;
+ if (type == OBJ_TREE && recurse && !(ls_options & LS_SHOW_TREES))
+ return recurse;
+ if (type == OBJ_BLOB && (ls_options & LS_TREE_ONLY))
return 0;
- if (!(ls_options & LS_NAME_ONLY)) {
- if (ls_options & LS_SHOW_SIZE) {
- char size_text[24];
- if (!strcmp(type, blob_type)) {
- unsigned long size;
- if (oid_object_info(the_repository, oid, &size) == OBJ_BAD)
- xsnprintf(size_text, sizeof(size_text),
- "BAD");
- else
- xsnprintf(size_text, sizeof(size_text),
- "%"PRIuMAX, (uintmax_t)size);
- } else
- xsnprintf(size_text, sizeof(size_text), "-");
- printf("%06o %s %s %7s\t", mode, type,
- find_unique_abbrev(oid, abbrev),
- size_text);
- } else
- printf("%06o %s %s\t", mode, type,
- find_unique_abbrev(oid, abbrev));
- }
baselen = base->len;
+ strbuf_expand(&sb, format, expand_show_tree, &data);
+ strbuf_addch(&sb, line_termination);
+ fwrite(sb.buf, sb.len, 1, stdout);
+ strbuf_release(&sb);
+ strbuf_setlen(base, baselen);
+ return recurse;
+}
+
+static int show_tree_common(struct show_tree_data *data, int *recurse,
+ const struct object_id *oid, struct strbuf *base,
+ const char *pathname, unsigned mode)
+{
+ enum object_type type = object_type(mode);
+ int ret = -1;
+
+ *recurse = 0;
+ data->mode = mode;
+ data->type = type;
+ data->oid = oid;
+ data->pathname = pathname;
+ data->base = base;
+
+ if (type == OBJ_BLOB) {
+ if (ls_options & LS_TREE_ONLY)
+ ret = 0;
+ } else if (type == OBJ_TREE &&
+ show_recursive(base->buf, base->len, pathname)) {
+ *recurse = READ_TREE_RECURSIVE;
+ if (!(ls_options & LS_SHOW_TREES))
+ ret = *recurse;
+ }
+
+ return ret;
+}
+
+static void show_tree_common_default_long(struct strbuf *base,
+ const char *pathname,
+ const size_t baselen)
+{
+ strbuf_addstr(base, pathname);
+ write_name_quoted_relative(base->buf,
+ chomp_prefix ? ls_tree_prefix : NULL, stdout,
+ line_termination);
+ strbuf_setlen(base, baselen);
+}
+
+static int show_tree_default(const struct object_id *oid, struct strbuf *base,
+ const char *pathname, unsigned mode,
+ void *context)
+{
+ int early;
+ int recurse;
+ struct show_tree_data data = { 0 };
+
+ early = show_tree_common(&data, &recurse, oid, base, pathname, mode);
+ if (early >= 0)
+ return early;
+
+ printf("%06o %s %s\t", data.mode, type_name(data.type),
+ find_unique_abbrev(data.oid, abbrev));
+ show_tree_common_default_long(base, pathname, data.base->len);
+ return recurse;
+}
+
+static int show_tree_long(const struct object_id *oid, struct strbuf *base,
+ const char *pathname, unsigned mode, void *context)
+{
+ int early;
+ int recurse;
+ struct show_tree_data data = { 0 };
+ char size_text[24];
+
+ early = show_tree_common(&data, &recurse, oid, base, pathname, mode);
+ if (early >= 0)
+ return early;
+
+ if (data.type == OBJ_BLOB) {
+ unsigned long size;
+ if (oid_object_info(the_repository, data.oid, &size) == OBJ_BAD)
+ xsnprintf(size_text, sizeof(size_text), "BAD");
+ else
+ xsnprintf(size_text, sizeof(size_text),
+ "%" PRIuMAX, (uintmax_t)size);
+ } else {
+ xsnprintf(size_text, sizeof(size_text), "-");
+ }
+
+ printf("%06o %s %s %7s\t", data.mode, type_name(data.type),
+ find_unique_abbrev(data.oid, abbrev), size_text);
+ show_tree_common_default_long(base, pathname, data.base->len);
+ return recurse;
+}
+
+static int show_tree_name_only(const struct object_id *oid, struct strbuf *base,
+ const char *pathname, unsigned mode, void *context)
+{
+ int early;
+ int recurse;
+ const size_t baselen = base->len;
+ struct show_tree_data data = { 0 };
+
+ early = show_tree_common(&data, &recurse, oid, base, pathname, mode);
+ if (early >= 0)
+ return early;
+
strbuf_addstr(base, pathname);
write_name_quoted_relative(base->buf,
chomp_prefix ? ls_tree_prefix : NULL,
stdout, line_termination);
strbuf_setlen(base, baselen);
- return retval;
+ return recurse;
+}
+
+static int show_tree_object(const struct object_id *oid, struct strbuf *base,
+ const char *pathname, unsigned mode, void *context)
+{
+ int early;
+ int recurse;
+ struct show_tree_data data = { 0 };
+
+ early = show_tree_common(&data, &recurse, oid, base, pathname, mode);
+ if (early >= 0)
+ return early;
+
+ printf("%s%c", find_unique_abbrev(oid, abbrev), line_termination);
+ return recurse;
}
+struct ls_tree_cmdmode_to_fmt {
+ enum ls_tree_cmdmode mode;
+ const char *const fmt;
+ read_tree_fn_t fn;
+};
+
+static struct ls_tree_cmdmode_to_fmt ls_tree_cmdmode_format[] = {
+ {
+ .mode = MODE_DEFAULT,
+ .fmt = "%(objectmode) %(objecttype) %(objectname)%x09%(path)",
+ .fn = show_tree_default,
+ },
+ {
+ .mode = MODE_LONG,
+ .fmt = "%(objectmode) %(objecttype) %(objectname) %(objectsize:padded)%x09%(path)",
+ .fn = show_tree_long,
+ },
+ {
+ .mode = MODE_NAME_ONLY, /* And MODE_NAME_STATUS */
+ .fmt = "%(path)",
+ .fn = show_tree_name_only,
+ },
+ {
+ .mode = MODE_OBJECT_ONLY,
+ .fmt = "%(objectname)",
+ .fn = show_tree_object
+ },
+ {
+ /* fallback */
+ .fn = show_tree_default,
+ },
+};
+
int cmd_ls_tree(int argc, const char **argv, const char *prefix)
{
struct object_id oid;
struct tree *tree;
int i, full_tree = 0;
+ read_tree_fn_t fn = NULL;
const struct option ls_tree_options[] = {
OPT_BIT('d', NULL, &ls_options, N_("only show trees"),
LS_TREE_ONLY),
@@ -133,20 +341,26 @@ int cmd_ls_tree(int argc, const char **argv, const char *prefix)
LS_SHOW_TREES),
OPT_SET_INT('z', NULL, &line_termination,
N_("terminate entries with NUL byte"), 0),
- OPT_BIT('l', "long", &ls_options, N_("include object size"),
- LS_SHOW_SIZE),
- OPT_BIT(0, "name-only", &ls_options, N_("list only filenames"),
- LS_NAME_ONLY),
- OPT_BIT(0, "name-status", &ls_options, N_("list only filenames"),
- LS_NAME_ONLY),
+ OPT_CMDMODE('l', "long", &cmdmode, N_("include object size"),
+ MODE_LONG),
+ OPT_CMDMODE(0, "name-only", &cmdmode, N_("list only filenames"),
+ MODE_NAME_ONLY),
+ OPT_CMDMODE(0, "name-status", &cmdmode, N_("list only filenames"),
+ MODE_NAME_STATUS),
+ OPT_CMDMODE(0, "object-only", &cmdmode, N_("list only objects"),
+ MODE_OBJECT_ONLY),
OPT_SET_INT(0, "full-name", &chomp_prefix,
N_("use full path names"), 0),
OPT_BOOL(0, "full-tree", &full_tree,
N_("list entire tree; not just current directory "
"(implies --full-name)")),
+ OPT_STRING_F(0, "format", &format, N_("format"),
+ N_("format to use for the output"),
+ PARSE_OPT_NONEG),
OPT__ABBREV(&abbrev),
OPT_END()
};
+ struct ls_tree_cmdmode_to_fmt *m2f = ls_tree_cmdmode_format;
git_config(git_default_config, NULL);
ls_tree_prefix = prefix;
@@ -159,11 +373,23 @@ int cmd_ls_tree(int argc, const char **argv, const char *prefix)
ls_tree_prefix = prefix = NULL;
chomp_prefix = 0;
}
+ /*
+ * We wanted to detect conflicts between --name-only and
+ * --name-status, but once we're done with that subsequent
+ * code should only need to check the primary name.
+ */
+ if (cmdmode == MODE_NAME_STATUS)
+ cmdmode = MODE_NAME_ONLY;
+
/* -d -r should imply -t, but -d by itself should not have to. */
if ( (LS_TREE_ONLY|LS_RECURSIVE) ==
((LS_TREE_ONLY|LS_RECURSIVE) & ls_options))
ls_options |= LS_SHOW_TREES;
+ if (format && cmdmode)
+ usage_msg_opt(
+ _("--format can't be combined with other format-altering options"),
+ ls_tree_usage, ls_tree_options);
if (argc < 1)
usage_with_options(ls_tree_usage, ls_tree_options);
if (get_oid(argv[0], &oid))
@@ -185,6 +411,24 @@ int cmd_ls_tree(int argc, const char **argv, const char *prefix)
tree = parse_tree_indirect(&oid);
if (!tree)
die("not a tree object");
- return !!read_tree(the_repository, tree,
- &pathspec, show_tree, NULL);
+ /*
+ * The generic show_tree_fmt() is slower than show_tree(), so
+ * take the fast path if possible.
+ */
+ while (m2f) {
+ if (!m2f->fmt) {
+ fn = format ? show_tree_fmt : show_tree_default;
+ } else if (format && !strcmp(format, m2f->fmt)) {
+ cmdmode = m2f->mode;
+ fn = m2f->fn;
+ } else if (!format && cmdmode == m2f->mode) {
+ fn = m2f->fn;
+ } else {
+ m2f++;
+ continue;
+ }
+ break;
+ }
+
+ return !!read_tree(the_repository, tree, &pathspec, fn, NULL);
}
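
The new show_tree_fmt() path above is driven by strbuf_expand() with a callback that recognizes %(...) placeholders. Below is a self-contained sketch of that expansion pattern using only the C standard library rather than git's strbuf API; the real code also handles literals such as %x09 and die()s on unknown tokens, and every name in this sketch is invented for illustration.

    #include <stdio.h>
    #include <string.h>

    struct entry {
        const char *mode, *type, *name, *path;
    };

    static int is_token(const char *tok, size_t len, const char *want)
    {
        return len == strlen(want) && !strncmp(tok, want, len);
    }

    static void expand(const char *fmt, const struct entry *e)
    {
        const char *p = fmt;

        while (*p) {
            const char *tok;
            size_t len;

            if (*p != '%' || p[1] != '(') {
                putchar(*p++);          /* literal byte */
                continue;
            }
            tok = p + 2;
            len = strcspn(tok, ")");
            if (tok[len] != ')')
                break;                  /* unterminated placeholder */
            if (is_token(tok, len, "objectmode"))
                fputs(e->mode, stdout);
            else if (is_token(tok, len, "objecttype"))
                fputs(e->type, stdout);
            else if (is_token(tok, len, "objectname"))
                fputs(e->name, stdout);
            else if (is_token(tok, len, "path"))
                fputs(e->path, stdout);
            p = tok + len + 1;          /* skip past ")" */
        }
        putchar('\n');
    }

    int main(void)
    {
        struct entry e = { "100644", "blob", "e69de29bb2d1", "README" };

        /* mirrors the built-in MODE_DEFAULT format, with a literal tab */
        expand("%(objectmode) %(objecttype) %(objectname)\t%(path)", &e);
        return 0;
    }
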
diff --git a/builtin/mailsplit.c b/builtin/mailsplit.c
index 7baef30569..30952353a3 100644
--- a/builtin/mailsplit.c
+++ b/builtin/mailsplit.c
@@ -223,6 +223,9 @@ static int split_mbox(const char *file, const char *dir, int allow_bare,
FILE *f = !strcmp(file, "-") ? stdin : fopen(file, "r");
int file_done = 0;
+ if (isatty(fileno(f)))
+ warning(_("reading patches from stdin/tty..."));
+
if (!f) {
error_errno("cannot open mbox %s", file);
goto out;
diff --git a/builtin/multi-pack-index.c b/builtin/multi-pack-index.c
index 4480ba3982..5edbb7fe86 100644
--- a/builtin/multi-pack-index.c
+++ b/builtin/multi-pack-index.c
@@ -44,7 +44,7 @@ static char const * const builtin_multi_pack_index_usage[] = {
};
static struct opts_multi_pack_index {
- const char *object_dir;
+ char *object_dir;
const char *preferred_pack;
const char *refs_snapshot;
unsigned long batch_size;
@@ -52,9 +52,23 @@ static struct opts_multi_pack_index {
int stdin_packs;
} opts;
+
+static int parse_object_dir(const struct option *opt, const char *arg,
+ int unset)
+{
+ free(opts.object_dir);
+ if (unset)
+ opts.object_dir = xstrdup(get_object_directory());
+ else
+ opts.object_dir = real_pathdup(arg, 1);
+ return 0;
+}
+
static struct option common_opts[] = {
- OPT_FILENAME(0, "object-dir", &opts.object_dir,
- N_("object directory containing set of packfile and pack-index pairs")),
+ OPT_CALLBACK(0, "object-dir", &opts.object_dir,
+ N_("directory"),
+ N_("object directory containing set of packfile and pack-index pairs"),
+ parse_object_dir),
OPT_END(),
};
@@ -232,31 +246,40 @@ static int cmd_multi_pack_index_repack(int argc, const char **argv)
int cmd_multi_pack_index(int argc, const char **argv,
const char *prefix)
{
+ int res;
struct option *builtin_multi_pack_index_options = common_opts;
git_config(git_default_config, NULL);
+ if (the_repository &&
+ the_repository->objects &&
+ the_repository->objects->odb)
+ opts.object_dir = xstrdup(the_repository->objects->odb->path);
+
argc = parse_options(argc, argv, prefix,
builtin_multi_pack_index_options,
builtin_multi_pack_index_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
- if (!opts.object_dir)
- opts.object_dir = get_object_directory();
-
if (!argc)
goto usage;
if (!strcmp(argv[0], "repack"))
- return cmd_multi_pack_index_repack(argc, argv);
+ res = cmd_multi_pack_index_repack(argc, argv);
else if (!strcmp(argv[0], "write"))
- return cmd_multi_pack_index_write(argc, argv);
+ res = cmd_multi_pack_index_write(argc, argv);
else if (!strcmp(argv[0], "verify"))
- return cmd_multi_pack_index_verify(argc, argv);
+ res = cmd_multi_pack_index_verify(argc, argv);
else if (!strcmp(argv[0], "expire"))
- return cmd_multi_pack_index_expire(argc, argv);
+ res = cmd_multi_pack_index_expire(argc, argv);
+ else {
+ error(_("unrecognized subcommand: %s"), argv[0]);
+ goto usage;
+ }
+
+ free(opts.object_dir);
+ return res;
- error(_("unrecognized subcommand: %s"), argv[0]);
usage:
usage_with_options(builtin_multi_pack_index_usage,
builtin_multi_pack_index_options);
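
The parse_object_dir() callback above switches opts.object_dir to an owned string so cmd_multi_pack_index() can unconditionally free() it. A standalone sketch of that own-and-replace pattern follows (invented names, not the parse-options API).

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static char *object_dir;    /* always heap-owned, or NULL */

    /* replace the current value, keeping ownership in one place */
    static void set_object_dir(const char *arg, const char *fallback)
    {
        free(object_dir);
        object_dir = strdup(arg ? arg : fallback);
    }

    int main(void)
    {
        set_object_dir(NULL, ".git/objects");        /* default */
        set_object_dir("/tmp/odb", ".git/objects");  /* --object-dir=/tmp/odb */
        printf("%s\n", object_dir);
        free(object_dir);                            /* safe: always owned */
        return 0;
    }
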
diff --git a/builtin/name-rev.c b/builtin/name-rev.c
index 929591269d..02ea9d1633 100644
--- a/builtin/name-rev.c
+++ b/builtin/name-rev.c
@@ -9,6 +9,7 @@
#include "prio-queue.h"
#include "hash-lookup.h"
#include "commit-slab.h"
+#include "commit-graph.h"
/*
* One day. See the 'name a rev shortly after epoch' test in t6120 when
@@ -17,7 +18,7 @@
#define CUTOFF_DATE_SLOP 86400
struct rev_name {
- char *tip_name;
+ const char *tip_name;
timestamp_t taggerdate;
int generation;
int distance;
@@ -26,15 +27,64 @@ struct rev_name {
define_commit_slab(commit_rev_name, struct rev_name);
+static timestamp_t generation_cutoff = GENERATION_NUMBER_INFINITY;
static timestamp_t cutoff = TIME_MAX;
static struct commit_rev_name rev_names;
+/* Disable the cutoff checks entirely */
+static void disable_cutoff(void)
+{
+ generation_cutoff = 0;
+ cutoff = 0;
+}
+
+/* Cut off searching any commits older than this one */
+static void set_commit_cutoff(struct commit *commit)
+{
+
+ if (cutoff > commit->date)
+ cutoff = commit->date;
+
+ if (generation_cutoff) {
+ timestamp_t generation = commit_graph_generation(commit);
+
+ if (generation_cutoff > generation)
+ generation_cutoff = generation;
+ }
+}
+
+/* adjust the commit date cutoff with a slop to allow for slightly incorrect
+ * commit timestamps in case of clock skew.
+ */
+static void adjust_cutoff_timestamp_for_slop(void)
+{
+ if (cutoff) {
+ /* check for underflow */
+ if (cutoff > TIME_MIN + CUTOFF_DATE_SLOP)
+ cutoff = cutoff - CUTOFF_DATE_SLOP;
+ else
+ cutoff = TIME_MIN;
+ }
+}
+
+/* Check if a commit is before the cutoff. Prioritize generation numbers
+ * first, but use the commit timestamp if we lack generation data.
+ */
+static int commit_is_before_cutoff(struct commit *commit)
+{
+ if (generation_cutoff < GENERATION_NUMBER_INFINITY)
+ return generation_cutoff &&
+ commit_graph_generation(commit) < generation_cutoff;
+
+ return commit->date < cutoff;
+}
+
/* How many generations are maximally preferred over _one_ merge traversal? */
#define MERGE_TRAVERSAL_WEIGHT 65535
static int is_valid_rev_name(const struct rev_name *name)
{
- return name && (name->generation || name->tip_name);
+ return name && name->tip_name;
}
static struct rev_name *get_commit_rev_name(const struct commit *commit)
@@ -96,20 +146,9 @@ static struct rev_name *create_or_update_name(struct commit *commit,
{
struct rev_name *name = commit_rev_name_at(&rev_names, commit);
- if (is_valid_rev_name(name)) {
- if (!is_better_name(name, taggerdate, generation, distance, from_tag))
- return NULL;
-
- /*
- * This string might still be shared with ancestors
- * (generation > 0). We can release it here regardless,
- * because the new name that has just won will be better
- * for them as well, so name_rev() will replace these
- * stale pointers when it processes the parents.
- */
- if (!name->generation)
- free(name->tip_name);
- }
+ if (is_valid_rev_name(name) &&
+ !is_better_name(name, taggerdate, generation, distance, from_tag))
+ return NULL;
name->taggerdate = taggerdate;
name->generation = generation;
@@ -151,7 +190,7 @@ static void name_rev(struct commit *start_commit,
struct rev_name *start_name;
parse_commit(start_commit);
- if (start_commit->date < cutoff)
+ if (commit_is_before_cutoff(start_commit))
return;
start_name = create_or_update_name(start_commit, taggerdate, 0, 0,
@@ -181,7 +220,7 @@ static void name_rev(struct commit *start_commit,
int generation, distance;
parse_commit(parent);
- if (parent->date < cutoff)
+ if (commit_is_before_cutoff(parent))
continue;
if (parent_number > 1) {
@@ -568,7 +607,7 @@ int cmd_name_rev(int argc, const char **argv, const char *prefix)
usage_with_options(name_rev_usage, opts);
}
if (all || annotate_stdin)
- cutoff = 0;
+ disable_cutoff();
for (; argc; argc--, argv++) {
struct object_id oid;
@@ -596,10 +635,8 @@ int cmd_name_rev(int argc, const char **argv, const char *prefix)
continue;
}
- if (commit) {
- if (cutoff > commit->date)
- cutoff = commit->date;
- }
+ if (commit)
+ set_commit_cutoff(commit);
if (peel_tag) {
if (!commit) {
@@ -612,13 +649,8 @@ int cmd_name_rev(int argc, const char **argv, const char *prefix)
add_object_array(object, *argv, &revs);
}
- if (cutoff) {
- /* check for undeflow */
- if (cutoff > TIME_MIN + CUTOFF_DATE_SLOP)
- cutoff = cutoff - CUTOFF_DATE_SLOP;
- else
- cutoff = TIME_MIN;
- }
+ adjust_cutoff_timestamp_for_slop();
+
for_each_ref(name_ref, &data);
name_tips();
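
A small standalone sketch of the decision commit_is_before_cutoff() above makes: prefer generation numbers when the tips provided them, otherwise fall back to commit dates. The struct and constants here are stand-ins, not git types.

    #include <stdint.h>
    #include <stdio.h>

    #define GEN_INFINITY UINT64_MAX  /* stand-in for GENERATION_NUMBER_INFINITY */

    struct fake_commit {
        uint64_t generation;         /* 0 means "no commit-graph data" */
        uint64_t date;
    };

    static uint64_t gen_cutoff = GEN_INFINITY;  /* lowered as tips are seen */
    static uint64_t date_cutoff = UINT64_MAX;

    static int before_cutoff(const struct fake_commit *c)
    {
        /* generation data was available for the tips: trust it */
        if (gen_cutoff < GEN_INFINITY)
            return gen_cutoff && c->generation < gen_cutoff;
        /* otherwise fall back to the (slop-adjusted) date cutoff */
        return c->date < date_cutoff;
    }

    int main(void)
    {
        struct fake_commit old = { .generation = 3, .date = 100 };

        gen_cutoff = 10;     /* oldest interesting tip had generation 10 */
        date_cutoff = 500;
        printf("skipped: %d\n", before_cutoff(&old));  /* prints 1 */
        return 0;
    }
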
diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
index 47b0e1da7d..014dcd4bc9 100644
--- a/builtin/pack-objects.c
+++ b/builtin/pack-objects.c
@@ -237,8 +237,6 @@ static unsigned long cache_max_small_delta_size = 1000;
static unsigned long window_memory_limit = 0;
-static struct list_objects_filter_options filter_options;
-
static struct string_list uri_protocols = STRING_LIST_INIT_NODUP;
enum missing_action {
@@ -1199,16 +1197,26 @@ static void write_pack_file(void)
display_progress(progress_state, written);
}
- /*
- * Did we write the wrong # entries in the header?
- * If so, rewrite it like in fast-import
- */
if (pack_to_stdout) {
- finalize_hashfile(f, hash, CSUM_HASH_IN_STREAM | CSUM_CLOSE);
+ /*
+ * We never fsync when writing to stdout since we may
+ * not be writing to an actual pack file. For instance,
+ * the upload-pack code passes a pipe here. Calling
+ * fsync on a pipe results in unnecessary
+ * synchronization with the reader on some platforms.
+ */
+ finalize_hashfile(f, hash, FSYNC_COMPONENT_NONE,
+ CSUM_HASH_IN_STREAM | CSUM_CLOSE);
} else if (nr_written == nr_remaining) {
- finalize_hashfile(f, hash, CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
+ finalize_hashfile(f, hash, FSYNC_COMPONENT_PACK,
+ CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
} else {
- int fd = finalize_hashfile(f, hash, 0);
+ /*
+ * If we wrote the wrong number of entries in the
+ * header, rewrite it like in fast-import.
+ */
+
+ int fd = finalize_hashfile(f, hash, FSYNC_COMPONENT_PACK, 0);
fixup_pack_header_footer(fd, hash, pack_tmp_name,
nr_written, hash, offset);
close(fd);
@@ -3651,7 +3659,7 @@ static int pack_options_allow_reuse(void)
static int get_object_list_from_bitmap(struct rev_info *revs)
{
- if (!(bitmap_git = prepare_bitmap_walk(revs, &filter_options, 0)))
+ if (!(bitmap_git = prepare_bitmap_walk(revs, 0)))
return -1;
if (pack_options_allow_reuse() &&
@@ -3714,9 +3722,8 @@ static void mark_bitmap_preferred_tips(void)
}
}
-static void get_object_list(int ac, const char **av)
+static void get_object_list(struct rev_info *revs, int ac, const char **av)
{
- struct rev_info revs;
struct setup_revision_opt s_r_opt = {
.allow_exclude_promisor_objects = 1,
};
@@ -3724,9 +3731,8 @@ static void get_object_list(int ac, const char **av)
int flags = 0;
int save_warning;
- repo_init_revisions(the_repository, &revs, NULL);
save_commit_buffer = 0;
- setup_revisions(ac, av, &revs, &s_r_opt);
+ setup_revisions(ac, av, revs, &s_r_opt);
/* make sure shallows are read */
is_repository_shallow(the_repository);
@@ -3756,13 +3762,13 @@ static void get_object_list(int ac, const char **av)
}
die(_("not a rev '%s'"), line);
}
- if (handle_revision_arg(line, &revs, flags, REVARG_CANNOT_BE_FILENAME))
+ if (handle_revision_arg(line, revs, flags, REVARG_CANNOT_BE_FILENAME))
die(_("bad revision '%s'"), line);
}
warn_on_object_refname_ambiguity = save_warning;
- if (use_bitmap_index && !get_object_list_from_bitmap(&revs))
+ if (use_bitmap_index && !get_object_list_from_bitmap(revs))
return;
if (use_delta_islands)
@@ -3771,24 +3777,24 @@ static void get_object_list(int ac, const char **av)
if (write_bitmap_index)
mark_bitmap_preferred_tips();
- if (prepare_revision_walk(&revs))
+ if (prepare_revision_walk(revs))
die(_("revision walk setup failed"));
- mark_edges_uninteresting(&revs, show_edge, sparse);
+ mark_edges_uninteresting(revs, show_edge, sparse);
if (!fn_show_object)
fn_show_object = show_object;
- traverse_commit_list_filtered(&filter_options, &revs,
- show_commit, fn_show_object, NULL,
- NULL);
+ traverse_commit_list(revs,
+ show_commit, fn_show_object,
+ NULL);
if (unpack_unreachable_expiration) {
- revs.ignore_missing_links = 1;
- if (add_unseen_recent_objects_to_traversal(&revs,
+ revs->ignore_missing_links = 1;
+ if (add_unseen_recent_objects_to_traversal(revs,
unpack_unreachable_expiration))
die(_("unable to add recent objects"));
- if (prepare_revision_walk(&revs))
+ if (prepare_revision_walk(revs))
die(_("revision walk setup failed"));
- traverse_commit_list(&revs, record_recent_commit,
+ traverse_commit_list(revs, record_recent_commit,
record_recent_object, NULL);
}
@@ -3861,6 +3867,21 @@ static int option_parse_unpack_unreachable(const struct option *opt,
return 0;
}
+struct po_filter_data {
+ unsigned have_revs:1;
+ struct rev_info revs;
+};
+
+static struct list_objects_filter_options *po_filter_revs_init(void *value)
+{
+ struct po_filter_data *data = value;
+
+ repo_init_revisions(the_repository, &data->revs, NULL);
+ data->have_revs = 1;
+
+ return &data->revs.filter;
+}
+
int cmd_pack_objects(int argc, const char **argv, const char *prefix)
{
int use_internal_rev_list = 0;
@@ -3871,6 +3892,8 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
int rev_list_index = 0;
int stdin_packs = 0;
struct string_list keep_pack_list = STRING_LIST_INIT_NODUP;
+ struct po_filter_data pfd = { .have_revs = 0 };
+
struct option pack_objects_options[] = {
OPT_SET_INT('q', "quiet", &progress,
N_("do not show progress meter"), 0),
@@ -3956,7 +3979,7 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
&write_bitmap_index,
N_("write a bitmap index if possible"),
WRITE_BITMAP_QUIET, PARSE_OPT_HIDDEN),
- OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options),
+ OPT_PARSE_LIST_OBJECTS_FILTER_INIT(&pfd, po_filter_revs_init),
OPT_CALLBACK_F(0, "missing", NULL, N_("action"),
N_("handling for missing objects"), PARSE_OPT_NONEG,
option_parse_missing_action),
@@ -4076,7 +4099,7 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
if (!rev_list_all || !rev_list_reflog || !rev_list_index)
unpack_unreachable_expiration = 0;
- if (filter_options.choice) {
+ if (pfd.have_revs && pfd.revs.filter.choice) {
if (!pack_to_stdout)
die(_("cannot use --filter without --stdout"));
if (stdin_packs)
@@ -4152,8 +4175,13 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
add_unreachable_loose_objects();
} else if (!use_internal_rev_list) {
read_object_list_from_stdin();
+ } else if (pfd.have_revs) {
+ get_object_list(&pfd.revs, rp.nr, rp.v);
} else {
- get_object_list(rp.nr, rp.v);
+ struct rev_info revs;
+
+ repo_init_revisions(the_repository, &revs, NULL);
+ get_object_list(&revs, rp.nr, rp.v);
}
cleanup_preferred_base();
if (include_tag && nr_result)
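
The po_filter_data/po_filter_revs_init() pair above sets up the heavyweight rev_info only if --filter is actually parsed, so cmd_pack_objects() can later tell whether a filter was given. A standalone sketch of that lazy-initialization pattern (plain C, invented names; the real callback is invoked by the parse-options machinery):

    #include <stdio.h>

    struct revs {
        int filter_choice;          /* stand-in for revs.filter */
    };

    struct po_data {
        unsigned have_revs:1;
        struct revs revs;
    };

    /* called only when the option that needs 'revs' is actually seen */
    static struct revs *filter_init(void *value)
    {
        struct po_data *d = value;

        d->revs.filter_choice = 0;  /* stands in for repo_init_revisions() */
        d->have_revs = 1;
        return &d->revs;
    }

    int main(void)
    {
        struct po_data d = { .have_revs = 0 };

        printf("before parsing: have_revs=%u\n", d.have_revs);

        /* as if "--filter=blob:none" had been parsed */
        filter_init(&d)->filter_choice = 1;

        if (d.have_revs && d.revs.filter_choice)
            puts("filter was given; reuse d.revs for the object walk");
        return 0;
    }
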
diff --git a/builtin/rebase.c b/builtin/rebase.c
index b29ad2b65e..7ab50cda2a 100644
--- a/builtin/rebase.c
+++ b/builtin/rebase.c
@@ -829,6 +829,8 @@ static int checkout_up_to_date(struct rebase_options *options)
ropts.oid = &options->orig_head;
ropts.branch = options->head_name;
ropts.flags = RESET_HEAD_RUN_POST_CHECKOUT_HOOK;
+ if (!ropts.branch)
+ ropts.flags |= RESET_HEAD_DETACH;
ropts.head_msg = buf.buf;
if (reset_head(the_repository, &ropts) < 0)
ret = error(_("could not switch to %s"), options->switch_to);
@@ -1185,11 +1187,9 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
} else {
strbuf_reset(&buf);
strbuf_addf(&buf, "%s/interactive", merge_dir());
- if(file_exists(buf.buf)) {
- options.type = REBASE_MERGE;
+ options.type = REBASE_MERGE;
+ if (file_exists(buf.buf))
options.flags |= REBASE_INTERACTIVE_EXPLICIT;
- } else
- options.type = REBASE_MERGE;
}
options.state_dir = merge_dir();
}
@@ -1581,33 +1581,6 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
options.upstream_arg = "--root";
}
- /* Make sure the branch to rebase onto is valid. */
- if (keep_base) {
- strbuf_reset(&buf);
- strbuf_addstr(&buf, options.upstream_name);
- strbuf_addstr(&buf, "...");
- options.onto_name = xstrdup(buf.buf);
- } else if (!options.onto_name)
- options.onto_name = options.upstream_name;
- if (strstr(options.onto_name, "...")) {
- if (get_oid_mb(options.onto_name, &merge_base) < 0) {
- if (keep_base)
- die(_("'%s': need exactly one merge base with branch"),
- options.upstream_name);
- else
- die(_("'%s': need exactly one merge base"),
- options.onto_name);
- }
- options.onto = lookup_commit_or_die(&merge_base,
- options.onto_name);
- } else {
- options.onto =
- lookup_commit_reference_by_name(options.onto_name);
- if (!options.onto)
- die(_("Does not point to a valid commit '%s'"),
- options.onto_name);
- }
-
/*
* If the branch to rebase is given, that is the branch we will rebase
* branch_name -- branch/commit being rebased, or
@@ -1657,6 +1630,34 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
} else
BUG("unexpected number of arguments left to parse");
+ /* Make sure the branch to rebase onto is valid. */
+ if (keep_base) {
+ strbuf_reset(&buf);
+ strbuf_addstr(&buf, options.upstream_name);
+ strbuf_addstr(&buf, "...");
+ strbuf_addstr(&buf, branch_name);
+ options.onto_name = xstrdup(buf.buf);
+ } else if (!options.onto_name)
+ options.onto_name = options.upstream_name;
+ if (strstr(options.onto_name, "...")) {
+ if (get_oid_mb(options.onto_name, &merge_base) < 0) {
+ if (keep_base)
+ die(_("'%s': need exactly one merge base with branch"),
+ options.upstream_name);
+ else
+ die(_("'%s': need exactly one merge base"),
+ options.onto_name);
+ }
+ options.onto = lookup_commit_or_die(&merge_base,
+ options.onto_name);
+ } else {
+ options.onto =
+ lookup_commit_reference_by_name(options.onto_name);
+ if (!options.onto)
+ die(_("Does not point to a valid commit '%s'"),
+ options.onto_name);
+ }
+
if (options.fork_point > 0) {
struct commit *head =
lookup_commit_reference(the_repository,
diff --git a/builtin/reflog.c b/builtin/reflog.c
index 9407f835cb..c943c2aabe 100644
--- a/builtin/reflog.c
+++ b/builtin/reflog.c
@@ -5,8 +5,48 @@
#include "worktree.h"
#include "reflog.h"
-static const char reflog_exists_usage[] =
-N_("git reflog exists <ref>");
+#define BUILTIN_REFLOG_SHOW_USAGE \
+ N_("git reflog [show] [<log-options>] [<ref>]")
+
+#define BUILTIN_REFLOG_EXPIRE_USAGE \
+ N_("git reflog expire [--expire=<time>] [--expire-unreachable=<time>]\n" \
+ " [--rewrite] [--updateref] [--stale-fix]\n" \
+ " [--dry-run | -n] [--verbose] [--all [--single-worktree] | <refs>...]")
+
+#define BUILTIN_REFLOG_DELETE_USAGE \
+ N_("git reflog delete [--rewrite] [--updateref]\n" \
+ " [--dry-run | -n] [--verbose] <ref>@{<specifier>}...")
+
+#define BUILTIN_REFLOG_EXISTS_USAGE \
+ N_("git reflog exists <ref>")
+
+static const char *const reflog_show_usage[] = {
+ BUILTIN_REFLOG_SHOW_USAGE,
+ NULL,
+};
+
+static const char *const reflog_expire_usage[] = {
+ BUILTIN_REFLOG_EXPIRE_USAGE,
+ NULL
+};
+
+static const char *const reflog_delete_usage[] = {
+ BUILTIN_REFLOG_DELETE_USAGE,
+ NULL
+};
+
+static const char *const reflog_exists_usage[] = {
+ BUILTIN_REFLOG_EXISTS_USAGE,
+ NULL,
+};
+
+static const char *const reflog_usage[] = {
+ BUILTIN_REFLOG_SHOW_USAGE,
+ BUILTIN_REFLOG_EXPIRE_USAGE,
+ BUILTIN_REFLOG_DELETE_USAGE,
+ BUILTIN_REFLOG_EXISTS_USAGE,
+ NULL
+};
static timestamp_t default_reflog_expire;
static timestamp_t default_reflog_expire_unreachable;
@@ -147,14 +187,6 @@ static void set_reflog_expiry_param(struct cmd_reflog_expire_cb *cb, const char
cb->expire_unreachable = default_reflog_expire_unreachable;
}
-static const char * reflog_expire_usage[] = {
- N_("git reflog expire [--expire=<time>] "
- "[--expire-unreachable=<time>] "
- "[--rewrite] [--updateref] [--stale-fix] [--dry-run | -n] "
- "[--verbose] [--all] <refs>..."),
- NULL
-};
-
static int expire_unreachable_callback(const struct option *opt,
const char *arg,
int unset)
@@ -183,6 +215,19 @@ static int expire_total_callback(const struct option *opt,
return 0;
}
+static int cmd_reflog_show(int argc, const char **argv, const char *prefix)
+{
+ struct option options[] = {
+ OPT_END()
+ };
+
+ parse_options(argc, argv, prefix, options, reflog_show_usage,
+ PARSE_OPT_KEEP_DASHDASH | PARSE_OPT_KEEP_ARGV0 |
+ PARSE_OPT_KEEP_UNKNOWN);
+
+ return cmd_log_reflog(argc, argv, prefix);
+}
+
static int cmd_reflog_expire(int argc, const char **argv, const char *prefix)
{
struct cmd_reflog_expire_cb cmd = { 0 };
@@ -304,12 +349,6 @@ static int cmd_reflog_expire(int argc, const char **argv, const char *prefix)
return status;
}
-static const char * reflog_delete_usage[] = {
- N_("git reflog delete [--rewrite] [--updateref] "
- "[--dry-run | -n] [--verbose] <refs>..."),
- NULL
-};
-
static int cmd_reflog_delete(int argc, const char **argv, const char *prefix)
{
int i, status = 0;
@@ -342,57 +381,62 @@ static int cmd_reflog_delete(int argc, const char **argv, const char *prefix)
static int cmd_reflog_exists(int argc, const char **argv, const char *prefix)
{
- int i, start = 0;
-
- for (i = 1; i < argc; i++) {
- const char *arg = argv[i];
- if (!strcmp(arg, "--")) {
- i++;
- break;
- }
- else if (arg[0] == '-')
- usage(_(reflog_exists_usage));
- else
- break;
- }
-
- start = i;
+ struct option options[] = {
+ OPT_END()
+ };
+ const char *refname;
- if (argc - start != 1)
- usage(_(reflog_exists_usage));
+ argc = parse_options(argc, argv, prefix, options, reflog_exists_usage,
+ 0);
+ if (!argc)
+ usage_with_options(reflog_exists_usage, options);
- if (check_refname_format(argv[start], REFNAME_ALLOW_ONELEVEL))
- die(_("invalid ref format: %s"), argv[start]);
- return !reflog_exists(argv[start]);
+ refname = argv[0];
+ if (check_refname_format(refname, REFNAME_ALLOW_ONELEVEL))
+ die(_("invalid ref format: %s"), refname);
+ return !reflog_exists(refname);
}
/*
* main "reflog"
*/
-static const char reflog_usage[] =
-"git reflog [ show | expire | delete | exists ]";
-
int cmd_reflog(int argc, const char **argv, const char *prefix)
{
- if (argc > 1 && !strcmp(argv[1], "-h"))
- usage(_(reflog_usage));
+ struct option options[] = {
+ OPT_END()
+ };
- /* With no command, we default to showing it. */
- if (argc < 2 || *argv[1] == '-')
- return cmd_log_reflog(argc, argv, prefix);
+ argc = parse_options(argc, argv, prefix, options, reflog_usage,
+ PARSE_OPT_KEEP_DASHDASH | PARSE_OPT_KEEP_ARGV0 |
+ PARSE_OPT_KEEP_UNKNOWN |
+ PARSE_OPT_NO_INTERNAL_HELP);
- if (!strcmp(argv[1], "show"))
- return cmd_log_reflog(argc - 1, argv + 1, prefix);
+ /*
+ * With "git reflog" we default to showing it. !argc is
+ * impossible with PARSE_OPT_KEEP_ARGV0.
+ */
+ if (argc == 1)
+ goto log_reflog;
- if (!strcmp(argv[1], "expire"))
- return cmd_reflog_expire(argc - 1, argv + 1, prefix);
+ if (!strcmp(argv[1], "-h"))
+ usage_with_options(reflog_usage, options);
+ else if (*argv[1] == '-')
+ goto log_reflog;
- if (!strcmp(argv[1], "delete"))
+ if (!strcmp(argv[1], "show"))
+ return cmd_reflog_show(argc - 1, argv + 1, prefix);
+ else if (!strcmp(argv[1], "expire"))
+ return cmd_reflog_expire(argc - 1, argv + 1, prefix);
+ else if (!strcmp(argv[1], "delete"))
return cmd_reflog_delete(argc - 1, argv + 1, prefix);
-
- if (!strcmp(argv[1], "exists"))
+ else if (!strcmp(argv[1], "exists"))
return cmd_reflog_exists(argc - 1, argv + 1, prefix);
+ /*
+ * Fall-through for e.g. "git reflog -1", "git reflog master",
+ * as well as the plain "git reflog" handled by the goto above.
+ */
+log_reflog:
return cmd_log_reflog(argc, argv, prefix);
}
diff --git a/builtin/repack.c b/builtin/repack.c
index da1e364a75..d1a563d5b6 100644
--- a/builtin/repack.c
+++ b/builtin/repack.c
@@ -22,6 +22,7 @@ static int delta_base_offset = 1;
static int pack_kept_objects = -1;
static int write_bitmaps = -1;
static int use_delta_islands;
+static int run_update_server_info = 1;
static char *packdir, *packtmp_name, *packtmp;
static const char *const git_repack_usage[] = {
@@ -54,6 +55,10 @@ static int repack_config(const char *var, const char *value, void *cb)
use_delta_islands = git_config_bool(var, value);
return 0;
}
+ if (strcmp(var, "repack.updateserverinfo") == 0) {
+ run_update_server_info = git_config_bool(var, value);
+ return 0;
+ }
return git_default_config(var, value, cb);
}
@@ -620,7 +625,6 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
const char *unpack_unreachable = NULL;
int keep_unreachable = 0;
struct string_list keep_pack_list = STRING_LIST_INIT_NODUP;
- int no_update_server_info = 0;
struct pack_objects_args po_args = {NULL};
int geometric_factor = 0;
int write_midx = 0;
@@ -637,8 +641,8 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
N_("pass --no-reuse-delta to git-pack-objects")),
OPT_BOOL('F', NULL, &po_args.no_reuse_object,
N_("pass --no-reuse-object to git-pack-objects")),
- OPT_BOOL('n', NULL, &no_update_server_info,
- N_("do not run git-update-server-info")),
+ OPT_NEGBIT('n', NULL, &run_update_server_info,
+ N_("do not run git-update-server-info"), 1),
OPT__QUIET(&po_args.quiet, N_("be quiet")),
OPT_BOOL('l', "local", &po_args.local,
N_("pass --local to git-pack-objects")),
@@ -939,7 +943,7 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
prune_shallow(PRUNE_QUICK);
}
- if (!no_update_server_info)
+ if (run_update_server_info)
update_server_info(0);
remove_temporary_files();
diff --git a/builtin/reset.c b/builtin/reset.c
index 6e65e90c5d..344fff8f3a 100644
--- a/builtin/reset.c
+++ b/builtin/reset.c
@@ -392,6 +392,7 @@ static int git_reset_config(const char *var, const char *value, void *cb)
int cmd_reset(int argc, const char **argv, const char *prefix)
{
int reset_type = NONE, update_ref_status = 0, quiet = 0;
+ int no_refresh = 0;
int patch_mode = 0, pathspec_file_nul = 0, unborn;
const char *rev, *pathspec_from_file = NULL;
struct object_id oid;
@@ -399,6 +400,8 @@ int cmd_reset(int argc, const char **argv, const char *prefix)
int intent_to_add = 0;
const struct option options[] = {
OPT__QUIET(&quiet, N_("be quiet, only report errors")),
+ OPT_BOOL(0, "no-refresh", &no_refresh,
+ N_("skip refreshing the index after reset")),
OPT_SET_INT(0, "mixed", &reset_type,
N_("reset HEAD and index"), MIXED),
OPT_SET_INT(0, "soft", &reset_type, N_("reset only HEAD"), SOFT),
@@ -420,7 +423,6 @@ int cmd_reset(int argc, const char **argv, const char *prefix)
};
git_config(git_reset_config, NULL);
- git_config_get_bool("reset.quiet", &quiet);
argc = parse_options(argc, argv, prefix, options, git_reset_usage,
PARSE_OPT_KEEP_DASHDASH);
@@ -517,17 +519,16 @@ int cmd_reset(int argc, const char **argv, const char *prefix)
if (read_from_tree(&pathspec, &oid, intent_to_add))
return 1;
the_index.updated_skipworktree = 1;
- if (!quiet && get_git_work_tree()) {
+ if (!no_refresh && get_git_work_tree()) {
uint64_t t_begin, t_delta_in_ms;
t_begin = getnanotime();
refresh_index(&the_index, flags, NULL, NULL,
_("Unstaged changes after reset:"));
t_delta_in_ms = (getnanotime() - t_begin) / 1000000;
- if (advice_enabled(ADVICE_RESET_QUIET_WARNING) && t_delta_in_ms > REFRESH_INDEX_DELAY_WARNING_IN_MS) {
- printf(_("\nIt took %.2f seconds to enumerate unstaged changes after reset. You can\n"
- "use '--quiet' to avoid this. Set the config setting reset.quiet to true\n"
- "to make this the default.\n"), t_delta_in_ms / 1000.0);
+ if (!quiet && advice_enabled(ADVICE_RESET_NO_REFRESH_WARNING) && t_delta_in_ms > REFRESH_INDEX_DELAY_WARNING_IN_MS) {
+ advise(_("It took %.2f seconds to refresh the index after reset. You can use\n"
+ "'--no-refresh' to avoid this."), t_delta_in_ms / 1000.0);
}
}
} else {
diff --git a/builtin/rev-list.c b/builtin/rev-list.c
index 38528c7f15..572da1472e 100644
--- a/builtin/rev-list.c
+++ b/builtin/rev-list.c
@@ -62,7 +62,6 @@ static const char rev_list_usage[] =
static struct progress *progress;
static unsigned progress_counter;
-static struct list_objects_filter_options filter_options;
static struct oidset omitted_objects;
static int arg_print_omitted; /* print objects omitted by filter */
@@ -400,7 +399,6 @@ static inline int parse_missing_action_value(const char *value)
}
static int try_bitmap_count(struct rev_info *revs,
- struct list_objects_filter_options *filter,
int filter_provided_objects)
{
uint32_t commit_count = 0,
@@ -436,7 +434,7 @@ static int try_bitmap_count(struct rev_info *revs,
*/
max_count = revs->max_count;
- bitmap_git = prepare_bitmap_walk(revs, filter, filter_provided_objects);
+ bitmap_git = prepare_bitmap_walk(revs, filter_provided_objects);
if (!bitmap_git)
return -1;
@@ -453,7 +451,6 @@ static int try_bitmap_count(struct rev_info *revs,
}
static int try_bitmap_traversal(struct rev_info *revs,
- struct list_objects_filter_options *filter,
int filter_provided_objects)
{
struct bitmap_index *bitmap_git;
@@ -465,7 +462,7 @@ static int try_bitmap_traversal(struct rev_info *revs,
if (revs->max_count >= 0)
return -1;
- bitmap_git = prepare_bitmap_walk(revs, filter, filter_provided_objects);
+ bitmap_git = prepare_bitmap_walk(revs, filter_provided_objects);
if (!bitmap_git)
return -1;
@@ -475,7 +472,6 @@ static int try_bitmap_traversal(struct rev_info *revs,
}
static int try_bitmap_disk_usage(struct rev_info *revs,
- struct list_objects_filter_options *filter,
int filter_provided_objects)
{
struct bitmap_index *bitmap_git;
@@ -483,7 +479,7 @@ static int try_bitmap_disk_usage(struct rev_info *revs,
if (!show_disk_usage)
return -1;
- bitmap_git = prepare_bitmap_walk(revs, filter, filter_provided_objects);
+ bitmap_git = prepare_bitmap_walk(revs, filter_provided_objects);
if (!bitmap_git)
return -1;
@@ -595,17 +591,6 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix)
show_progress = arg;
continue;
}
-
- if (skip_prefix(arg, ("--" CL_ARG__FILTER "="), &arg)) {
- parse_list_objects_filter(&filter_options, arg);
- if (filter_options.choice && !revs.blob_objects)
- die(_("object filtering requires --objects"));
- continue;
- }
- if (!strcmp(arg, ("--no-" CL_ARG__FILTER))) {
- list_objects_filter_set_no_filter(&filter_options);
- continue;
- }
if (!strcmp(arg, "--filter-provided-objects")) {
filter_provided_objects = 1;
continue;
@@ -688,11 +673,11 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix)
progress = start_delayed_progress(show_progress, 0);
if (use_bitmap_index) {
- if (!try_bitmap_count(&revs, &filter_options, filter_provided_objects))
+ if (!try_bitmap_count(&revs, filter_provided_objects))
return 0;
- if (!try_bitmap_disk_usage(&revs, &filter_options, filter_provided_objects))
+ if (!try_bitmap_disk_usage(&revs, filter_provided_objects))
return 0;
- if (!try_bitmap_traversal(&revs, &filter_options, filter_provided_objects))
+ if (!try_bitmap_traversal(&revs, filter_provided_objects))
return 0;
}
@@ -733,7 +718,7 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix)
oidset_init(&missing_objects, DEFAULT_OIDSET_SIZE);
traverse_commit_list_filtered(
- &filter_options, &revs, show_commit, show_object, &info,
+ &revs, show_commit, show_object, &info,
(arg_print_omitted ? &omitted_objects : NULL));
if (arg_print_omitted) {
diff --git a/builtin/sparse-checkout.c b/builtin/sparse-checkout.c
index 7f02e4352a..0217d44c5b 100644
--- a/builtin/sparse-checkout.c
+++ b/builtin/sparse-checkout.c
@@ -8,7 +8,6 @@
#include "run-command.h"
#include "strbuf.h"
#include "string-list.h"
-#include "cache.h"
#include "cache-tree.h"
#include "lockfile.h"
#include "resolve-undo.h"
diff --git a/builtin/stash.c b/builtin/stash.c
index 242e73cbb0..0c7b6a9588 100644
--- a/builtin/stash.c
+++ b/builtin/stash.c
@@ -16,7 +16,6 @@
#include "log-tree.h"
#include "diffcore.h"
#include "exec-cmd.h"
-#include "entry.h"
#include "reflog.h"
#define INCLUDE_ALL_FILES 2
@@ -311,7 +310,7 @@ static int reset_head(void)
* API for resetting.
*/
cp.git_cmd = 1;
- strvec_push(&cp.args, "reset");
+ strvec_pushl(&cp.args, "reset", "--quiet", "--refresh", NULL);
return run_command(&cp);
}
@@ -1623,7 +1622,8 @@ static int do_push_stash(const struct pathspec *ps, const char *stash_msg, int q
struct child_process cp = CHILD_PROCESS_INIT;
cp.git_cmd = 1;
- strvec_pushl(&cp.args, "reset", "-q", "--", NULL);
+ strvec_pushl(&cp.args, "reset", "-q", "--refresh", "--",
+ NULL);
add_pathspecs(&cp.args, ps);
if (run_command(&cp)) {
ret = -1;
diff --git a/builtin/submodule--helper.c b/builtin/submodule--helper.c
index 2f7c58362b..1a8e5d0621 100644
--- a/builtin/submodule--helper.c
+++ b/builtin/submodule--helper.c
@@ -31,11 +31,13 @@
typedef void (*each_submodule_fn)(const struct cache_entry *list_item,
void *cb_data);
-static char *get_default_remote(void)
+static char *repo_get_default_remote(struct repository *repo)
{
char *dest = NULL, *ret;
struct strbuf sb = STRBUF_INIT;
- const char *refname = resolve_ref_unsafe("HEAD", 0, NULL, NULL);
+ struct ref_store *store = get_main_ref_store(repo);
+ const char *refname = refs_resolve_ref_unsafe(store, "HEAD", 0, NULL,
+ NULL);
if (!refname)
die(_("No such ref: %s"), "HEAD");
@@ -48,7 +50,7 @@ static char *get_default_remote(void)
die(_("Expecting a full ref name, got %s"), refname);
strbuf_addf(&sb, "branch.%s.remote", refname);
- if (git_config_get_string(sb.buf, &dest))
+ if (repo_config_get_string(repo, sb.buf, &dest))
ret = xstrdup("origin");
else
ret = dest;
@@ -57,19 +59,17 @@ static char *get_default_remote(void)
return ret;
}
-static int print_default_remote(int argc, const char **argv, const char *prefix)
+static char *get_default_remote_submodule(const char *module_path)
{
- char *remote;
-
- if (argc != 1)
- die(_("submodule--helper print-default-remote takes no arguments"));
+ struct repository subrepo;
- remote = get_default_remote();
- if (remote)
- printf("%s\n", remote);
+ repo_submodule_init(&subrepo, the_repository, module_path, null_oid());
+ return repo_get_default_remote(&subrepo);
+}
- free(remote);
- return 0;
+static char *get_default_remote(void)
+{
+ return repo_get_default_remote(the_repository);
}
static int starts_with_dot_slash(const char *str)
@@ -247,11 +247,10 @@ static int resolve_relative_url_test(int argc, const char **argv, const char *pr
return 0;
}
-/* the result should be freed by the caller. */
-static char *get_submodule_displaypath(const char *path, const char *prefix)
+static char *do_get_submodule_displaypath(const char *path,
+ const char *prefix,
+ const char *super_prefix)
{
- const char *super_prefix = get_super_prefix();
-
if (prefix && super_prefix) {
BUG("cannot have prefix '%s' and superprefix '%s'",
prefix, super_prefix);
@@ -267,6 +266,13 @@ static char *get_submodule_displaypath(const char *path, const char *prefix)
}
}
+/* the result should be freed by the caller. */
+static char *get_submodule_displaypath(const char *path, const char *prefix)
+{
+ const char *super_prefix = get_super_prefix();
+ return do_get_submodule_displaypath(path, prefix, super_prefix);
+}
+
static char *compute_rev_name(const char *sub_path, const char* object_id)
{
struct strbuf sb = STRBUF_INIT;
@@ -588,18 +594,22 @@ static int module_foreach(int argc, const char **argv, const char *prefix)
struct init_cb {
const char *prefix;
+ const char *superprefix;
unsigned int flags;
};
#define INIT_CB_INIT { 0 }
static void init_submodule(const char *path, const char *prefix,
- unsigned int flags)
+ const char *superprefix, unsigned int flags)
{
const struct submodule *sub;
struct strbuf sb = STRBUF_INIT;
char *upd = NULL, *url = NULL, *displaypath;
- displaypath = get_submodule_displaypath(path, prefix);
+ /* try superprefix from the environment, if it is not passed explicitly */
+ if (!superprefix)
+ superprefix = get_super_prefix();
+ displaypath = do_get_submodule_displaypath(path, prefix, superprefix);
sub = submodule_from_path(the_repository, null_oid(), path);
@@ -673,7 +683,7 @@ static void init_submodule(const char *path, const char *prefix,
static void init_submodule_cb(const struct cache_entry *list_item, void *cb_data)
{
struct init_cb *info = cb_data;
- init_submodule(list_item->name, info->prefix, info->flags);
+ init_submodule(list_item->name, info->prefix, info->superprefix, info->flags);
}
static int module_init(int argc, const char **argv, const char *prefix)
@@ -1343,9 +1353,8 @@ static void sync_submodule(const char *path, const char *prefix,
{
const struct submodule *sub;
char *remote_key = NULL;
- char *sub_origin_url, *super_config_url, *displaypath;
+ char *sub_origin_url, *super_config_url, *displaypath, *default_remote;
struct strbuf sb = STRBUF_INIT;
- struct child_process cp = CHILD_PROCESS_INIT;
char *sub_config_path = NULL;
if (!is_submodule_active(the_repository, path))
@@ -1384,21 +1393,15 @@ static void sync_submodule(const char *path, const char *prefix,
if (!is_submodule_populated_gently(path, NULL))
goto cleanup;
- prepare_submodule_repo_env(&cp.env_array);
- cp.git_cmd = 1;
- cp.dir = path;
- strvec_pushl(&cp.args, "submodule--helper",
- "print-default-remote", NULL);
-
strbuf_reset(&sb);
- if (capture_command(&cp, &sb, 0))
+ default_remote = get_default_remote_submodule(path);
+ if (!default_remote)
die(_("failed to get the default remote for submodule '%s'"),
path);
- strbuf_strip_suffix(&sb, "\n");
- remote_key = xstrfmt("remote.%s.url", sb.buf);
+ remote_key = xstrfmt("remote.%s.url", default_remote);
+ free(default_remote);
- strbuf_reset(&sb);
submodule_to_gitdir(&sb, path);
strbuf_addstr(&sb, "/config");
@@ -1640,7 +1643,10 @@ struct module_clone_data {
unsigned int require_init: 1;
int single_branch;
};
-#define MODULE_CLONE_DATA_INIT { .reference = STRING_LIST_INIT_NODUP, .single_branch = -1 }
+#define MODULE_CLONE_DATA_INIT { \
+ .reference = STRING_LIST_INIT_NODUP, \
+ .single_branch = -1, \
+}
struct submodule_alternate_setup {
const char *submodule_name;
@@ -1896,7 +1902,7 @@ static int module_clone(int argc, const char **argv, const char *prefix)
const char *const git_submodule_helper_usage[] = {
N_("git submodule--helper clone [--prefix=<path>] [--quiet] "
"[--reference <repository>] [--name <name>] [--depth <depth>] "
- "[--single-branch] [--filter <filter-spec>]"
+ "[--single-branch] [--filter <filter-spec>] "
"--url <url> --path <path>"),
NULL
};
@@ -1957,29 +1963,6 @@ static void determine_submodule_update_strategy(struct repository *r,
free(key);
}
-static int module_update_module_mode(int argc, const char **argv, const char *prefix)
-{
- const char *path, *update = NULL;
- int just_cloned;
- struct submodule_update_strategy update_strategy = { .type = SM_UPDATE_CHECKOUT };
-
- if (argc < 3 || argc > 4)
- die("submodule--helper update-module-clone expects <just-cloned> <path> [<update>]");
-
- just_cloned = git_config_int("just_cloned", argv[1]);
- path = argv[2];
-
- if (argc == 4)
- update = argv[3];
-
- determine_submodule_update_strategy(the_repository,
- just_cloned, path, update,
- &update_strategy);
- fputs(submodule_strategy_to_string(&update_strategy), stdout);
-
- return 0;
-}
-
struct update_clone_data {
const struct submodule *sub;
struct object_id oid;
@@ -1987,28 +1970,13 @@ struct update_clone_data {
};
struct submodule_update_clone {
- /* index into 'list', the list of submodules to look into for cloning */
+ /* index into 'update_data.list', the list of submodules to look into for cloning */
int current;
- struct module_list list;
- unsigned warn_if_uninitialized : 1;
-
- /* update parameter passed via commandline */
- struct submodule_update_strategy update;
/* configuration parameters which are passed on to the children */
- int progress;
- int quiet;
- int recommend_shallow;
- struct string_list references;
- int dissociate;
- unsigned require_init;
- const char *depth;
- const char *recursive_prefix;
- const char *prefix;
- int single_branch;
- struct list_objects_filter_options *filter_options;
+ struct update_data *update_data;
- /* to be consumed by git-submodule.sh */
+ /* to be consumed by update_submodule() */
struct update_clone_data *update_clone;
int update_clone_nr; int update_clone_alloc;
@@ -2018,32 +1986,47 @@ struct submodule_update_clone {
/* failed clones to be retried again */
const struct cache_entry **failed_clones;
int failed_clones_nr, failed_clones_alloc;
-
- int max_jobs;
};
-#define SUBMODULE_UPDATE_CLONE_INIT { \
- .list = MODULE_LIST_INIT, \
- .update = SUBMODULE_UPDATE_STRATEGY_INIT, \
- .recommend_shallow = -1, \
- .references = STRING_LIST_INIT_DUP, \
- .single_branch = -1, \
- .max_jobs = 1, \
-}
+#define SUBMODULE_UPDATE_CLONE_INIT { 0 }
struct update_data {
+ const char *prefix;
const char *recursive_prefix;
- const char *sm_path;
const char *displaypath;
- struct object_id oid;
+ const char *update_default;
struct object_id suboid;
+ struct string_list references;
struct submodule_update_strategy update_strategy;
+ struct list_objects_filter_options *filter_options;
+ struct module_list list;
int depth;
- unsigned int force: 1;
- unsigned int quiet: 1;
- unsigned int nofetch: 1;
- unsigned int just_cloned: 1;
+ int max_jobs;
+ int single_branch;
+ int recommend_shallow;
+ unsigned int require_init;
+ unsigned int force;
+ unsigned int quiet;
+ unsigned int nofetch;
+ unsigned int remote;
+ unsigned int progress;
+ unsigned int dissociate;
+ unsigned int init;
+ unsigned int warn_if_uninitialized;
+ unsigned int recursive;
+
+ /* copied over from update_clone_data */
+ struct object_id oid;
+ unsigned int just_cloned;
+ const char *sm_path;
};
-#define UPDATE_DATA_INIT { .update_strategy = SUBMODULE_UPDATE_STRATEGY_INIT }
+#define UPDATE_DATA_INIT { \
+ .update_strategy = SUBMODULE_UPDATE_STRATEGY_INIT, \
+ .list = MODULE_LIST_INIT, \
+ .recommend_shallow = -1, \
+ .references = STRING_LIST_INIT_DUP, \
+ .single_branch = -1, \
+ .max_jobs = 1, \
+}
static void next_submodule_warn_missing(struct submodule_update_clone *suc,
struct strbuf *out, const char *displaypath)
@@ -2052,7 +2035,7 @@ static void next_submodule_warn_missing(struct submodule_update_clone *suc,
* Only mention uninitialized submodules when their
* paths have been specified.
*/
- if (suc->warn_if_uninitialized) {
+ if (suc->update_data->warn_if_uninitialized) {
strbuf_addf(out,
_("Submodule path '%s' not initialized"),
displaypath);
@@ -2084,8 +2067,8 @@ static int prepare_to_clone_next_submodule(const struct cache_entry *ce,
int need_free_url = 0;
if (ce_stage(ce)) {
- if (suc->recursive_prefix)
- strbuf_addf(&sb, "%s/%s", suc->recursive_prefix, ce->name);
+ if (suc->update_data->recursive_prefix)
+ strbuf_addf(&sb, "%s/%s", suc->update_data->recursive_prefix, ce->name);
else
strbuf_addstr(&sb, ce->name);
strbuf_addf(out, _("Skipping unmerged submodule %s"), sb.buf);
@@ -2095,8 +2078,8 @@ static int prepare_to_clone_next_submodule(const struct cache_entry *ce,
sub = submodule_from_path(the_repository, null_oid(), ce->name);
- if (suc->recursive_prefix)
- displaypath = relative_path(suc->recursive_prefix,
+ if (suc->update_data->recursive_prefix)
+ displaypath = relative_path(suc->update_data->recursive_prefix,
ce->name, &displaypath_sb);
else
displaypath = ce->name;
@@ -2114,8 +2097,8 @@ static int prepare_to_clone_next_submodule(const struct cache_entry *ce,
}
free(key);
- if (suc->update.type == SM_UPDATE_NONE
- || (suc->update.type == SM_UPDATE_UNSPECIFIED
+ if (suc->update_data->update_strategy.type == SM_UPDATE_NONE
+ || (suc->update_data->update_strategy.type == SM_UPDATE_UNSPECIFIED
&& update_type == SM_UPDATE_NONE)) {
strbuf_addf(out, _("Skipping submodule '%s'"), displaypath);
strbuf_addch(out, '\n');
@@ -2159,33 +2142,33 @@ static int prepare_to_clone_next_submodule(const struct cache_entry *ce,
child->err = -1;
strvec_push(&child->args, "submodule--helper");
strvec_push(&child->args, "clone");
- if (suc->progress)
+ if (suc->update_data->progress)
strvec_push(&child->args, "--progress");
- if (suc->quiet)
+ if (suc->update_data->quiet)
strvec_push(&child->args, "--quiet");
- if (suc->prefix)
- strvec_pushl(&child->args, "--prefix", suc->prefix, NULL);
- if (suc->recommend_shallow && sub->recommend_shallow == 1)
+ if (suc->update_data->prefix)
+ strvec_pushl(&child->args, "--prefix", suc->update_data->prefix, NULL);
+ if (suc->update_data->recommend_shallow && sub->recommend_shallow == 1)
strvec_push(&child->args, "--depth=1");
- if (suc->filter_options && suc->filter_options->choice)
+ else if (suc->update_data->depth)
+ strvec_pushf(&child->args, "--depth=%d", suc->update_data->depth);
+ if (suc->update_data->filter_options && suc->update_data->filter_options->choice)
strvec_pushf(&child->args, "--filter=%s",
- expand_list_objects_filter_spec(suc->filter_options));
- if (suc->require_init)
+ expand_list_objects_filter_spec(suc->update_data->filter_options));
+ if (suc->update_data->require_init)
strvec_push(&child->args, "--require-init");
strvec_pushl(&child->args, "--path", sub->path, NULL);
strvec_pushl(&child->args, "--name", sub->name, NULL);
strvec_pushl(&child->args, "--url", url, NULL);
- if (suc->references.nr) {
+ if (suc->update_data->references.nr) {
struct string_list_item *item;
- for_each_string_list_item(item, &suc->references)
+ for_each_string_list_item(item, &suc->update_data->references)
strvec_pushl(&child->args, "--reference", item->string, NULL);
}
- if (suc->dissociate)
+ if (suc->update_data->dissociate)
strvec_push(&child->args, "--dissociate");
- if (suc->depth)
- strvec_push(&child->args, suc->depth);
- if (suc->single_branch >= 0)
- strvec_push(&child->args, suc->single_branch ?
+ if (suc->update_data->single_branch >= 0)
+ strvec_push(&child->args, suc->update_data->single_branch ?
"--single-branch" :
"--no-single-branch");
@@ -2207,8 +2190,8 @@ static int update_clone_get_next_task(struct child_process *child,
const struct cache_entry *ce;
int index;
- for (; suc->current < suc->list.nr; suc->current++) {
- ce = suc->list.entries[suc->current];
+ for (; suc->current < suc->update_data->list.nr; suc->current++) {
+ ce = suc->update_data->list.entries[suc->current];
if (prepare_to_clone_next_submodule(ce, child, suc, err)) {
int *p = xmalloc(sizeof(*p));
*p = suc->current;
@@ -2223,7 +2206,7 @@ static int update_clone_get_next_task(struct child_process *child,
* stragglers again, which we can imagine as an extension of the
* entry list.
*/
- index = suc->current - suc->list.nr;
+ index = suc->current - suc->update_data->list.nr;
if (index < suc->failed_clones_nr) {
int *p;
ce = suc->failed_clones[index];
@@ -2268,8 +2251,8 @@ static int update_clone_task_finished(int result,
if (!result)
return 0;
- if (idx < suc->list.nr) {
- ce = suc->list.entries[idx];
+ if (idx < suc->update_data->list.nr) {
+ ce = suc->update_data->list.entries[idx];
strbuf_addf(err, _("Failed to clone '%s'. Retry scheduled"),
ce->name);
strbuf_addch(err, '\n');
@@ -2279,7 +2262,7 @@ static int update_clone_task_finished(int result,
suc->failed_clones[suc->failed_clones_nr++] = ce;
return 0;
} else {
- idx -= suc->list.nr;
+ idx -= suc->update_data->list.nr;
ce = suc->failed_clones[idx];
strbuf_addf(err, _("Failed to clone '%s' a second time, aborting"),
ce->name);
@@ -2343,83 +2326,76 @@ static int fetch_in_submodule(const char *module_path, int depth, int quiet, str
static int run_update_command(struct update_data *ud, int subforce)
{
- struct strvec args = STRVEC_INIT;
- struct strvec child_env = STRVEC_INIT;
+ struct child_process cp = CHILD_PROCESS_INIT;
char *oid = oid_to_hex(&ud->oid);
int must_die_on_failure = 0;
- int git_cmd;
switch (ud->update_strategy.type) {
case SM_UPDATE_CHECKOUT:
- git_cmd = 1;
- strvec_pushl(&args, "checkout", "-q", NULL);
+ cp.git_cmd = 1;
+ strvec_pushl(&cp.args, "checkout", "-q", NULL);
if (subforce)
- strvec_push(&args, "-f");
+ strvec_push(&cp.args, "-f");
break;
case SM_UPDATE_REBASE:
- git_cmd = 1;
- strvec_push(&args, "rebase");
+ cp.git_cmd = 1;
+ strvec_push(&cp.args, "rebase");
if (ud->quiet)
- strvec_push(&args, "--quiet");
+ strvec_push(&cp.args, "--quiet");
must_die_on_failure = 1;
break;
case SM_UPDATE_MERGE:
- git_cmd = 1;
- strvec_push(&args, "merge");
+ cp.git_cmd = 1;
+ strvec_push(&cp.args, "merge");
if (ud->quiet)
- strvec_push(&args, "--quiet");
+ strvec_push(&cp.args, "--quiet");
must_die_on_failure = 1;
break;
case SM_UPDATE_COMMAND:
- git_cmd = 0;
- strvec_push(&args, ud->update_strategy.command);
+ cp.use_shell = 1;
+ strvec_push(&cp.args, ud->update_strategy.command);
must_die_on_failure = 1;
break;
default:
BUG("unexpected update strategy type: %s",
submodule_strategy_to_string(&ud->update_strategy));
}
- strvec_push(&args, oid);
+ strvec_push(&cp.args, oid);
- prepare_submodule_repo_env(&child_env);
- if (run_command_v_opt_cd_env(args.v, git_cmd ? RUN_GIT_CMD : RUN_USING_SHELL,
- ud->sm_path, child_env.v)) {
+ cp.dir = xstrdup(ud->sm_path);
+ prepare_submodule_repo_env(&cp.env_array);
+ if (run_command(&cp)) {
switch (ud->update_strategy.type) {
case SM_UPDATE_CHECKOUT:
- printf(_("Unable to checkout '%s' in submodule path '%s'"),
- oid, ud->displaypath);
+ die_message(_("Unable to checkout '%s' in submodule path '%s'"),
+ oid, ud->displaypath);
break;
case SM_UPDATE_REBASE:
- printf(_("Unable to rebase '%s' in submodule path '%s'"),
- oid, ud->displaypath);
+ die_message(_("Unable to rebase '%s' in submodule path '%s'"),
+ oid, ud->displaypath);
break;
case SM_UPDATE_MERGE:
- printf(_("Unable to merge '%s' in submodule path '%s'"),
- oid, ud->displaypath);
+ die_message(_("Unable to merge '%s' in submodule path '%s'"),
+ oid, ud->displaypath);
break;
case SM_UPDATE_COMMAND:
- printf(_("Execution of '%s %s' failed in submodule path '%s'"),
- ud->update_strategy.command, oid, ud->displaypath);
+ die_message(_("Execution of '%s %s' failed in submodule path '%s'"),
+ ud->update_strategy.command, oid, ud->displaypath);
break;
default:
BUG("unexpected update strategy type: %s",
submodule_strategy_to_string(&ud->update_strategy));
}
- /*
- * NEEDSWORK: We are currently printing to stdout with error
- * return so that the shell caller handles the error output
- * properly. Once we start handling the error messages within
- * C, we should use die() instead.
- */
if (must_die_on_failure)
- return 2;
- /*
- * This signifies to the caller in shell that the command
- * failed without dying
- */
+ exit(128);
+
+ /* the command failed, but update must continue */
return 1;
}
+ if (ud->quiet)
+ return 0;
+
switch (ud->update_strategy.type) {
case SM_UPDATE_CHECKOUT:
printf(_("Submodule path '%s': checked out '%s'\n"),
@@ -2445,7 +2421,7 @@ static int run_update_command(struct update_data *ud, int subforce)
return 0;
}
-static int do_run_update_procedure(struct update_data *ud)
+static int run_update_procedure(struct update_data *ud)
{
int subforce = is_null_oid(&ud->suboid) || ud->force;
@@ -2475,21 +2451,211 @@ static int do_run_update_procedure(struct update_data *ud)
return run_update_command(ud, subforce);
}
-static void update_submodule(struct update_clone_data *ucd)
+static const char *remote_submodule_branch(const char *path)
+{
+ const struct submodule *sub;
+ const char *branch = NULL;
+ char *key;
+
+ sub = submodule_from_path(the_repository, null_oid(), path);
+ if (!sub)
+ return NULL;
+
+ key = xstrfmt("submodule.%s.branch", sub->name);
+ if (repo_config_get_string_tmp(the_repository, key, &branch))
+ branch = sub->branch;
+ free(key);
+
+ if (!branch)
+ return "HEAD";
+
+ if (!strcmp(branch, ".")) {
+ const char *refname = resolve_ref_unsafe("HEAD", 0, NULL, NULL);
+
+ if (!refname)
+ die(_("No such ref: %s"), "HEAD");
+
+ /* detached HEAD */
+ if (!strcmp(refname, "HEAD"))
+ die(_("Submodule (%s) branch configured to inherit "
+ "branch from superproject, but the superproject "
+ "is not on any branch"), sub->name);
+
+ if (!skip_prefix(refname, "refs/heads/", &refname))
+ die(_("Expecting a full ref name, got %s"), refname);
+ return refname;
+ }
+
+ return branch;
+}
+
+static void ensure_core_worktree(const char *path)
{
- fprintf(stdout, "dummy %s %d\t%s\n",
- oid_to_hex(&ucd->oid),
- ucd->just_cloned,
- ucd->sub->path);
+ const char *cw;
+ struct repository subrepo;
+
+ if (repo_submodule_init(&subrepo, the_repository, path, null_oid()))
+ die(_("could not get a repository handle for submodule '%s'"), path);
+
+ if (!repo_config_get_string_tmp(&subrepo, "core.worktree", &cw)) {
+ char *cfg_file, *abs_path;
+ const char *rel_path;
+ struct strbuf sb = STRBUF_INIT;
+
+ cfg_file = repo_git_path(&subrepo, "config");
+
+ abs_path = absolute_pathdup(path);
+ rel_path = relative_path(abs_path, subrepo.gitdir, &sb);
+
+ git_config_set_in_file(cfg_file, "core.worktree", rel_path);
+
+ free(cfg_file);
+ free(abs_path);
+ strbuf_release(&sb);
+ }
+}
+
+static void update_data_to_args(struct update_data *update_data, struct strvec *args)
+{
+ strvec_pushl(args, "submodule--helper", "update", "--recursive", NULL);
+ strvec_pushf(args, "--jobs=%d", update_data->max_jobs);
+ if (update_data->recursive_prefix)
+ strvec_pushl(args, "--recursive-prefix",
+ update_data->recursive_prefix, NULL);
+ if (update_data->quiet)
+ strvec_push(args, "--quiet");
+ if (update_data->force)
+ strvec_push(args, "--force");
+ if (update_data->init)
+ strvec_push(args, "--init");
+ if (update_data->remote)
+ strvec_push(args, "--remote");
+ if (update_data->nofetch)
+ strvec_push(args, "--no-fetch");
+ if (update_data->dissociate)
+ strvec_push(args, "--dissociate");
+ if (update_data->progress)
+ strvec_push(args, "--progress");
+ if (update_data->require_init)
+ strvec_push(args, "--require-init");
+ if (update_data->depth)
+ strvec_pushf(args, "--depth=%d", update_data->depth);
+ if (update_data->update_default)
+ strvec_pushl(args, "--update", update_data->update_default, NULL);
+ if (update_data->references.nr) {
+ struct string_list_item *item;
+ for_each_string_list_item(item, &update_data->references)
+ strvec_pushl(args, "--reference", item->string, NULL);
+ }
+ if (update_data->filter_options && update_data->filter_options->choice)
+ strvec_pushf(args, "--filter=%s",
+ expand_list_objects_filter_spec(
+ update_data->filter_options));
+ if (update_data->recommend_shallow == 0)
+ strvec_push(args, "--no-recommend-shallow");
+ else if (update_data->recommend_shallow == 1)
+ strvec_push(args, "--recommend-shallow");
+ if (update_data->single_branch >= 0)
+ strvec_push(args, update_data->single_branch ?
+ "--single-branch" :
+ "--no-single-branch");
}
-static int update_submodules(struct submodule_update_clone *suc)
+static int update_submodule(struct update_data *update_data)
{
- int i;
+ char *prefixed_path;
+
+ ensure_core_worktree(update_data->sm_path);
- run_processes_parallel_tr2(suc->max_jobs, update_clone_get_next_task,
+ if (update_data->recursive_prefix)
+ prefixed_path = xstrfmt("%s%s", update_data->recursive_prefix,
+ update_data->sm_path);
+ else
+ prefixed_path = xstrdup(update_data->sm_path);
+
+ update_data->displaypath = get_submodule_displaypath(prefixed_path,
+ update_data->prefix);
+ free(prefixed_path);
+
+ determine_submodule_update_strategy(the_repository, update_data->just_cloned,
+ update_data->sm_path, update_data->update_default,
+ &update_data->update_strategy);
+
+ if (update_data->just_cloned)
+ oidcpy(&update_data->suboid, null_oid());
+ else if (resolve_gitlink_ref(update_data->sm_path, "HEAD", &update_data->suboid))
+ die(_("Unable to find current revision in submodule path '%s'"),
+ update_data->displaypath);
+
+ if (update_data->remote) {
+ char *remote_name = get_default_remote_submodule(update_data->sm_path);
+ const char *branch = remote_submodule_branch(update_data->sm_path);
+ char *remote_ref = xstrfmt("refs/remotes/%s/%s", remote_name, branch);
+
+ if (!update_data->nofetch) {
+ if (fetch_in_submodule(update_data->sm_path, update_data->depth,
+ 0, NULL))
+ die(_("Unable to fetch in submodule path '%s'"),
+ update_data->sm_path);
+ }
+
+ if (resolve_gitlink_ref(update_data->sm_path, remote_ref, &update_data->oid))
+ die(_("Unable to find %s revision in submodule path '%s'"),
+ remote_ref, update_data->sm_path);
+
+ free(remote_ref);
+ }
+
+ if (!oideq(&update_data->oid, &update_data->suboid) || update_data->force)
+ if (run_update_procedure(update_data))
+ return 1;
+
+ if (update_data->recursive) {
+ struct child_process cp = CHILD_PROCESS_INIT;
+ struct update_data next = *update_data;
+ int res;
+
+ if (update_data->recursive_prefix)
+ prefixed_path = xstrfmt("%s%s/", update_data->recursive_prefix,
+ update_data->sm_path);
+ else
+ prefixed_path = xstrfmt("%s/", update_data->sm_path);
+
+ next.recursive_prefix = get_submodule_displaypath(prefixed_path,
+ update_data->prefix);
+ next.prefix = NULL;
+ oidcpy(&next.oid, null_oid());
+ oidcpy(&next.suboid, null_oid());
+
+ cp.dir = update_data->sm_path;
+ cp.git_cmd = 1;
+ prepare_submodule_repo_env(&cp.env_array);
+ update_data_to_args(&next, &cp.args);
+
+ /* if the child process die()'d, pass its exit code on below */
+ res = run_command(&cp);
+ if (!res)
+ return 0;
+ die_message(_("Failed to recurse into submodule path '%s'"),
+ update_data->displaypath);
+ if (res == 128)
+ exit(res);
+ else if (res)
+ return 1;
+ }
+
+ return 0;
+}
+
+static int update_submodules(struct update_data *update_data)
+{
+ int i, res = 0;
+ struct submodule_update_clone suc = SUBMODULE_UPDATE_CLONE_INIT;
+
+ suc.update_data = update_data;
+ run_processes_parallel_tr2(suc.update_data->max_jobs, update_clone_get_next_task,
update_clone_start_failure,
- update_clone_task_finished, suc, "submodule",
+ update_clone_task_finished, &suc, "submodule",
"parallel/update");
/*
@@ -2500,218 +2666,139 @@ static int update_submodules(struct submodule_update_clone *suc)
* checkout involve more straightforward sequential I/O.
* - the listener can avoid doing any work if fetching failed.
*/
- if (suc->quickstop)
- return 1;
+ if (suc.quickstop) {
+ res = 1;
+ goto cleanup;
+ }
- for (i = 0; i < suc->update_clone_nr; i++)
- update_submodule(&suc->update_clone[i]);
+ for (i = 0; i < suc.update_clone_nr; i++) {
+ struct update_clone_data ucd = suc.update_clone[i];
- return 0;
+ oidcpy(&update_data->oid, &ucd.oid);
+ update_data->just_cloned = ucd.just_cloned;
+ update_data->sm_path = ucd.sub->path;
+
+ if (update_submodule(update_data))
+ res = 1;
+ }
+
+cleanup:
+ string_list_clear(&update_data->references, 0);
+ return res;
}
-static int update_clone(int argc, const char **argv, const char *prefix)
+static int module_update(int argc, const char **argv, const char *prefix)
{
- const char *update = NULL;
struct pathspec pathspec;
- struct submodule_update_clone suc = SUBMODULE_UPDATE_CLONE_INIT;
+ struct update_data opt = UPDATE_DATA_INIT;
struct list_objects_filter_options filter_options;
int ret;
- struct option module_update_clone_options[] = {
- OPT_STRING(0, "prefix", &prefix,
+ struct option module_update_options[] = {
+ OPT__FORCE(&opt.force, N_("force checkout updates"), 0),
+ OPT_BOOL(0, "init", &opt.init,
+ N_("initialize uninitialized submodules before update")),
+ OPT_BOOL(0, "remote", &opt.remote,
+ N_("use SHA-1 of submodule's remote tracking branch")),
+ OPT_BOOL(0, "recursive", &opt.recursive,
+ N_("traverse submodules recursively")),
+ OPT_BOOL('N', "no-fetch", &opt.nofetch,
+ N_("don't fetch new objects from the remote site")),
+ OPT_STRING(0, "prefix", &opt.prefix,
N_("path"),
N_("path into the working tree")),
- OPT_STRING(0, "recursive-prefix", &suc.recursive_prefix,
+ OPT_STRING(0, "recursive-prefix", &opt.recursive_prefix,
N_("path"),
N_("path into the working tree, across nested "
"submodule boundaries")),
- OPT_STRING(0, "update", &update,
+ OPT_STRING(0, "update", &opt.update_default,
N_("string"),
N_("rebase, merge, checkout or none")),
- OPT_STRING_LIST(0, "reference", &suc.references, N_("repo"),
+ OPT_STRING_LIST(0, "reference", &opt.references, N_("repo"),
N_("reference repository")),
- OPT_BOOL(0, "dissociate", &suc.dissociate,
+ OPT_BOOL(0, "dissociate", &opt.dissociate,
N_("use --reference only while cloning")),
- OPT_STRING(0, "depth", &suc.depth, "<depth>",
+ OPT_INTEGER(0, "depth", &opt.depth,
N_("create a shallow clone truncated to the "
"specified number of revisions")),
- OPT_INTEGER('j', "jobs", &suc.max_jobs,
+ OPT_INTEGER('j', "jobs", &opt.max_jobs,
N_("parallel jobs")),
- OPT_BOOL(0, "recommend-shallow", &suc.recommend_shallow,
+ OPT_BOOL(0, "recommend-shallow", &opt.recommend_shallow,
N_("whether the initial clone should follow the shallow recommendation")),
- OPT__QUIET(&suc.quiet, N_("don't print cloning progress")),
- OPT_BOOL(0, "progress", &suc.progress,
+ OPT__QUIET(&opt.quiet, N_("don't print cloning progress")),
+ OPT_BOOL(0, "progress", &opt.progress,
N_("force cloning progress")),
- OPT_BOOL(0, "require-init", &suc.require_init,
+ OPT_BOOL(0, "require-init", &opt.require_init,
N_("disallow cloning into non-empty directory")),
- OPT_BOOL(0, "single-branch", &suc.single_branch,
+ OPT_BOOL(0, "single-branch", &opt.single_branch,
N_("clone only one branch, HEAD or --branch")),
OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options),
OPT_END()
};
const char *const git_submodule_helper_usage[] = {
- N_("git submodule--helper update-clone [--prefix=<path>] [<path>...]"),
+ N_("git submodule [--quiet] update"
+ " [--init [--filter=<filter-spec>]] [--remote]"
+ " [-N|--no-fetch] [-f|--force]"
+ " [--checkout|--merge|--rebase]"
+ " [--[no-]recommend-shallow] [--reference <repository>]"
+ " [--recursive] [--[no-]single-branch] [--] [<path>...]"),
NULL
};
- suc.prefix = prefix;
- update_clone_config_from_gitmodules(&suc.max_jobs);
- git_config(git_update_clone_config, &suc.max_jobs);
+ update_clone_config_from_gitmodules(&opt.max_jobs);
+ git_config(git_update_clone_config, &opt.max_jobs);
memset(&filter_options, 0, sizeof(filter_options));
- argc = parse_options(argc, argv, prefix, module_update_clone_options,
+ argc = parse_options(argc, argv, prefix, module_update_options,
git_submodule_helper_usage, 0);
- suc.filter_options = &filter_options;
- if (update)
- if (parse_submodule_update_strategy(update, &suc.update) < 0)
+ if (filter_options.choice && !opt.init) {
+ usage_with_options(git_submodule_helper_usage,
+ module_update_options);
+ }
+
+ opt.filter_options = &filter_options;
+
+ if (opt.update_default)
+ if (parse_submodule_update_strategy(opt.update_default,
+ &opt.update_strategy) < 0)
die(_("bad value for update parameter"));
- if (module_list_compute(argc, argv, prefix, &pathspec, &suc.list) < 0) {
+ if (module_list_compute(argc, argv, prefix, &pathspec, &opt.list) < 0) {
list_objects_filter_release(&filter_options);
return 1;
}
if (pathspec.nr)
- suc.warn_if_uninitialized = 1;
-
- ret = update_submodules(&suc);
- list_objects_filter_release(&filter_options);
- return ret;
-}
-
-static int run_update_procedure(int argc, const char **argv, const char *prefix)
-{
- int force = 0, quiet = 0, nofetch = 0, just_cloned = 0;
- char *prefixed_path, *update = NULL;
- struct update_data update_data = UPDATE_DATA_INIT;
-
- struct option options[] = {
- OPT__QUIET(&quiet, N_("suppress output for update by rebase or merge")),
- OPT__FORCE(&force, N_("force checkout updates"), 0),
- OPT_BOOL('N', "no-fetch", &nofetch,
- N_("don't fetch new objects from the remote site")),
- OPT_BOOL(0, "just-cloned", &just_cloned,
- N_("overrides update mode in case the repository is a fresh clone")),
- OPT_INTEGER(0, "depth", &update_data.depth, N_("depth for shallow fetch")),
- OPT_STRING(0, "prefix", &prefix,
- N_("path"),
- N_("path into the working tree")),
- OPT_STRING(0, "update", &update,
- N_("string"),
- N_("rebase, merge, checkout or none")),
- OPT_STRING(0, "recursive-prefix", &update_data.recursive_prefix, N_("path"),
- N_("path into the working tree, across nested "
- "submodule boundaries")),
- OPT_CALLBACK_F(0, "oid", &update_data.oid, N_("sha1"),
- N_("SHA1 expected by superproject"), PARSE_OPT_NONEG,
- parse_opt_object_id),
- OPT_CALLBACK_F(0, "suboid", &update_data.suboid, N_("subsha1"),
- N_("SHA1 of submodule's HEAD"), PARSE_OPT_NONEG,
- parse_opt_object_id),
- OPT_END()
- };
-
- const char *const usage[] = {
- N_("git submodule--helper run-update-procedure [<options>] <path>"),
- NULL
- };
-
- argc = parse_options(argc, argv, prefix, options, usage, 0);
-
- if (argc != 1)
- usage_with_options(usage, options);
-
- update_data.force = !!force;
- update_data.quiet = !!quiet;
- update_data.nofetch = !!nofetch;
- update_data.just_cloned = !!just_cloned;
- update_data.sm_path = argv[0];
-
- if (update_data.recursive_prefix)
- prefixed_path = xstrfmt("%s%s", update_data.recursive_prefix, update_data.sm_path);
- else
- prefixed_path = xstrdup(update_data.sm_path);
-
- update_data.displaypath = get_submodule_displaypath(prefixed_path, prefix);
-
- determine_submodule_update_strategy(the_repository, update_data.just_cloned,
- update_data.sm_path, update,
- &update_data.update_strategy);
+ opt.warn_if_uninitialized = 1;
- free(prefixed_path);
-
- if (!oideq(&update_data.oid, &update_data.suboid) || update_data.force)
- return do_run_update_procedure(&update_data);
-
- return 3;
-}
-
-static int resolve_relative_path(int argc, const char **argv, const char *prefix)
-{
- struct strbuf sb = STRBUF_INIT;
- if (argc != 3)
- die("submodule--helper relative-path takes exactly 2 arguments, got %d", argc);
+ if (opt.init) {
+ struct module_list list = MODULE_LIST_INIT;
+ struct init_cb info = INIT_CB_INIT;
- printf("%s", relative_path(argv[1], argv[2], &sb));
- strbuf_release(&sb);
- return 0;
-}
-
-static const char *remote_submodule_branch(const char *path)
-{
- const struct submodule *sub;
- const char *branch = NULL;
- char *key;
-
- sub = submodule_from_path(the_repository, null_oid(), path);
- if (!sub)
- return NULL;
-
- key = xstrfmt("submodule.%s.branch", sub->name);
- if (repo_config_get_string_tmp(the_repository, key, &branch))
- branch = sub->branch;
- free(key);
-
- if (!branch)
- return "HEAD";
-
- if (!strcmp(branch, ".")) {
- const char *refname = resolve_ref_unsafe("HEAD", 0, NULL, NULL);
+ if (module_list_compute(argc, argv, opt.prefix,
+ &pathspec, &list) < 0)
+ return 1;
- if (!refname)
- die(_("No such ref: %s"), "HEAD");
+ /*
+ * If there are no path args and submodule.active is set, then
+ * by default only initialize 'active' modules.
+ */
+ if (!argc && git_config_get_value_multi("submodule.active"))
+ module_list_active(&list);
- /* detached HEAD */
- if (!strcmp(refname, "HEAD"))
- die(_("Submodule (%s) branch configured to inherit "
- "branch from superproject, but the superproject "
- "is not on any branch"), sub->name);
+ info.prefix = opt.prefix;
+ info.superprefix = opt.recursive_prefix;
+ if (opt.quiet)
+ info.flags |= OPT_QUIET;
- if (!skip_prefix(refname, "refs/heads/", &refname))
- die(_("Expecting a full ref name, got %s"), refname);
- return refname;
+ for_each_listed_submodule(&list, init_submodule_cb, &info);
}
- return branch;
-}
-
-static int resolve_remote_submodule_branch(int argc, const char **argv,
- const char *prefix)
-{
- const char *ret;
- struct strbuf sb = STRBUF_INIT;
- if (argc != 2)
- die("submodule--helper remote-branch takes exactly one arguments, got %d", argc);
-
- ret = remote_submodule_branch(argv[1]);
- if (!ret)
- die("submodule %s doesn't exist", argv[1]);
-
- printf("%s", ret);
- strbuf_release(&sb);
- return 0;
+ ret = update_submodules(&opt);
+ list_objects_filter_release(&filter_options);
+ return ret;
}
static int push_check(int argc, const char **argv, const char *prefix)
@@ -2791,40 +2878,6 @@ static int push_check(int argc, const char **argv, const char *prefix)
return 0;
}
-static int ensure_core_worktree(int argc, const char **argv, const char *prefix)
-{
- const char *path;
- const char *cw;
- struct repository subrepo;
-
- if (argc != 2)
- BUG("submodule--helper ensure-core-worktree <path>");
-
- path = argv[1];
-
- if (repo_submodule_init(&subrepo, the_repository, path, null_oid()))
- die(_("could not get a repository handle for submodule '%s'"), path);
-
- if (!repo_config_get_string_tmp(&subrepo, "core.worktree", &cw)) {
- char *cfg_file, *abs_path;
- const char *rel_path;
- struct strbuf sb = STRBUF_INIT;
-
- cfg_file = repo_git_path(&subrepo, "config");
-
- abs_path = absolute_pathdup(path);
- rel_path = relative_path(abs_path, subrepo.gitdir, &sb);
-
- git_config_set_in_file(cfg_file, "core.worktree", rel_path);
-
- free(cfg_file);
- free(abs_path);
- strbuf_release(&sb);
- }
-
- return 0;
-}
-
static int absorb_git_dirs(int argc, const char **argv, const char *prefix)
{
int i;
@@ -3019,15 +3072,16 @@ static int module_create_branch(int argc, const char **argv, const char *prefix)
OPT__FORCE(&force, N_("force creation"), 0),
OPT_BOOL(0, "create-reflog", &reflog,
N_("create the branch's reflog")),
- OPT_SET_INT('t', "track", &track,
- N_("set up tracking mode (see git-pull(1))"),
- BRANCH_TRACK_EXPLICIT),
+ OPT_CALLBACK_F('t', "track", &track, "(direct|inherit)",
+ N_("set branch tracking configuration"),
+ PARSE_OPT_OPTARG,
+ parse_opt_tracking_mode),
OPT__DRY_RUN(&dry_run,
N_("show whether the branch would be created")),
OPT_END()
};
const char *const usage[] = {
- N_("git submodule--helper create-branch [-f|--force] [--create-reflog] [-q|--quiet] [-t|--track] [-n|--dry-run] <name> <start_oid> <start_name>"),
+ N_("git submodule--helper create-branch [-f|--force] [--create-reflog] [-q|--quiet] [-t|--track] [-n|--dry-run] <name> <start-oid> <start-name>"),
NULL
};
@@ -3045,6 +3099,7 @@ static int module_create_branch(int argc, const char **argv, const char *prefix)
force, reflog, quiet, track, dry_run);
return 0;
}
+
struct add_data {
const char *prefix;
const char *branch;
@@ -3433,20 +3488,14 @@ static struct cmd_struct commands[] = {
{"name", module_name, 0},
{"clone", module_clone, 0},
{"add", module_add, SUPPORT_SUPER_PREFIX},
- {"update-module-mode", module_update_module_mode, 0},
- {"update-clone", update_clone, 0},
- {"run-update-procedure", run_update_procedure, 0},
- {"ensure-core-worktree", ensure_core_worktree, 0},
- {"relative-path", resolve_relative_path, 0},
+ {"update", module_update, 0},
{"resolve-relative-url-test", resolve_relative_url_test, 0},
{"foreach", module_foreach, SUPPORT_SUPER_PREFIX},
{"init", module_init, SUPPORT_SUPER_PREFIX},
{"status", module_status, SUPPORT_SUPER_PREFIX},
- {"print-default-remote", print_default_remote, 0},
{"sync", module_sync, SUPPORT_SUPER_PREFIX},
{"deinit", module_deinit, 0},
{"summary", module_summary, SUPPORT_SUPER_PREFIX},
- {"remote-branch", resolve_remote_submodule_branch, 0},
{"push-check", push_check, 0},
{"absorb-git-dirs", absorb_git_dirs, SUPPORT_SUPER_PREFIX},
{"is-active", is_active, 0},
diff --git a/builtin/update-index.c b/builtin/update-index.c
index aafe7eeac2..876112abb2 100644
--- a/builtin/update-index.c
+++ b/builtin/update-index.c
@@ -1236,14 +1236,17 @@ int cmd_update_index(int argc, const char **argv, const char *prefix)
}
if (fsmonitor > 0) {
- if (git_config_get_fsmonitor() == 0)
+ enum fsmonitor_mode fsm_mode = fsm_settings__get_mode(r);
+ if (fsm_mode == FSMONITOR_MODE_DISABLED) {
warning(_("core.fsmonitor is unset; "
"set it if you really want to "
"enable fsmonitor"));
+ }
add_fsmonitor(&the_index);
report(_("fsmonitor enabled"));
} else if (!fsmonitor) {
- if (git_config_get_fsmonitor() == 1)
+ enum fsmonitor_mode fsm_mode = fsm_settings__get_mode(r);
+ if (fsm_mode > FSMONITOR_MODE_DISABLED)
warning(_("core.fsmonitor is set; "
"remove it if you really want to "
"disable fsmonitor"));
diff --git a/builtin/worktree.c b/builtin/worktree.c
index 4eaba2a8fd..8b32cd1651 100644
--- a/builtin/worktree.c
+++ b/builtin/worktree.c
@@ -22,6 +22,7 @@ static const char * const worktree_usage[] = {
N_("git worktree move <worktree> <new-path>"),
N_("git worktree prune [<options>]"),
N_("git worktree remove [<options>] <worktree>"),
+ N_("git worktree repair [<path>...]"),
N_("git worktree unlock <path>"),
NULL
};
@@ -650,35 +651,37 @@ static int add(int ac, const char **av, const char *prefix)
return add_worktree(path, branch, &opts);
}
-static void show_worktree_porcelain(struct worktree *wt)
+static void show_worktree_porcelain(struct worktree *wt, int line_terminator)
{
const char *reason;
- printf("worktree %s\n", wt->path);
+ printf("worktree %s%c", wt->path, line_terminator);
if (wt->is_bare)
- printf("bare\n");
+ printf("bare%c", line_terminator);
else {
- printf("HEAD %s\n", oid_to_hex(&wt->head_oid));
+ printf("HEAD %s%c", oid_to_hex(&wt->head_oid), line_terminator);
if (wt->is_detached)
- printf("detached\n");
+ printf("detached%c", line_terminator);
else if (wt->head_ref)
- printf("branch %s\n", wt->head_ref);
+ printf("branch %s%c", wt->head_ref, line_terminator);
}
reason = worktree_lock_reason(wt);
- if (reason && *reason) {
- struct strbuf sb = STRBUF_INIT;
- quote_c_style(reason, &sb, NULL, 0);
- printf("locked %s\n", sb.buf);
- strbuf_release(&sb);
- } else if (reason)
- printf("locked\n");
+ if (reason) {
+ fputs("locked", stdout);
+ if (*reason) {
+ fputc(' ', stdout);
+ write_name_quoted(reason, stdout, line_terminator);
+ } else {
+ fputc(line_terminator, stdout);
+ }
+ }
reason = worktree_prune_reason(wt, expire);
if (reason)
- printf("prunable %s\n", reason);
+ printf("prunable %s%c", reason, line_terminator);
- printf("\n");
+ fputc(line_terminator, stdout);
}
static void show_worktree(struct worktree *wt, int path_maxlen, int abbrev_len)
@@ -756,12 +759,15 @@ static void pathsort(struct worktree **wt)
static int list(int ac, const char **av, const char *prefix)
{
int porcelain = 0;
+ int line_terminator = '\n';
struct option options[] = {
OPT_BOOL(0, "porcelain", &porcelain, N_("machine-readable output")),
OPT__VERBOSE(&verbose, N_("show extended annotations and reasons, if available")),
OPT_EXPIRY_DATE(0, "expire", &expire,
N_("add 'prunable' annotation to worktrees older than <time>")),
+ OPT_SET_INT('z', NULL, &line_terminator,
+ N_("terminate records with a NUL character"), '\0'),
OPT_END()
};
@@ -771,6 +777,8 @@ static int list(int ac, const char **av, const char *prefix)
usage_with_options(worktree_usage, options);
else if (verbose && porcelain)
die(_("options '%s' and '%s' cannot be used together"), "--verbose", "--porcelain");
+ else if (!line_terminator && !porcelain)
+ die(_("the option '%s' requires '%s'"), "-z", "--porcelain");
else {
struct worktree **worktrees = get_worktrees();
int path_maxlen = 0, abbrev = DEFAULT_ABBREV, i;
@@ -783,7 +791,8 @@ static int list(int ac, const char **av, const char *prefix)
for (i = 0; worktrees[i]; i++) {
if (porcelain)
- show_worktree_porcelain(worktrees[i]);
+ show_worktree_porcelain(worktrees[i],
+ line_terminator);
else
show_worktree(worktrees[i], path_maxlen, abbrev);
}