From 7b29ef457cd5a7b45a26a4535ae7a352db797977 Mon Sep 17 00:00:00 2001 From: Kevin Willford Date: Wed, 5 Apr 2017 10:58:09 -0600 Subject: [PATCH 001/104] reset --stdin: trim carriage return from the paths While using the reset --stdin feature on windows path added may have a \r at the end of the path that wasn't getting removed so didn't match the path in the index and wasn't reset. Signed-off-by: Kevin Willford --- t/t7108-reset-stdin.sh | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/t/t7108-reset-stdin.sh b/t/t7108-reset-stdin.sh index b7cbcbf869296c..db5483b8f10052 100755 --- a/t/t7108-reset-stdin.sh +++ b/t/t7108-reset-stdin.sh @@ -29,4 +29,13 @@ test_expect_success '--stdin requires --mixed' ' git reset --mixed --stdin list && + git reset --stdin Date: Tue, 24 Jan 2017 17:44:31 +0100 Subject: [PATCH 002/104] gvfs: start by adding the -gvfs suffix to the version Signed-off-by: Saeed Noursalehi --- GIT-VERSION-GEN | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/GIT-VERSION-GEN b/GIT-VERSION-GEN index 06a5333ee63891..4670236cca7490 100755 --- a/GIT-VERSION-GEN +++ b/GIT-VERSION-GEN @@ -1,7 +1,7 @@ #!/bin/sh GVF=GIT-VERSION-FILE -DEF_VER=v2.27.0 +DEF_VER=v2.27.0.vfs.1.1 LF=' ' From 758bdf42ad45e97831b24b06907a003207945f32 Mon Sep 17 00:00:00 2001 From: Johannes Schindelin Date: Tue, 4 Apr 2017 12:04:11 +0200 Subject: [PATCH 003/104] gvfs: ensure that the version is based on a GVFS tag Signed-off-by: Johannes Schindelin --- GIT-VERSION-GEN | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/GIT-VERSION-GEN b/GIT-VERSION-GEN index 4670236cca7490..376b9bd829a10f 100755 --- a/GIT-VERSION-GEN +++ b/GIT-VERSION-GEN @@ -12,7 +12,7 @@ if test -f version then VN=$(cat version) || VN="$DEF_VER" elif test -d ${GIT_DIR:-.git} -o -f .git && - VN=$(git describe --match "v[0-9]*" HEAD 2>/dev/null) && + VN=$(git describe --match "v[0-9]*vfs*" HEAD 2>/dev/null) && case "$VN" in *$LF*) (exit 1) ;; v[0-9]*) From a18c086fc9e9c2cdcbe4316751b3cde472f7928b Mon Sep 17 00:00:00 2001 From: Kevin Willford Date: Tue, 24 Jan 2017 17:30:59 +0100 Subject: [PATCH 004/104] gvfs: add a GVFS-specific header file This header file will accumulate GVFS-specific definitions. Signed-off-by: Kevin Willford --- gvfs.h | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 gvfs.h diff --git a/gvfs.h b/gvfs.h new file mode 100644 index 00000000000000..b6dbe85eae4071 --- /dev/null +++ b/gvfs.h @@ -0,0 +1,9 @@ +#ifndef GVFS_H +#define GVFS_H + +/* + * This file is for the specific settings and methods + * used for GVFS functionality + */ + +#endif /* GVFS_H */ From a191ed6c292452e3bbad96a86e5ae7689821ba9c Mon Sep 17 00:00:00 2001 From: Kevin Willford Date: Tue, 24 Jan 2017 17:34:12 +0100 Subject: [PATCH 005/104] gvfs: add the core.gvfs config setting This does not do anything yet. The next patches will add various values for that config setting that correspond to the various features offered/required by GVFS. Signed-off-by: Kevin Willford --- Documentation/config/core.txt | 3 +++ cache.h | 1 + config.c | 6 ++++++ environment.c | 1 + gvfs.h | 31 +++++++++++++++++++++++++++++++ 5 files changed, 42 insertions(+) diff --git a/Documentation/config/core.txt b/Documentation/config/core.txt index d615d1acb878ce..b6b30e935eff96 100644 --- a/Documentation/config/core.txt +++ b/Documentation/config/core.txt @@ -622,6 +622,9 @@ core.multiPackIndex:: single index. See link:technical/multi-pack-index.html[the multi-pack-index design document]. +core.gvfs:: + Enable the features needed for GVFS. 
+ core.sparseCheckout:: Enable "sparse checkout" feature. See linkgit:git-sparse-checkout[1] for more information. diff --git a/cache.h b/cache.h index f98ae3d0931f16..56daebdf355d5b 100644 --- a/cache.h +++ b/cache.h @@ -955,6 +955,7 @@ extern char *git_replace_ref_base; extern int fsync_object_files; extern int core_preload_index; +extern int core_gvfs; extern int precomposed_unicode; extern int protect_hfs; extern int protect_ntfs; diff --git a/config.c b/config.c index 59c2bda33bacee..0ec4d8b402d5f2 100644 --- a/config.c +++ b/config.c @@ -20,6 +20,7 @@ #include "dir.h" #include "color.h" #include "refs.h" +#include "gvfs.h" struct config_source { struct config_source *prev; @@ -1374,6 +1375,11 @@ static int git_default_core_config(const char *var, const char *value, void *cb) return 0; } + if (!strcmp(var, "core.gvfs")) { + gvfs_load_config_value(value); + return 0; + } + if (!strcmp(var, "core.sparsecheckout")) { core_apply_sparse_checkout = git_config_bool(var, value); return 0; diff --git a/environment.c b/environment.c index aaca0e91ac8f4a..92692c2c05f74d 100644 --- a/environment.c +++ b/environment.c @@ -69,6 +69,7 @@ char *notes_ref_name; int grafts_replace_parents = 1; int core_apply_sparse_checkout; int core_sparse_checkout_cone; +int core_gvfs; int merge_log_config = -1; int precomposed_unicode = -1; /* see probe_utf8_pathname_composition() */ unsigned long pack_size_limit_cfg; diff --git a/gvfs.h b/gvfs.h index b6dbe85eae4071..2e9e10f4e49ef6 100644 --- a/gvfs.h +++ b/gvfs.h @@ -1,9 +1,40 @@ #ifndef GVFS_H #define GVFS_H +#include "cache.h" +#include "config.h" + /* * This file is for the specific settings and methods * used for GVFS functionality */ +static inline int gvfs_config_is_set(int mask) { + return (core_gvfs & mask) == mask; +} + +static inline int gvfs_config_is_set_any(void) { + return core_gvfs > 0; +} + +static inline void gvfs_load_config_value(const char *value) { + int is_bool = 0; + + if (value) + core_gvfs = git_config_bool_or_int("core.gvfs", value, &is_bool); + else + git_config_get_bool_or_int("core.gvfs", &is_bool, &core_gvfs); + + /* Turn on all bits if a bool was set in the settings */ + if (is_bool && core_gvfs) + core_gvfs = -1; +} + + +static inline int gvfs_config_load_and_is_set(int mask) { + gvfs_load_config_value(0); + return gvfs_config_is_set(mask); +} + + #endif /* GVFS_H */ From 65d8c2ab28dbe618f1c68a603c9280f398a58230 Mon Sep 17 00:00:00 2001 From: Kevin Willford Date: Tue, 24 Jan 2017 17:38:59 +0100 Subject: [PATCH 006/104] gvfs: add the feature to skip writing the index' SHA-1 This takes a substantial amount of time, and if the user is reasonably sure that the files' integrity is not compromised, that time can be saved. Git no longer verifies the SHA-1 by default, anyway. Signed-off-by: Kevin Willford --- Documentation/config/core.txt | 10 +++++++++- gvfs.h | 6 ++++++ read-cache.c | 11 ++++++++--- t/t1016-read-tree-skip-sha-on-read.sh | 22 ++++++++++++++++++++++ 4 files changed, 45 insertions(+), 4 deletions(-) create mode 100755 t/t1016-read-tree-skip-sha-on-read.sh diff --git a/Documentation/config/core.txt b/Documentation/config/core.txt index b6b30e935eff96..6e7e39853a0cd4 100644 --- a/Documentation/config/core.txt +++ b/Documentation/config/core.txt @@ -623,7 +623,15 @@ core.multiPackIndex:: multi-pack-index design document]. core.gvfs:: - Enable the features needed for GVFS. + Enable the features needed for GVFS. 
This value can be set to true + to indicate all features should be turned on or the bit values listed + below can be used to turn on specific features. ++ +-- + GVFS_SKIP_SHA_ON_INDEX:: + Bit value 1 + Disables the calculation of the sha when writing the index +-- core.sparseCheckout:: Enable "sparse checkout" feature. See linkgit:git-sparse-checkout[1] diff --git a/gvfs.h b/gvfs.h index 2e9e10f4e49ef6..690419127a72dd 100644 --- a/gvfs.h +++ b/gvfs.h @@ -9,6 +9,12 @@ * used for GVFS functionality */ + +/* + * The list of bits in the core_gvfs setting + */ +#define GVFS_SKIP_SHA_ON_INDEX (1 << 0) + static inline int gvfs_config_is_set(int mask) { return (core_gvfs & mask) == mask; } diff --git a/read-cache.c b/read-cache.c index 7f66e0817148f8..381802b6dcbd64 100644 --- a/read-cache.c +++ b/read-cache.c @@ -25,6 +25,7 @@ #include "fsmonitor.h" #include "thread-utils.h" #include "progress.h" +#include "gvfs.h" /* Mask for the name length in ce_flags in the on-disk index */ @@ -2466,7 +2467,9 @@ static int ce_write_flush(git_hash_ctx *context, int fd) { unsigned int buffered = write_buffer_len; if (buffered) { - the_hash_algo->update_fn(context, write_buffer, buffered); + if (!gvfs_config_is_set(GVFS_SKIP_SHA_ON_INDEX)) + the_hash_algo->update_fn(context, write_buffer, + buffered); if (write_in_full(fd, write_buffer, buffered) < 0) return -1; write_buffer_len = 0; @@ -2515,7 +2518,8 @@ static int ce_flush(git_hash_ctx *context, int fd, unsigned char *hash) if (left) { write_buffer_len = 0; - the_hash_algo->update_fn(context, write_buffer, left); + if (!gvfs_config_is_set(GVFS_SKIP_SHA_ON_INDEX)) + the_hash_algo->update_fn(context, write_buffer, left); } /* Flush first if not enough space for hash signature */ @@ -2526,7 +2530,8 @@ static int ce_flush(git_hash_ctx *context, int fd, unsigned char *hash) } /* Append the hash signature at the end */ - the_hash_algo->final_fn(write_buffer + left, context); + if (!gvfs_config_is_set(GVFS_SKIP_SHA_ON_INDEX)) + the_hash_algo->final_fn(write_buffer + left, context); hashcpy(hash, write_buffer + left); left += the_hash_algo->rawsz; return (write_in_full(fd, write_buffer, left) < 0) ? -1 : 0; diff --git a/t/t1016-read-tree-skip-sha-on-read.sh b/t/t1016-read-tree-skip-sha-on-read.sh new file mode 100755 index 00000000000000..5b76a80a0020dc --- /dev/null +++ b/t/t1016-read-tree-skip-sha-on-read.sh @@ -0,0 +1,22 @@ +#!/bin/sh + +test_description='check that read-tree works with core.gvfs config value' + +. ./test-lib.sh +. 
"$TEST_DIRECTORY"/lib-read-tree.sh + +test_expect_success setup ' + echo one >a && + git add a && + git commit -m initial +' +test_expect_success 'read-tree without core.gvsf' ' + read_tree_u_must_succeed -m -u HEAD +' + +test_expect_success 'read-tree with core.gvfs set to 1' ' + git config core.gvfs 1 && + read_tree_u_must_succeed -m -u HEAD +' + +test_done From 18135adc0a960aeeb9c65e65f91ed9194c313784 Mon Sep 17 00:00:00 2001 From: Kevin Willford Date: Tue, 24 Jan 2017 17:54:55 +0100 Subject: [PATCH 007/104] gvfs: add the feature that blobs may be missing Signed-off-by: Kevin Willford --- Documentation/config/core.txt | 4 ++++ cache-tree.c | 4 +++- gvfs.h | 1 + t/t0000-basic.sh | 5 +++++ 4 files changed, 13 insertions(+), 1 deletion(-) diff --git a/Documentation/config/core.txt b/Documentation/config/core.txt index 6e7e39853a0cd4..19c7f66ed1aaf4 100644 --- a/Documentation/config/core.txt +++ b/Documentation/config/core.txt @@ -631,6 +631,10 @@ core.gvfs:: GVFS_SKIP_SHA_ON_INDEX:: Bit value 1 Disables the calculation of the sha when writing the index + GVFS_MISSING_OK:: + Bit value 4 + Normally git write-tree ensures that the objects referenced by the + directory exist in the object database. This option disables this check. -- core.sparseCheckout:: diff --git a/cache-tree.c b/cache-tree.c index a537a806c16e03..9a59dafa26a07d 100644 --- a/cache-tree.c +++ b/cache-tree.c @@ -6,6 +6,7 @@ #include "object-store.h" #include "replace-object.h" #include "promisor-remote.h" +#include "gvfs.h" #ifndef DEBUG_CACHE_TREE #define DEBUG_CACHE_TREE 0 @@ -244,7 +245,8 @@ static int update_one(struct cache_tree *it, int flags) { struct strbuf buffer; - int missing_ok = flags & WRITE_TREE_MISSING_OK; + int missing_ok = gvfs_config_is_set(GVFS_MISSING_OK) ? + WRITE_TREE_MISSING_OK : (flags & WRITE_TREE_MISSING_OK); int dryrun = flags & WRITE_TREE_DRY_RUN; int repair = flags & WRITE_TREE_REPAIR; int to_invalidate = 0; diff --git a/gvfs.h b/gvfs.h index 690419127a72dd..dabbf67f94c852 100644 --- a/gvfs.h +++ b/gvfs.h @@ -14,6 +14,7 @@ * The list of bits in the core_gvfs setting */ #define GVFS_SKIP_SHA_ON_INDEX (1 << 0) +#define GVFS_MISSING_OK (1 << 2) static inline int gvfs_config_is_set(int mask) { return (core_gvfs & mask) == mask; diff --git a/t/t0000-basic.sh b/t/t0000-basic.sh index 2ff176cd5d954c..3ee88792c965bd 100755 --- a/t/t0000-basic.sh +++ b/t/t0000-basic.sh @@ -1179,6 +1179,11 @@ test_expect_success 'writing this tree with --missing-ok' ' git write-tree --missing-ok ' +test_expect_success 'writing this tree with missing ok config value' ' + git config core.gvfs 4 && + git write-tree +' + ################################################################ test_expect_success 'git read-tree followed by write-tree should be idempotent' ' From 64985fc31fa38748729fa65d422a6a29dca65e72 Mon Sep 17 00:00:00 2001 From: Kevin Willford Date: Wed, 18 May 2016 13:40:39 +0000 Subject: [PATCH 008/104] gvfs: prevent files to be deleted outside the sparse checkout Prevent the sparse checkout to delete files that were marked with skip-worktree bit and are not in the sparse-checkout file. This is because everything with the skip-worktree bit turned on is being virtualized and will be removed with the change of HEAD. There was only one failing test when running with these changes that was checking to make sure the worktree narrows on checkout which was expected since we would no longer be narrowing the worktree. 
Signed-off-by: Kevin Willford --- Documentation/config/core.txt | 9 +++++++++ gvfs.h | 1 + t/t1090-sparse-checkout-scope.sh | 17 +++++++++++++++++ unpack-trees.c | 22 ++++++++++++++++++++++ 4 files changed, 49 insertions(+) diff --git a/Documentation/config/core.txt b/Documentation/config/core.txt index 19c7f66ed1aaf4..eb6acc69f415d8 100644 --- a/Documentation/config/core.txt +++ b/Documentation/config/core.txt @@ -635,6 +635,15 @@ core.gvfs:: Bit value 4 Normally git write-tree ensures that the objects referenced by the directory exist in the object database. This option disables this check. + GVFS_NO_DELETE_OUTSIDE_SPARSECHECKOUT:: + Bit value 8 + When marking entries to remove from the index and the working + directory this option will take into account what the + skip-worktree bit was set to so that if the entry has the + skip-worktree bit set it will not be removed from the working + directory. This will allow virtualized working directories to + detect the change to HEAD and use the new commit tree to show + the files that are in the working directory. -- core.sparseCheckout:: diff --git a/gvfs.h b/gvfs.h index dabbf67f94c852..f9144d2ad9ab92 100644 --- a/gvfs.h +++ b/gvfs.h @@ -15,6 +15,7 @@ */ #define GVFS_SKIP_SHA_ON_INDEX (1 << 0) #define GVFS_MISSING_OK (1 << 2) +#define GVFS_NO_DELETE_OUTSIDE_SPARSECHECKOUT (1 << 3) static inline int gvfs_config_is_set(int mask) { return (core_gvfs & mask) == mask; diff --git a/t/t1090-sparse-checkout-scope.sh b/t/t1090-sparse-checkout-scope.sh index ae723732777348..ac786c7e7af010 100755 --- a/t/t1090-sparse-checkout-scope.sh +++ b/t/t1090-sparse-checkout-scope.sh @@ -82,6 +82,23 @@ test_expect_success 'in partial clone, sparse checkout only fetches needed blobs test_cmp expect actual ' +test_expect_success 'checkout does not delete items outside the sparse checkout file' ' + git checkout master && + git config core.gvfs 8 && + git checkout -b outside && + echo "new file1" >d && + git add d && + git commit -m "branch initial" && + echo "new file1" >e && + git add e && + git commit -m "skipped worktree" && + git update-index --skip-worktree e && + echo "/d" >.git/info/sparse-checkout && + git checkout HEAD^ && + test_path_is_file d && + test_path_is_file e +' + test_expect_success MINGW 'no unnecessary opendir() with fscache' ' git clone . fscache-test && ( diff --git a/unpack-trees.c b/unpack-trees.c index 8b92e9a3f81ebd..84d170d1eca9f0 100644 --- a/unpack-trees.c +++ b/unpack-trees.c @@ -16,6 +16,7 @@ #include "fsmonitor.h" #include "object-store.h" #include "promisor-remote.h" +#include "gvfs.h" /* * Error messages expected by scripts out of plumbing commands such as @@ -2240,6 +2241,27 @@ static int deleted_entry(const struct cache_entry *ce, } if (!(old->ce_flags & CE_CONFLICTED) && verify_uptodate(old, o)) return -1; + + /* + * When marking entries to remove from the index and the working + * directory this option will take into account what the + * skip-worktree bit was set to so that if the entry has the + * skip-worktree bit set it will not be removed from the working + * directory. This will allow virtualized working directories to + * detect the change to HEAD and use the new commit tree to show + * the files that are in the working directory. 
+ * + * old is the cache_entry that will have the skip-worktree bit set + * which will need to be preserved when the CE_REMOVE entry is added + */ + if (gvfs_config_is_set(GVFS_NO_DELETE_OUTSIDE_SPARSECHECKOUT) && + old && + old->ce_flags & CE_SKIP_WORKTREE) { + add_entry(o, old, CE_REMOVE, 0); + invalidate_ce_path(old, o); + return 1; + } + add_entry(o, ce, CE_REMOVE, 0); invalidate_ce_path(ce, o); return 1; From 4e8499ce8e74a2c154619f9e57ed81a069f7fb40 Mon Sep 17 00:00:00 2001 From: Kevin Willford Date: Mon, 30 May 2016 10:55:53 -0400 Subject: [PATCH 009/104] gvfs: optionally skip reachability checks/upload pack during fetch While performing a fetch with a virtual file system we know that there will be missing objects and we don't want to download them just because of the reachability of the commits. We also don't want to download a pack file with commits, trees, and blobs since these will be downloaded on demand. This flag will skip the first connectivity check and by returning zero will skip the upload pack. It will also skip the second connectivity check but continue to update the branches to the latest commit ids. Signed-off-by: Kevin Willford --- Documentation/config/core.txt | 9 +++++++++ connected.c | 19 +++++++++++++++++++ gvfs.h | 1 + t/t5582-vfs.sh | 24 ++++++++++++++++++++++++ 4 files changed, 53 insertions(+) create mode 100755 t/t5582-vfs.sh diff --git a/Documentation/config/core.txt b/Documentation/config/core.txt index eb6acc69f415d8..87eb3e5908e337 100644 --- a/Documentation/config/core.txt +++ b/Documentation/config/core.txt @@ -644,6 +644,15 @@ core.gvfs:: directory. This will allow virtualized working directories to detect the change to HEAD and use the new commit tree to show the files that are in the working directory. + GVFS_FETCH_SKIP_REACHABILITY_AND_UPLOADPACK:: + Bit value 16 + While performing a fetch with a virtual file system we know + that there will be missing objects and we don't want to download + them just because of the reachability of the commits. We also + don't want to download a pack file with commits, trees, and blobs + since these will be downloaded on demand. This flag will skip the + checks on the reachability of objects during a fetch as well as + the upload pack so that extraneous objects don't get downloaded. -- core.sparseCheckout:: diff --git a/connected.c b/connected.c index 3135b71e1961ae..167f4b8384272d 100644 --- a/connected.c +++ b/connected.c @@ -6,6 +6,7 @@ #include "transport.h" #include "packfile.h" #include "promisor-remote.h" +#include "gvfs.h" /* * If we feed all the commits we want to verify to this command @@ -31,6 +32,24 @@ int check_connected(oid_iterate_fn fn, void *cb_data, size_t base_len; const unsigned hexsz = the_hash_algo->hexsz; + /* + * Running a virtual file system there will be objects that are + * missing locally and we don't want to download a bunch of + * commits, trees, and blobs just to make sure everything is + * reachable locally so this option will skip reachablility + * checks below that use rev-list. This will stop the check + * before uploadpack runs to determine if there is anything to + * fetch. Returning zero for the first check will also prevent the + * uploadpack from happening. It will also skip the check after + * the fetch is finished to make sure all the objects where + * downloaded in the pack file. This will allow the fetch to + * run and get all the latest tip commit ids for all the branches + * in the fetch but not pull down commits, trees, or blobs via + * upload pack. 
+ */ + if (gvfs_config_is_set(GVFS_FETCH_SKIP_REACHABILITY_AND_UPLOADPACK)) + return 0; + if (!opt) opt = &defaults; transport = opt->transport; diff --git a/gvfs.h b/gvfs.h index f9144d2ad9ab92..8ee12df31c8a1d 100644 --- a/gvfs.h +++ b/gvfs.h @@ -16,6 +16,7 @@ #define GVFS_SKIP_SHA_ON_INDEX (1 << 0) #define GVFS_MISSING_OK (1 << 2) #define GVFS_NO_DELETE_OUTSIDE_SPARSECHECKOUT (1 << 3) +#define GVFS_FETCH_SKIP_REACHABILITY_AND_UPLOADPACK (1 << 4) static inline int gvfs_config_is_set(int mask) { return (core_gvfs & mask) == mask; diff --git a/t/t5582-vfs.sh b/t/t5582-vfs.sh new file mode 100755 index 00000000000000..8a703cbb640387 --- /dev/null +++ b/t/t5582-vfs.sh @@ -0,0 +1,24 @@ +#!/bin/sh + +test_description='fetch using the flag to skip reachability and upload pack' + +. ./test-lib.sh + + +test_expect_success setup ' + echo inital >a && + git add a && + git commit -m initial && + git clone . one +' + +test_expect_success "fetch test" ' + cd one && + git config core.gvfs 16 && + rm -rf .git/objects/* && + git -C .. cat-file commit HEAD | git hash-object -w --stdin -t commit && + git fetch && + test_must_fail git rev-parse --verify HEAD^{tree} +' + +test_done \ No newline at end of file From 63c4477e767304a860801316cb85a70ff324f188 Mon Sep 17 00:00:00 2001 From: Ben Peart Date: Wed, 15 Jun 2016 14:59:16 +0000 Subject: [PATCH 010/104] gvfs: ensure all filters and EOL conversions are blocked Ensure all filters and EOL conversions are blocked when running under GVFS so that our projected file sizes will match the actual file size when it is hydrated on the local machine. Signed-off-by: Ben Peart --- Documentation/config/core.txt | 9 +++++++++ convert.c | 22 +++++++++++++++++++++ gvfs.h | 1 + t/t0021-conversion.sh | 37 +++++++++++++++++++++++++++++++++++ t/t0027-auto-crlf.sh | 12 ++++++++++++ 5 files changed, 81 insertions(+) diff --git a/Documentation/config/core.txt b/Documentation/config/core.txt index 87eb3e5908e337..c75ae0171e9365 100644 --- a/Documentation/config/core.txt +++ b/Documentation/config/core.txt @@ -653,6 +653,15 @@ core.gvfs:: since these will be downloaded on demand. This flag will skip the checks on the reachability of objects during a fetch as well as the upload pack so that extraneous objects don't get downloaded. + GVFS_BLOCK_FILTERS_AND_EOL_CONVERSIONS:: + Bit value 64 + With a virtual file system we only know the file size before any + CRLF or smudge/clean filters processing is done on the client. + To prevent file corruption due to truncation or expansion with + garbage at the end, these filters must not run when the file + is first accessed and brought down to the client. Git.exe can't + currently tell the first access vs subsequent accesses so this + flag just blocks them from occurring at all. -- core.sparseCheckout:: diff --git a/convert.c b/convert.c index 572449825c5a4c..4adb0dd65850d4 100644 --- a/convert.c +++ b/convert.c @@ -9,6 +9,7 @@ #include "sub-process.h" #include "utf8.h" #include "ll-merge.h" +#include "gvfs.h" /* * convert.c - convert a file when checking it out and checking it in. 
@@ -559,6 +560,9 @@ static int crlf_to_git(const struct index_state *istate, if (!buf) return 1; + if (gvfs_config_is_set(GVFS_BLOCK_FILTERS_AND_EOL_CONVERSIONS)) + die("CRLF conversions not supported when running under GVFS"); + /* only grow if not in place */ if (strbuf_avail(buf) + buf->len < len) strbuf_grow(buf, len - buf->len); @@ -598,6 +602,9 @@ static int crlf_to_worktree(const char *src, size_t len, if (!will_convert_lf_to_crlf(&stats, crlf_action)) return 0; + if (gvfs_config_is_set(GVFS_BLOCK_FILTERS_AND_EOL_CONVERSIONS)) + die("CRLF conversions not supported when running under GVFS"); + /* are we "faking" in place editing ? */ if (src == buf->buf) to_free = strbuf_detach(buf, NULL); @@ -712,6 +719,9 @@ static int apply_single_file_filter(const char *path, const char *src, size_t le struct async async; struct filter_params params; + if (gvfs_config_is_set(GVFS_BLOCK_FILTERS_AND_EOL_CONVERSIONS)) + die("Filter \"%s\" not supported when running under GVFS", cmd); + memset(&async, 0, sizeof(async)); async.proc = filter_buffer_or_fd; async.data = ¶ms; @@ -1116,6 +1126,9 @@ static int ident_to_git(const char *src, size_t len, if (!buf) return 1; + if (gvfs_config_is_set(GVFS_BLOCK_FILTERS_AND_EOL_CONVERSIONS)) + die("ident conversions not supported when running under GVFS"); + /* only grow if not in place */ if (strbuf_avail(buf) + buf->len < len) strbuf_grow(buf, len - buf->len); @@ -1163,6 +1176,9 @@ static int ident_to_worktree(const char *src, size_t len, if (!cnt) return 0; + if (gvfs_config_is_set(GVFS_BLOCK_FILTERS_AND_EOL_CONVERSIONS)) + die("ident conversions not supported when running under GVFS"); + /* are we "faking" in place editing ? */ if (src == buf->buf) to_free = strbuf_detach(buf, NULL); @@ -1617,6 +1633,9 @@ static int lf_to_crlf_filter_fn(struct stream_filter *filter, size_t count, o = 0; struct lf_to_crlf_filter *lf_to_crlf = (struct lf_to_crlf_filter *)filter; + if (gvfs_config_is_set(GVFS_BLOCK_FILTERS_AND_EOL_CONVERSIONS)) + die("CRLF conversions not supported when running under GVFS"); + /* * We may be holding onto the CR to see if it is followed by a * LF, in which case we would need to go to the main loop. 
@@ -1861,6 +1880,9 @@ static int ident_filter_fn(struct stream_filter *filter, struct ident_filter *ident = (struct ident_filter *)filter; static const char head[] = "$Id"; + if (gvfs_config_is_set(GVFS_BLOCK_FILTERS_AND_EOL_CONVERSIONS)) + die("ident conversions not supported when running under GVFS"); + if (!input) { /* drain upon eof */ switch (ident->state) { diff --git a/gvfs.h b/gvfs.h index 8ee12df31c8a1d..2d6de575bf4a65 100644 --- a/gvfs.h +++ b/gvfs.h @@ -17,6 +17,7 @@ #define GVFS_MISSING_OK (1 << 2) #define GVFS_NO_DELETE_OUTSIDE_SPARSECHECKOUT (1 << 3) #define GVFS_FETCH_SKIP_REACHABILITY_AND_UPLOADPACK (1 << 4) +#define GVFS_BLOCK_FILTERS_AND_EOL_CONVERSIONS (1 << 6) static inline int gvfs_config_is_set(int mask) { return (core_gvfs & mask) == mask; diff --git a/t/t0021-conversion.sh b/t/t0021-conversion.sh index 5508e0bf6fbbb3..75e5d77435b4f8 100755 --- a/t/t0021-conversion.sh +++ b/t/t0021-conversion.sh @@ -314,6 +314,43 @@ test_expect_success "filter: smudge empty file" ' test_cmp expected filtered-empty-in-repo ' +test_expect_success "filter: clean filters blocked when under GVFS" ' + test_config filter.empty-in-repo.clean "cat >/dev/null" && + test_config filter.empty-in-repo.smudge "echo smudged && cat" && + test_config core.gvfs 64 && + + echo dead data walking >empty-in-repo && + test_must_fail git add empty-in-repo +' + +test_expect_success "filter: smudge filters blocked when under GVFS" ' + test_config filter.empty-in-repo.clean "cat >/dev/null" && + test_config filter.empty-in-repo.smudge "echo smudged && cat" && + test_config core.gvfs 64 && + + test_must_fail git checkout +' + +test_expect_success "ident blocked on add when under GVFS" ' + test_config core.gvfs 64 && + test_config core.autocrlf false && + + echo "*.i ident" >.gitattributes && + echo "\$Id\$" > ident.i && + + test_must_fail git add ident.i +' + +test_expect_success "ident blocked when under GVFS" ' + git add ident.i && + + git commit -m "added ident.i" && + test_config core.gvfs 64 && + rm ident.i && + + test_must_fail git checkout -- ident.i +' + test_expect_success 'disable filter with empty override' ' test_config_global filter.disable.smudge false && test_config_global filter.disable.clean false && diff --git a/t/t0027-auto-crlf.sh b/t/t0027-auto-crlf.sh index 9fcd56fab37314..4d43e15bb96727 100755 --- a/t/t0027-auto-crlf.sh +++ b/t/t0027-auto-crlf.sh @@ -333,6 +333,18 @@ checkout_files () { " } +test_expect_success 'crlf conversions blocked when under GVFS' ' + git checkout -b gvfs && + test_commit initial && + rm initial.t && + test_config core.gvfs 64 && + test_config core.autocrlf true && + test_must_fail git read-tree --reset -u HEAD && + + git config core.autocrlf false && + git read-tree --reset -u HEAD +' + # Test control characters # NUL SOH CR EOF==^Z test_expect_success 'ls-files --eol -o Text/Binary' ' From 9ea9339971b674064caf9c396be037b62d5be99f Mon Sep 17 00:00:00 2001 From: Johannes Schindelin Date: Wed, 30 Nov 2016 23:11:36 +0100 Subject: [PATCH 011/104] Add a new run_hook_argv() function The two existing members of the run_hook*() family, run_hook_ve() and run_hook_le(), are good for callers that know the precise number of parameters already. Let's introduce a new sibling that takes an argv array for callers that want to pass a variable number of parameters. 
Signed-off-by: Johannes Schindelin --- run-command.c | 20 +++++++++++++++++--- run-command.h | 1 + 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/run-command.c b/run-command.c index 9b3a57d1e392c8..5aed5228a590ec 100644 --- a/run-command.c +++ b/run-command.c @@ -1342,7 +1342,8 @@ const char *find_hook(const char *name) return path.buf; } -int run_hook_ve(const char *const *env, const char *name, va_list args) +int run_hook_argv(const char *const *env, const char *name, + const char **argv) { struct child_process hook = CHILD_PROCESS_INIT; const char *p; @@ -1352,8 +1353,7 @@ int run_hook_ve(const char *const *env, const char *name, va_list args) return 0; argv_array_push(&hook.args, p); - while ((p = va_arg(args, const char *))) - argv_array_push(&hook.args, p); + argv_array_pushv(&hook.args, argv); hook.env = env; hook.no_stdin = 1; hook.stdout_to_stderr = 1; @@ -1362,6 +1362,20 @@ int run_hook_ve(const char *const *env, const char *name, va_list args) return run_command(&hook); } +int run_hook_ve(const char *const *env, const char *name, va_list args) +{ + struct argv_array argv = ARGV_ARRAY_INIT; + const char *p; + int ret; + + while ((p = va_arg(args, const char *))) + argv_array_push(&argv, p); + + ret = run_hook_argv(env, name, argv.argv); + argv_array_clear(&argv); + return ret; +} + int run_hook_le(const char *const *env, const char *name, ...) { va_list args; diff --git a/run-command.h b/run-command.h index 191dfcdafe3dfc..fda5c151ea1819 100644 --- a/run-command.h +++ b/run-command.h @@ -217,6 +217,7 @@ const char *find_hook(const char *name); LAST_ARG_MUST_BE_NULL int run_hook_le(const char *const *env, const char *name, ...); int run_hook_ve(const char *const *env, const char *name, va_list args); +int run_hook_argv(const char *const *env, const char *name, const char **argv); /* * Trigger an auto-gc From e00561c4cad514dca96368294c99afcd6ba41c85 Mon Sep 17 00:00:00 2001 From: Ben Peart Date: Tue, 10 Jan 2017 18:47:14 +0000 Subject: [PATCH 012/104] gvfs: allow "virtualizing" objects The idea is to allow blob objects to be missing from the local repository, and to load them lazily on demand. After discussing this idea on the mailing list, we will rename the feature to "lazy clone" and work more on this. Signed-off-by: Ben Peart --- cache.h | 2 ++ config.c | 5 +++++ connected.c | 2 ++ environment.c | 1 + sha1-file.c | 22 ++++++++++++++++++++++ 5 files changed, 32 insertions(+) diff --git a/cache.h b/cache.h index 56daebdf355d5b..523e59417bbf7f 100644 --- a/cache.h +++ b/cache.h @@ -984,6 +984,8 @@ int use_optional_locks(void); extern char comment_line_char; extern int auto_comment_line_char; +extern int core_virtualize_objects; + enum log_refs_config { LOG_REFS_UNSET = -1, LOG_REFS_NONE = 0, diff --git a/config.c b/config.c index 0ec4d8b402d5f2..7489793a2a5a04 100644 --- a/config.c +++ b/config.c @@ -1410,6 +1410,11 @@ static int git_default_core_config(const char *var, const char *value, void *cb) return 0; } + if (!strcmp(var, "core.virtualizeobjects")) { + core_virtualize_objects = git_config_bool(var, value); + return 0; + } + /* Add other config variables here and to Documentation/config.txt. 
*/ return platform_core_config(var, value, cb); } diff --git a/connected.c b/connected.c index 167f4b8384272d..1c7df18e145051 100644 --- a/connected.c +++ b/connected.c @@ -49,6 +49,8 @@ int check_connected(oid_iterate_fn fn, void *cb_data, */ if (gvfs_config_is_set(GVFS_FETCH_SKIP_REACHABILITY_AND_UPLOADPACK)) return 0; + if (core_virtualize_objects) + return 0; if (!opt) opt = &defaults; diff --git a/environment.c b/environment.c index 92692c2c05f74d..bca4ade1a7e8d6 100644 --- a/environment.c +++ b/environment.c @@ -73,6 +73,7 @@ int core_gvfs; int merge_log_config = -1; int precomposed_unicode = -1; /* see probe_utf8_pathname_composition() */ unsigned long pack_size_limit_cfg; +int core_virtualize_objects; enum log_refs_config log_all_ref_updates = LOG_REFS_UNSET; #ifndef PROTECT_HFS_DEFAULT diff --git a/sha1-file.c b/sha1-file.c index ccd34dd9e8ce9c..70d2fc5a990227 100644 --- a/sha1-file.c +++ b/sha1-file.c @@ -1454,6 +1454,21 @@ void disable_obj_read_lock(void) pthread_mutex_destroy(&obj_read_mutex); } +static int run_read_object_hook(const struct object_id *oid) +{ + struct argv_array args = ARGV_ARRAY_INIT; + int ret; + uint64_t start; + + start = getnanotime(); + argv_array_push(&args, oid_to_hex(oid)); + ret = run_hook_argv(NULL, "read-object", args.argv); + argv_array_clear(&args); + trace_performance_since(start, "run_read_object_hook"); + + return ret; +} + int fetch_if_missing = 1; static int do_oid_object_info_extended(struct repository *r, @@ -1466,6 +1481,7 @@ static int do_oid_object_info_extended(struct repository *r, int rtype; const struct object_id *real = oid; int already_retried = 0; + int tried_hook = 0; if (flags & OBJECT_INFO_LOOKUP_REPLACE) @@ -1477,6 +1493,7 @@ static int do_oid_object_info_extended(struct repository *r, if (!oi) oi = &blank_oi; +retry: co = find_cached_object(real); if (co) { if (oi->typep) @@ -1511,6 +1528,11 @@ static int do_oid_object_info_extended(struct repository *r, reprepare_packed_git(r); if (find_pack_entry(r, real, &e)) break; + if (core_virtualize_objects && !tried_hook) { + tried_hook = 1; + if (!run_read_object_hook(oid)) + goto retry; + } } /* Check if it is a missing object */ From a04f755876bc8979cf46f1a12d958912f7363c82 Mon Sep 17 00:00:00 2001 From: Ben Peart Date: Wed, 15 Mar 2017 18:43:05 +0000 Subject: [PATCH 013/104] Hydrate missing loose objects in check_and_freshen() Hydrate missing loose objects in check_and_freshen() when running virtualized. Add test cases to verify read-object hook works when running virtualized. This hook is called in check_and_freshen() rather than check_and_freshen_local() to make the hook work also with alternates. 
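
For illustration, a minimal read-object hook along the lines of the one
used in the new test could look like this (the host repository path is
an example):

    #!/bin/sh
    # $1 is the hex object id of the missing object; hydrate it from a
    # host repository. Passing core.virtualizeobjects=false keeps the
    # hash-object call from recursing back into this hook.
    git --git-dir=/path/to/host/.git cat-file blob "$1" |
    git -c core.virtualizeobjects=false hash-object -w --stdin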
Helped-by: Kevin Willford Signed-off-by: Ben Peart --- sha1-file.c | 46 +++++++++++++++++++++++++++--------------- t/t0411-read-object.sh | 27 +++++++++++++++++++++++++ 2 files changed, 57 insertions(+), 16 deletions(-) create mode 100755 t/t0411-read-object.sh diff --git a/sha1-file.c b/sha1-file.c index 70d2fc5a990227..e8bc538ac94b7b 100644 --- a/sha1-file.c +++ b/sha1-file.c @@ -878,6 +878,24 @@ void prepare_alt_odb(struct repository *r) r->objects->loaded_alternates = 1; } +static int run_read_object_hook(const struct object_id *oid) +{ + struct child_process hook = CHILD_PROCESS_INIT; + const char *p; + + p = find_hook("read-object"); + if (!p) + return 1; + + argv_array_push(&hook.args, p); + argv_array_push(&hook.args, oid_to_hex(oid)); + hook.env = NULL; + hook.no_stdin = 1; + hook.stdout_to_stderr = 1; + + return run_command(&hook); +} + /* Returns 1 if we have successfully freshened the file, 0 otherwise. */ static int freshen_file(const char *fn) { @@ -928,8 +946,19 @@ static int check_and_freshen_nonlocal(const struct object_id *oid, int freshen) static int check_and_freshen(const struct object_id *oid, int freshen) { - return check_and_freshen_local(oid, freshen) || + int ret; + int tried_hook = 0; + +retry: + ret = check_and_freshen_local(oid, freshen) || check_and_freshen_nonlocal(oid, freshen); + if (!ret && core_virtualize_objects && !tried_hook) { + tried_hook = 1; + if (!run_read_object_hook(oid)) + goto retry; + } + + return ret; } int has_loose_object_nonlocal(const struct object_id *oid) @@ -1454,21 +1483,6 @@ void disable_obj_read_lock(void) pthread_mutex_destroy(&obj_read_mutex); } -static int run_read_object_hook(const struct object_id *oid) -{ - struct argv_array args = ARGV_ARRAY_INIT; - int ret; - uint64_t start; - - start = getnanotime(); - argv_array_push(&args, oid_to_hex(oid)); - ret = run_hook_argv(NULL, "read-object", args.argv); - argv_array_clear(&args); - trace_performance_since(start, "run_read_object_hook"); - - return ret; -} - int fetch_if_missing = 1; static int do_oid_object_info_extended(struct repository *r, diff --git a/t/t0411-read-object.sh b/t/t0411-read-object.sh new file mode 100755 index 00000000000000..0c3bfea1886cf9 --- /dev/null +++ b/t/t0411-read-object.sh @@ -0,0 +1,27 @@ +#!/bin/sh + +test_description='tests for read-object hook' + +. ./test-lib.sh + +test_expect_success 'setup host and guest repos' ' + test_commit zero && + hash1=$(git ls-tree HEAD | grep zero.t | cut -f1 | cut -d\ -f3) && + git init guest-repo && + cd guest-repo && + git config core.virtualizeobjects true && + write_script .git/hooks/read-object <<-\EOF + # pass core.virtualizeobjects=false so we dont end up calling the hook proc recursively + git --git-dir=../.git/ cat-file blob "$1" | git -c core.virtualizeobjects=false hash-object -w --stdin >/dev/null 2>&1 + EOF +' + +test_expect_success 'blobs can be retrieved from the host repo' ' + git cat-file blob "$hash1" +' + +test_expect_success 'invalid blobs generate errors' ' + test_must_fail git cat-file blob "invalid" +' + +test_done From 26cf7c24f2090ba13f958ebd433a3449b875eafe Mon Sep 17 00:00:00 2001 From: Ben Peart Date: Tue, 18 Jul 2017 12:04:44 +0200 Subject: [PATCH 014/104] Add support for read-object as a background process to retrieve missing objects This commit converts the existing read_object hook proc model for downloading missing blobs to use a background process that is started the first time git encounters a missing blob and stays running until git exits. 
Git and the read-object process communicate via stdin/stdout and a versioned, capability negotiated interface as documented in Documentation/technical/read-object-protocol.txt. The advantage of this over the previous hook proc is that it saves the overhead of spawning a new hook process for every missing blob. The model for the background process was refactored from the recent git LFS work. I refactored that code into a shared module (sub-process.c/h) and then updated convert.c to consume the new library. I then used the same sub-process module when implementing the read-object background process. Signed-off-by: Ben Peart --- .../technical/read-object-protocol.txt | 102 +++++++++++++++ contrib/long-running-read-object/example.pl | 114 ++++++++++++++++ sha1-file.c | 122 ++++++++++++++++-- t/t0410/read-object | 114 ++++++++++++++++ t/t0411-read-object.sh | 24 ++-- 5 files changed, 450 insertions(+), 26 deletions(-) create mode 100644 Documentation/technical/read-object-protocol.txt create mode 100644 contrib/long-running-read-object/example.pl create mode 100755 t/t0410/read-object diff --git a/Documentation/technical/read-object-protocol.txt b/Documentation/technical/read-object-protocol.txt new file mode 100644 index 00000000000000..a893b46e7c28a9 --- /dev/null +++ b/Documentation/technical/read-object-protocol.txt @@ -0,0 +1,102 @@ +Read Object Process +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The read-object process enables Git to read all missing blobs with a +single process invocation for the entire life of a single Git command. +This is achieved by using a packet format (pkt-line, see technical/ +protocol-common.txt) based protocol over standard input and standard +output as follows. All packets, except for the "*CONTENT" packets and +the "0000" flush packet, are considered text and therefore are +terminated by a LF. + +Git starts the process when it encounters the first missing object that +needs to be retrieved. After the process is started, Git sends a welcome +message ("git-read-object-client"), a list of supported protocol version +numbers, and a flush packet. Git expects to read a welcome response +message ("git-read-object-server"), exactly one protocol version number +from the previously sent list, and a flush packet. All further +communication will be based on the selected version. + +The remaining protocol description below documents "version=1". Please +note that "version=42" in the example below does not exist and is only +there to illustrate how the protocol would look with more than one +version. + +After the version negotiation Git sends a list of all capabilities that +it supports and a flush packet. Git expects to read a list of desired +capabilities, which must be a subset of the supported capabilities list, +and a flush packet as response: +------------------------ +packet: git> git-read-object-client +packet: git> version=1 +packet: git> version=42 +packet: git> 0000 +packet: git< git-read-object-server +packet: git< version=1 +packet: git< 0000 +packet: git> capability=get +packet: git> capability=have +packet: git> capability=put +packet: git> capability=not-yet-invented +packet: git> 0000 +packet: git< capability=get +packet: git< 0000 +------------------------ +The only supported capability in version 1 is "get". + +Afterwards Git sends a list of "key=value" pairs terminated with a flush +packet. The list will contain at least the command (based on the +supported capabilities) and the sha1 of the object to retrieve. 
Please +note, that the process must not send any response before it received the +final flush packet. + +When the process receives the "get" command, it should make the requested +object available in the git object store and then return success. Git will +then check the object store again and this time find it and proceed. +------------------------ +packet: git> command=get +packet: git> sha1=0a214a649e1b3d5011e14a3dc227753f2bd2be05 +packet: git> 0000 +------------------------ + +The process is expected to respond with a list of "key=value" pairs +terminated with a flush packet. If the process does not experience +problems then the list must contain a "success" status. +------------------------ +packet: git< status=success +packet: git< 0000 +------------------------ + +In case the process cannot or does not want to process the content, it +is expected to respond with an "error" status. +------------------------ +packet: git< status=error +packet: git< 0000 +------------------------ + +In case the process cannot or does not want to process the content as +well as any future content for the lifetime of the Git process, then it +is expected to respond with an "abort" status at any point in the +protocol. +------------------------ +packet: git< status=abort +packet: git< 0000 +------------------------ + +Git neither stops nor restarts the process in case the "error"/"abort" +status is set. + +If the process dies during the communication or does not adhere to the +protocol then Git will stop the process and restart it with the next +object that needs to be processed. + +After the read-object process has processed an object it is expected to +wait for the next "key=value" list containing a command. Git will close +the command pipe on exit. The process is expected to detect EOF and exit +gracefully on its own. Git will wait until the process has stopped. + +A long running read-object process demo implementation can be found in +`contrib/long-running-read-object/example.pl` located in the Git core +repository. If you develop your own long running process then the +`GIT_TRACE_PACKET` environment variables can be very helpful for +debugging (see linkgit:git[1]). diff --git a/contrib/long-running-read-object/example.pl b/contrib/long-running-read-object/example.pl new file mode 100644 index 00000000000000..b8f37f836a813c --- /dev/null +++ b/contrib/long-running-read-object/example.pl @@ -0,0 +1,114 @@ +#!/usr/bin/perl +# +# Example implementation for the Git read-object protocol version 1 +# See Documentation/technical/read-object-protocol.txt +# +# Allows you to test the ability for blobs to be pulled from a host git repo +# "on demand." Called when git needs a blob it couldn't find locally due to +# a lazy clone that only cloned the commits and trees. +# +# A lazy clone can be simulated via the following commands from the host repo +# you wish to create a lazy clone of: +# +# cd /host_repo +# git rev-parse HEAD +# git init /guest_repo +# git cat-file --batch-check --batch-all-objects | grep -v 'blob' | +# cut -d' ' -f1 | git pack-objects /guest_repo/.git/objects/pack/noblobs +# cd /guest_repo +# git config core.virtualizeobjects true +# git reset --hard +# +# Please note, this sample is a minimal skeleton. No proper error handling +# was implemented. 
+# + +use strict; +use warnings; + +# +# Point $DIR to the folder where your host git repo is located so we can pull +# missing objects from it +# +my $DIR = "/host_repo/.git/"; + +sub packet_bin_read { + my $buffer; + my $bytes_read = read STDIN, $buffer, 4; + if ( $bytes_read == 0 ) { + + # EOF - Git stopped talking to us! + exit(); + } + elsif ( $bytes_read != 4 ) { + die "invalid packet: '$buffer'"; + } + my $pkt_size = hex($buffer); + if ( $pkt_size == 0 ) { + return ( 1, "" ); + } + elsif ( $pkt_size > 4 ) { + my $content_size = $pkt_size - 4; + $bytes_read = read STDIN, $buffer, $content_size; + if ( $bytes_read != $content_size ) { + die "invalid packet ($content_size bytes expected; $bytes_read bytes read)"; + } + return ( 0, $buffer ); + } + else { + die "invalid packet size: $pkt_size"; + } +} + +sub packet_txt_read { + my ( $res, $buf ) = packet_bin_read(); + unless ( $buf =~ s/\n$// ) { + die "A non-binary line MUST be terminated by an LF."; + } + return ( $res, $buf ); +} + +sub packet_bin_write { + my $buf = shift; + print STDOUT sprintf( "%04x", length($buf) + 4 ); + print STDOUT $buf; + STDOUT->flush(); +} + +sub packet_txt_write { + packet_bin_write( $_[0] . "\n" ); +} + +sub packet_flush { + print STDOUT sprintf( "%04x", 0 ); + STDOUT->flush(); +} + +( packet_txt_read() eq ( 0, "git-read-object-client" ) ) || die "bad initialize"; +( packet_txt_read() eq ( 0, "version=1" ) ) || die "bad version"; +( packet_bin_read() eq ( 1, "" ) ) || die "bad version end"; + +packet_txt_write("git-read-object-server"); +packet_txt_write("version=1"); +packet_flush(); + +( packet_txt_read() eq ( 0, "capability=get" ) ) || die "bad capability"; +( packet_bin_read() eq ( 1, "" ) ) || die "bad capability end"; + +packet_txt_write("capability=get"); +packet_flush(); + +while (1) { + my ($command) = packet_txt_read() =~ /^command=([^=]+)$/; + + if ( $command eq "get" ) { + my ($sha1) = packet_txt_read() =~ /^sha1=([0-9a-f]{40})$/; + packet_bin_read(); + + system ('git --git-dir="' . $DIR . '" cat-file blob ' . $sha1 . ' | git -c core.virtualizeobjects=false hash-object -w --stdin >/dev/null 2>&1'); + packet_txt_write(($?) ? "status=error" : "status=success"); + packet_flush(); + } else { + die "bad command '$command'"; + } +} diff --git a/sha1-file.c b/sha1-file.c index e8bc538ac94b7b..5253726d5889d2 100644 --- a/sha1-file.c +++ b/sha1-file.c @@ -32,6 +32,9 @@ #include "packfile.h" #include "object-store.h" #include "promisor-remote.h" +#include "sigchain.h" +#include "sub-process.h" +#include "pkt-line.h" /* The maximum size for an object header. 
*/ #define MAX_HEADER_LEN 32 @@ -878,22 +881,113 @@ void prepare_alt_odb(struct repository *r) r->objects->loaded_alternates = 1; } -static int run_read_object_hook(const struct object_id *oid) +#define CAP_GET (1u<<0) + +static int subprocess_map_initialized; +static struct hashmap subprocess_map; + +struct read_object_process { + struct subprocess_entry subprocess; + unsigned int supported_capabilities; +}; + +static int start_read_object_fn(struct subprocess_entry *subprocess) { - struct child_process hook = CHILD_PROCESS_INIT; - const char *p; + struct read_object_process *entry = (struct read_object_process *)subprocess; + static int versions[] = {1, 0}; + static struct subprocess_capability capabilities[] = { + { "get", CAP_GET }, + { NULL, 0 } + }; - p = find_hook("read-object"); - if (!p) - return 1; + return subprocess_handshake(subprocess, "git-read-object", versions, + NULL, capabilities, + &entry->supported_capabilities); +} + +static int read_object_process(const struct object_id *oid) +{ + int err; + struct read_object_process *entry; + struct child_process *process; + struct strbuf status = STRBUF_INIT; + const char *cmd = find_hook("read-object"); + uint64_t start; + + start = getnanotime(); + + if (!subprocess_map_initialized) { + subprocess_map_initialized = 1; + hashmap_init(&subprocess_map, (hashmap_cmp_fn)cmd2process_cmp, + NULL, 0); + entry = NULL; + } else { + entry = (struct read_object_process *) subprocess_find_entry(&subprocess_map, cmd); + } + + if (!entry) { + entry = xmalloc(sizeof(*entry)); + entry->supported_capabilities = 0; + + if (subprocess_start(&subprocess_map, &entry->subprocess, cmd, + start_read_object_fn)) { + free(entry); + return -1; + } + } + process = &entry->subprocess.process; + + if (!(CAP_GET & entry->supported_capabilities)) + return -1; + + sigchain_push(SIGPIPE, SIG_IGN); + + err = packet_write_fmt_gently(process->in, "command=get\n"); + if (err) + goto done; + + err = packet_write_fmt_gently(process->in, "sha1=%s\n", oid_to_hex(oid)); + if (err) + goto done; + + err = packet_flush_gently(process->in); + if (err) + goto done; + + err = subprocess_read_status(process->out, &status); + err = err ? err : strcmp(status.buf, "success"); + +done: + sigchain_pop(SIGPIPE); + + if (err || errno == EPIPE) { + err = err ? err : errno; + if (!strcmp(status.buf, "error")) { + /* The process signaled a problem with the file. */ + } + else if (!strcmp(status.buf, "abort")) { + /* + * The process signaled a permanent problem. Don't try to read + * objects with the same command for the lifetime of the current + * Git process. + */ + entry->supported_capabilities &= ~CAP_GET; + } + else { + /* + * Something went wrong with the read-object process. + * Force shutdown and restart if needed. + */ + error("external process '%s' failed", cmd); + subprocess_stop(&subprocess_map, + (struct subprocess_entry *)entry); + free(entry); + } + } - argv_array_push(&hook.args, p); - argv_array_push(&hook.args, oid_to_hex(oid)); - hook.env = NULL; - hook.no_stdin = 1; - hook.stdout_to_stderr = 1; + trace_performance_since(start, "read_object_process"); - return run_command(&hook); + return err; } /* Returns 1 if we have successfully freshened the file, 0 otherwise. 
*/ @@ -954,7 +1048,7 @@ static int check_and_freshen(const struct object_id *oid, int freshen) check_and_freshen_nonlocal(oid, freshen); if (!ret && core_virtualize_objects && !tried_hook) { tried_hook = 1; - if (!run_read_object_hook(oid)) + if (!read_object_process(oid)) goto retry; } @@ -1544,7 +1638,7 @@ static int do_oid_object_info_extended(struct repository *r, break; if (core_virtualize_objects && !tried_hook) { tried_hook = 1; - if (!run_read_object_hook(oid)) + if (!read_object_process(oid)) goto retry; } } diff --git a/t/t0410/read-object b/t/t0410/read-object new file mode 100755 index 00000000000000..85e997c930581c --- /dev/null +++ b/t/t0410/read-object @@ -0,0 +1,114 @@ +#!/usr/bin/perl +# +# Example implementation for the Git read-object protocol version 1 +# See Documentation/technical/read-object-protocol.txt +# +# Allows you to test the ability for blobs to be pulled from a host git repo +# "on demand." Called when git needs a blob it couldn't find locally due to +# a lazy clone that only cloned the commits and trees. +# +# A lazy clone can be simulated via the following commands from the host repo +# you wish to create a lazy clone of: +# +# cd /host_repo +# git rev-parse HEAD +# git init /guest_repo +# git cat-file --batch-check --batch-all-objects | grep -v 'blob' | +# cut -d' ' -f1 | git pack-objects /guest_repo/.git/objects/pack/noblobs +# cd /guest_repo +# git config core.virtualizeobjects true +# git reset --hard +# +# Please note, this sample is a minimal skeleton. No proper error handling +# was implemented. +# + +use strict; +use warnings; + +# +# Point $DIR to the folder where your host git repo is located so we can pull +# missing objects from it +# +my $DIR = "../.git/"; + +sub packet_bin_read { + my $buffer; + my $bytes_read = read STDIN, $buffer, 4; + if ( $bytes_read == 0 ) { + + # EOF - Git stopped talking to us! + exit(); + } + elsif ( $bytes_read != 4 ) { + die "invalid packet: '$buffer'"; + } + my $pkt_size = hex($buffer); + if ( $pkt_size == 0 ) { + return ( 1, "" ); + } + elsif ( $pkt_size > 4 ) { + my $content_size = $pkt_size - 4; + $bytes_read = read STDIN, $buffer, $content_size; + if ( $bytes_read != $content_size ) { + die "invalid packet ($content_size bytes expected; $bytes_read bytes read)"; + } + return ( 0, $buffer ); + } + else { + die "invalid packet size: $pkt_size"; + } +} + +sub packet_txt_read { + my ( $res, $buf ) = packet_bin_read(); + unless ( $buf =~ s/\n$// ) { + die "A non-binary line MUST be terminated by an LF."; + } + return ( $res, $buf ); +} + +sub packet_bin_write { + my $buf = shift; + print STDOUT sprintf( "%04x", length($buf) + 4 ); + print STDOUT $buf; + STDOUT->flush(); +} + +sub packet_txt_write { + packet_bin_write( $_[0] . 
"\n" ); +} + +sub packet_flush { + print STDOUT sprintf( "%04x", 0 ); + STDOUT->flush(); +} + +( packet_txt_read() eq ( 0, "git-read-object-client" ) ) || die "bad initialize"; +( packet_txt_read() eq ( 0, "version=1" ) ) || die "bad version"; +( packet_bin_read() eq ( 1, "" ) ) || die "bad version end"; + +packet_txt_write("git-read-object-server"); +packet_txt_write("version=1"); +packet_flush(); + +( packet_txt_read() eq ( 0, "capability=get" ) ) || die "bad capability"; +( packet_bin_read() eq ( 1, "" ) ) || die "bad capability end"; + +packet_txt_write("capability=get"); +packet_flush(); + +while (1) { + my ($command) = packet_txt_read() =~ /^command=([^=]+)$/; + + if ( $command eq "get" ) { + my ($sha1) = packet_txt_read() =~ /^sha1=([0-9a-f]{40})$/; + packet_bin_read(); + + system ('git --git-dir="' . $DIR . '" cat-file blob ' . $sha1 . ' | git -c core.virtualizeobjects=false hash-object -w --stdin >/dev/null 2>&1'); + packet_txt_write(($?) ? "status=error" : "status=success"); + packet_flush(); + } else { + die "bad command '$command'"; + } +} diff --git a/t/t0411-read-object.sh b/t/t0411-read-object.sh index 0c3bfea1886cf9..b8d7521c2c9106 100755 --- a/t/t0411-read-object.sh +++ b/t/t0411-read-object.sh @@ -1,27 +1,27 @@ #!/bin/sh -test_description='tests for read-object hook' +test_description='tests for long running read-object process' . ./test-lib.sh -test_expect_success 'setup host and guest repos' ' +test_expect_success 'setup host repo with a root commit' ' test_commit zero && - hash1=$(git ls-tree HEAD | grep zero.t | cut -f1 | cut -d\ -f3) && - git init guest-repo && - cd guest-repo && - git config core.virtualizeobjects true && - write_script .git/hooks/read-object <<-\EOF - # pass core.virtualizeobjects=false so we dont end up calling the hook proc recursively - git --git-dir=../.git/ cat-file blob "$1" | git -c core.virtualizeobjects=false hash-object -w --stdin >/dev/null 2>&1 - EOF + hash1=$(git ls-tree HEAD | grep zero.t | cut -f1 | cut -d\ -f3) ' test_expect_success 'blobs can be retrieved from the host repo' ' - git cat-file blob "$hash1" + git init guest-repo && + (cd guest-repo && + mkdir -p .git/hooks && + cp $TEST_DIRECTORY/t0410/read-object .git/hooks/ && + git config core.virtualizeobjects true && + git cat-file blob "$hash1") ' test_expect_success 'invalid blobs generate errors' ' - test_must_fail git cat-file blob "invalid" + (cd guest-repo && + test_must_fail git cat-file blob "invalid") ' + test_done From 0e12bee541e8d7007602a8b52bc5608770422098 Mon Sep 17 00:00:00 2001 From: Ben Peart Date: Tue, 24 May 2016 00:32:38 +0000 Subject: [PATCH 015/104] gvfs: add global command pre and post hook procs This adds hard-coded call to GVFS.hooks.exe before and after each Git command runs. To make sure that this is only called on repositories cloned with GVFS, we test for the tell-tale .gvfs. 
Signed-off-by: Ben Peart --- git.c | 79 ++++++++++++++++++++++++++++++++++-- run-command.c | 51 ++++++++++++++++++++++- t/t0400-pre-command-hook.sh | 34 ++++++++++++++++ t/t0401-post-command-hook.sh | 32 +++++++++++++++ 4 files changed, 192 insertions(+), 4 deletions(-) create mode 100755 t/t0400-pre-command-hook.sh create mode 100755 t/t0401-post-command-hook.sh diff --git a/git.c b/git.c index a2d337eed77c13..27a81a83b0c0d1 100644 --- a/git.c +++ b/git.c @@ -5,6 +5,7 @@ #include "run-command.h" #include "alias.h" #include "shallow.h" +#include "dir.h" #define RUN_SETUP (1<<0) #define RUN_SETUP_GENTLY (1<<1) @@ -403,6 +404,63 @@ static int handle_alias(int *argcp, const char ***argv) return ret; } +/* Runs pre/post-command hook */ +static struct argv_array sargv = ARGV_ARRAY_INIT; +static int run_post_hook = 0; +static int exit_code = -1; + +static int run_pre_command_hook(const char **argv) +{ + char *lock; + int ret = 0; + + /* + * Ensure the global pre/post command hook is only called for + * the outer command and not when git is called recursively + * or spawns multiple commands (like with the alias command) + */ + lock = getenv("COMMAND_HOOK_LOCK"); + if (lock && !strcmp(lock, "true")) + return 0; + setenv("COMMAND_HOOK_LOCK", "true", 1); + + /* call the hook proc */ + argv_array_pushv(&sargv, argv); + ret = run_hook_argv(NULL, "pre-command", sargv.argv); + + if (!ret) + run_post_hook = 1; + return ret; +} + +static int run_post_command_hook(void) +{ + char *lock; + int ret = 0; + + /* + * Only run post_command if pre_command succeeded in this process + */ + if (!run_post_hook) + return 0; + lock = getenv("COMMAND_HOOK_LOCK"); + if (!lock || strcmp(lock, "true")) + return 0; + + argv_array_pushf(&sargv, "--exit_code=%u", exit_code); + ret = run_hook_argv(NULL, "post-command", sargv.argv); + + run_post_hook = 0; + argv_array_clear(&sargv); + setenv("COMMAND_HOOK_LOCK", "false", 1); + return ret; +} + +static void post_command_hook_atexit(void) +{ + run_post_command_hook(); +} + static int run_builtin(struct cmd_struct *p, int argc, const char **argv) { int status, help; @@ -439,18 +497,23 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv) if (!help && p->option & NEED_WORK_TREE) setup_work_tree(); + if (run_pre_command_hook(argv)) + die("pre-command hook aborted command"); + trace_argv_printf(argv, "trace: built-in: git"); trace2_cmd_name(p->cmd); trace2_cmd_list_config(); trace2_cmd_list_env_vars(); validate_cache_entries(the_repository->index); - status = p->fn(argc, argv, prefix); + exit_code = status = p->fn(argc, argv, prefix); validate_cache_entries(the_repository->index); if (status) return status; + run_post_command_hook(); + /* Somebody closed stdout? */ if (fstat(fileno(stdout), &st)) return 0; @@ -701,13 +764,16 @@ static void execv_dashed_external(const char **argv) */ trace_argv_printf(cmd.args.argv, "trace: exec:"); + if (run_pre_command_hook(cmd.args.argv)) + die("pre-command hook aborted command"); + /* * If we fail because the command is not found, it is * OK to return. Otherwise, we just pass along the status code, * or our usual generic code if we were not even able to exec * the program. 
*/ - status = run_command(&cmd); + exit_code = status = run_command(&cmd); /* * If the child process ran and we are now going to exit, emit a @@ -718,6 +784,8 @@ static void execv_dashed_external(const char **argv) exit(status); else if (errno != ENOENT) exit(128); + + run_post_command_hook(); } static int run_argv(int *argcp, const char ***argv) @@ -825,6 +893,7 @@ int cmd_main(int argc, const char **argv) } trace_command_performance(argv); + atexit(post_command_hook_atexit); /* * "git-xxxx" is the same as "git xxxx", but we obviously: @@ -852,10 +921,14 @@ int cmd_main(int argc, const char **argv) } else { /* The user didn't specify a command; give them help */ commit_pager_choice(); + if (run_pre_command_hook(argv)) + die("pre-command hook aborted command"); printf(_("usage: %s\n\n"), git_usage_string); list_common_cmds_help(); printf("\n%s\n", _(git_more_info_string)); - exit(1); + exit_code = 1; + run_post_command_hook(); + exit(exit_code); } cmd = argv[0]; diff --git a/run-command.c b/run-command.c index 5aed5228a590ec..29067f9fd9286e 100644 --- a/run-command.c +++ b/run-command.c @@ -7,6 +7,7 @@ #include "strbuf.h" #include "string-list.h" #include "quote.h" +#include "config.h" void child_process_init(struct child_process *child) { @@ -1308,12 +1309,60 @@ int async_with_fork(void) #endif } +static int early_hooks_path_config(const char *var, const char *value, void *data) +{ + if (!strcmp(var, "core.hookspath")) + return git_config_pathname((const char **)data, var, value); + + return 0; +} + +/* Discover the hook before setup_git_directory() was called */ +static const char *hook_path_early(const char *name, struct strbuf *result) +{ + static struct strbuf hooks_dir = STRBUF_INIT; + static int initialized; + + if (initialized < 0) + return NULL; + + if (!initialized) { + struct strbuf gitdir = STRBUF_INIT, commondir = STRBUF_INIT; + const char *early_hooks_dir = NULL; + + if (discover_git_directory(&commondir, &gitdir) < 0) { + initialized = -1; + return NULL; + } + + read_early_config(early_hooks_path_config, &early_hooks_dir); + if (!early_hooks_dir) + strbuf_addf(&hooks_dir, "%s/hooks/", commondir.buf); + else { + strbuf_add_absolute_path(&hooks_dir, early_hooks_dir); + strbuf_addch(&hooks_dir, '/'); + } + + strbuf_release(&gitdir); + strbuf_release(&commondir); + + initialized = 1; + } + + strbuf_addf(result, "%s%s", hooks_dir.buf, name); + return result->buf; +} + const char *find_hook(const char *name) { static struct strbuf path = STRBUF_INIT; strbuf_reset(&path); - strbuf_git_path(&path, "hooks/%s", name); + if (have_git_dir()) + strbuf_git_path(&path, "hooks/%s", name); + else if (!hook_path_early(name, &path)) + return NULL; + if (access(path.buf, X_OK) < 0) { int err = errno; diff --git a/t/t0400-pre-command-hook.sh b/t/t0400-pre-command-hook.sh new file mode 100755 index 00000000000000..4f4f610b52b0a0 --- /dev/null +++ b/t/t0400-pre-command-hook.sh @@ -0,0 +1,34 @@ +#!/bin/sh + +test_description='pre-command hook' + +. 
./test-lib.sh + +test_expect_success 'with no hook' ' + echo "first" > file && + git add file && + git commit -m "first" +' + +test_expect_success 'with succeeding hook' ' + mkdir -p .git/hooks && + write_script .git/hooks/pre-command <<-EOF && + echo "\$*" >\$(git rev-parse --git-dir)/pre-command.out + EOF + echo "second" >> file && + git add file && + test "add file" = "$(cat .git/pre-command.out)" && + echo Hello | git hash-object --stdin && + test "hash-object --stdin" = "$(cat .git/pre-command.out)" +' + +test_expect_success 'with failing hook' ' + write_script .git/hooks/pre-command <<-EOF && + exit 1 + EOF + echo "third" >> file && + test_must_fail git add file && + test_path_is_missing "$(cat .git/pre-command.out)" +' + +test_done diff --git a/t/t0401-post-command-hook.sh b/t/t0401-post-command-hook.sh new file mode 100755 index 00000000000000..64646f7ad03b57 --- /dev/null +++ b/t/t0401-post-command-hook.sh @@ -0,0 +1,32 @@ +#!/bin/sh + +test_description='post-command hook' + +. ./test-lib.sh + +test_expect_success 'with no hook' ' + echo "first" > file && + git add file && + git commit -m "first" +' + +test_expect_success 'with succeeding hook' ' + mkdir -p .git/hooks && + write_script .git/hooks/post-command <<-EOF && + echo "\$*" >\$(git rev-parse --git-dir)/post-command.out + EOF + echo "second" >> file && + git add file && + test "add file --exit_code=0" = "$(cat .git/post-command.out)" +' + +test_expect_success 'with failing pre-command hook' ' + write_script .git/hooks/pre-command <<-EOF && + exit 1 + EOF + echo "third" >> file && + test_must_fail git add file && + test_path_is_missing "$(cat .git/post-command.out)" +' + +test_done From 5bad1782d27353c7b3a16f8a9470fc13693358f3 Mon Sep 17 00:00:00 2001 From: Johannes Schindelin Date: Fri, 8 Sep 2017 11:32:43 +0200 Subject: [PATCH 016/104] sha1_file: when writing objects, skip the read_object_hook If we are going to write an object there is no use in calling the read object hook to get an object from a potentially remote source. We would rather just write out the object and avoid the potential round trip for an object that doesn't exist. This change adds a flag to the check_and_freshen() and freshen_loose_object() functions' signatures so that the hook is bypassed when the functions are called before writing loose objects. The check for a local object is still performed so we don't overwrite something that has already been written to one of the objects directories. Based on a patch by Kevin Willford. 
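For example, with core.virtualizeobjects enabled, hashing and writing
a brand-new object should no longer consult the hook (a sketch; the
file name is arbitrary):

	echo hello >hello.txt &&
	git -c core.virtualizeobjects=true hash-object -w --stdin <hello.txt
	# write_object_file() now calls freshen_loose_object() with
	# skip_virtualized_objects=1, so there is no read-object round
	# trip for the object we are about to write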
Signed-off-by: Johannes Schindelin --- sha1-file.c | 17 ++++++++++------- t/t0410/read-object | 4 ++++ t/t0411-read-object.sh | 7 +++++++ 3 files changed, 21 insertions(+), 7 deletions(-) diff --git a/sha1-file.c b/sha1-file.c index 5253726d5889d2..d15c1d1b460271 100644 --- a/sha1-file.c +++ b/sha1-file.c @@ -1038,7 +1038,8 @@ static int check_and_freshen_nonlocal(const struct object_id *oid, int freshen) return 0; } -static int check_and_freshen(const struct object_id *oid, int freshen) +static int check_and_freshen(const struct object_id *oid, int freshen, + int skip_virtualized_objects) { int ret; int tried_hook = 0; @@ -1046,7 +1047,8 @@ static int check_and_freshen(const struct object_id *oid, int freshen) retry: ret = check_and_freshen_local(oid, freshen) || check_and_freshen_nonlocal(oid, freshen); - if (!ret && core_virtualize_objects && !tried_hook) { + if (!ret && core_virtualize_objects && !skip_virtualized_objects && + !tried_hook) { tried_hook = 1; if (!read_object_process(oid)) goto retry; @@ -1062,7 +1064,7 @@ int has_loose_object_nonlocal(const struct object_id *oid) static int has_loose_object(const struct object_id *oid) { - return check_and_freshen(oid, 0); + return check_and_freshen(oid, 0, 0); } static void mmap_limit_check(size_t length) @@ -2039,9 +2041,10 @@ static int write_loose_object(const struct object_id *oid, char *hdr, return finalize_object_file(tmp_file.buf, filename.buf); } -static int freshen_loose_object(const struct object_id *oid) +static int freshen_loose_object(const struct object_id *oid, + int skip_virtualized_objects) { - return check_and_freshen(oid, 1); + return check_and_freshen(oid, 1, skip_virtualized_objects); } static int freshen_packed_object(const struct object_id *oid) @@ -2068,7 +2071,7 @@ int write_object_file(const void *buf, unsigned long len, const char *type, */ write_object_file_prepare(the_hash_algo, buf, len, type, oid, hdr, &hdrlen); - if (freshen_packed_object(oid) || freshen_loose_object(oid)) + if (freshen_packed_object(oid) || freshen_loose_object(oid, 1)) return 0; return write_loose_object(oid, hdr, hdrlen, buf, len, 0); } @@ -2088,7 +2091,7 @@ int hash_object_file_literally(const void *buf, unsigned long len, if (!(flags & HASH_WRITE_OBJECT)) goto cleanup; - if (freshen_packed_object(oid) || freshen_loose_object(oid)) + if (freshen_packed_object(oid) || freshen_loose_object(oid, 1)) goto cleanup; status = write_loose_object(oid, header, hdrlen, buf, len, 0); diff --git a/t/t0410/read-object b/t/t0410/read-object index 85e997c930581c..2360d4916ee81c 100755 --- a/t/t0410/read-object +++ b/t/t0410/read-object @@ -108,6 +108,10 @@ while (1) { system ('git --git-dir="' . $DIR . '" cat-file blob ' . $sha1 . ' | git -c core.virtualizeobjects=false hash-object -w --stdin >/dev/null 2>&1'); packet_txt_write(($?) ? "status=error" : "status=success"); packet_flush(); + + open my $log, '>>.git/read-object-hook.log'; + print $log "Read object $sha1, exit code $?\n"; + close $log; } else { die "bad command '$command'"; } diff --git a/t/t0411-read-object.sh b/t/t0411-read-object.sh index b8d7521c2c9106..af97ec5b50f356 100755 --- a/t/t0411-read-object.sh +++ b/t/t0411-read-object.sh @@ -23,5 +23,12 @@ test_expect_success 'invalid blobs generate errors' ' test_must_fail git cat-file blob "invalid") ' +test_expect_success 'read-object-hook is bypassed when writing objects' ' + (cd guest-repo && + echo hello >hello.txt && + git add hello.txt && + hash="$(git rev-parse --verify :hello.txt)" && + ! 
grep "$hash" .git/read-object-hook.log) +' test_done From 3ff73431293a5be3f97e8fac18f49a7988a2c7d2 Mon Sep 17 00:00:00 2001 From: Johannes Schindelin Date: Thu, 16 Mar 2017 21:07:54 +0100 Subject: [PATCH 017/104] t0400: verify that the hook is called correctly from a subdirectory Suggested by Ben Peart. Signed-off-by: Johannes Schindelin --- t/t0400-pre-command-hook.sh | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/t/t0400-pre-command-hook.sh b/t/t0400-pre-command-hook.sh index 4f4f610b52b0a0..83c453c9643eae 100755 --- a/t/t0400-pre-command-hook.sh +++ b/t/t0400-pre-command-hook.sh @@ -31,4 +31,27 @@ test_expect_success 'with failing hook' ' test_path_is_missing "$(cat .git/pre-command.out)" ' +test_expect_success 'in a subdirectory' ' + echo touch i-was-here | write_script .git/hooks/pre-command && + mkdir sub && + ( + cd sub && + git version + ) && + test_path_is_file sub/i-was-here +' + +test_expect_success 'in a subdirectory, using an alias' ' + git reset --hard && + echo "echo \"\$@; \$(pwd)\" >>log" | + write_script .git/hooks/pre-command && + mkdir -p sub && + ( + cd sub && + git -c alias.v="version" v + ) && + test_path_is_missing log && + test_line_count = 2 sub/log +' + test_done From 7a272055c135449e04bc87e211ed4aca179d4a4a Mon Sep 17 00:00:00 2001 From: Jeff Hostetler Date: Tue, 22 Aug 2017 11:54:23 -0400 Subject: [PATCH 018/104] status: add status serialization mechanism Teach STATUS to optionally serialize the results of a status computation to a file. Teach STATUS to optionally read an existing serialization file and simply print the results, rather than actually scanning. This is intended for immediate status results on extremely large repos and assumes the use of a service/daemon to maintain a fresh current status snapshot. Signed-off-by: Jeff Hostetler --- Documentation/config/status.txt | 6 + Documentation/git-status.txt | 33 + .../technical/status-serialization-format.txt | 107 ++++ Makefile | 2 + builtin/commit.c | 123 +++- contrib/completion/git-completion.bash | 2 +- pkt-line.c | 2 +- pkt-line.h | 1 + t/t7523-status-complete-untracked.sh | 39 ++ t/t7524-serialized-status.sh | 140 ++++ wt-status-deserialize.c | 597 ++++++++++++++++++ wt-status-serialize.c | 213 +++++++ wt-status.c | 6 + wt-status.h | 52 +- 14 files changed, 1319 insertions(+), 4 deletions(-) create mode 100644 Documentation/technical/status-serialization-format.txt create mode 100755 t/t7523-status-complete-untracked.sh create mode 100755 t/t7524-serialized-status.sh create mode 100644 wt-status-deserialize.c create mode 100644 wt-status-serialize.c diff --git a/Documentation/config/status.txt b/Documentation/config/status.txt index 0fc704ab80b223..65cecc12e80c80 100644 --- a/Documentation/config/status.txt +++ b/Documentation/config/status.txt @@ -75,3 +75,9 @@ status.submoduleSummary:: the --ignore-submodules=dirty command-line option or the 'git submodule summary' command, which shows a similar output but does not honor these settings. + +status.deserializePath:: + EXPERIMENTAL, Pathname to a file containing cached status results + generated by `--serialize`. This will be overridden by + `--deserialize=` on the command line. If the cache file is + invalid or stale, git will fall-back and compute status normally. 
diff --git a/Documentation/git-status.txt b/Documentation/git-status.txt
index 0646c606cfa46f..990bba01a980c7 100644
--- a/Documentation/git-status.txt
+++ b/Documentation/git-status.txt
@@ -156,6 +156,19 @@ ignored, then the directory is not shown, but all contents are shown.
 	update it afterwards if any changes were detected. Defaults to
 	`--lock-index`.
 
+--serialize[=<version>]::
+	(EXPERIMENTAL) Serialize raw status results to stdout in a
+	format suitable for use by `--deserialize`. Valid values for
+	`<version>` are "1" and "v1".
+
+--deserialize[=<path>]::
+	(EXPERIMENTAL) Deserialize raw status results from a file or
+	stdin rather than scanning the worktree. If `<path>` is omitted
+	and `status.deserializePath` is unset, input is read from stdin.
+--no-deserialize::
+	(EXPERIMENTAL) Disable implicit deserialization of status results
+	from the value of `status.deserializePath`.
+
 <pathspec>...::
 	See the 'pathspec' entry in linkgit:gitglossary[7].
 
@@ -405,6 +418,26 @@ quoted as explained for the configuration variable `core.quotePath`
 (see linkgit:git-config[1]).
 
+SERIALIZATION and DESERIALIZATION (EXPERIMENTAL)
+------------------------------------------------
+
+The `--serialize` option allows git to cache the result of a
+possibly time-consuming status scan to a binary file. A local
+service/daemon watching file system events could use this to
+periodically pre-compute a fresh status result.
+
+Interactive users could then use `--deserialize` to simply
+(and immediately) print the last-known-good result without
+waiting for the status scan.
+
+The binary serialization file format includes some worktree state
+information allowing `--deserialize` to reject the cached data
+and force a normal status scan if, for example, the commit, branch,
+or status modes/options change. The format cannot, however, indicate
+when the cached data is otherwise stale -- that coordination belongs
+to the task driving the serializations.
+
+
 CONFIGURATION
 -------------
 
diff --git a/Documentation/technical/status-serialization-format.txt b/Documentation/technical/status-serialization-format.txt
new file mode 100644
index 00000000000000..475ae814495581
--- /dev/null
+++ b/Documentation/technical/status-serialization-format.txt
@@ -0,0 +1,107 @@
+Git status serialization format
+===============================
+
+Git status serialization enables git to dump the results of a status scan
+to a binary file. This file can then be loaded by later status invocations
+to print the cached status results.
+
+The file contains the essential fields from:
+() the index
+() the "struct wt_status" for the overall results
+() the contents of "struct wt_status_change_data" for tracked changed files
+() the list of untracked and ignored files
+
+Version 1 Format:
+=================
+
+The V1 file begins with a required header section followed by optional
+sections for each type of item (changed, untracked, ignored). Individual
+item sections are only present if necessary. Each item section begins
+with an item-type header with the number of items in the section.
+
+Each "line" in the format is encoded using pkt-line with a final LF.
+Flush packets are used to terminate sections.
+
+-----------------
+PKT-LINE("version" SP "1")
+<v1-header-section>
+[<v1-changed-item-section>]
+[<v1-untracked-item-section>]
+[<v1-ignored-item-section>]
+-----------------
+
+
+V1 Header
+---------
+
+The v1-header-section fields are taken directly from "struct wt_status".
+Each field is printed on a separate pkt-line. Lines for NULL string
+values are omitted. All integers are printed with "%d". OIDs are
+printed in hex.
+
+v1-header-section = <v1-index-headers>
+                    <v1-wt-status-headers>
+                    PKT-LINE(<flush>)
+
+v1-index-headers = PKT-LINE("index_mtime" SP <sec> SP <nsec> LF)
+
+v1-wt-status-headers = PKT-LINE("is_initial" SP <integer> LF)
+                       [ PKT-LINE("branch" SP <branch-name> LF) ]
+                       [ PKT-LINE("reference" SP <reference-name> LF) ]
+                       PKT-LINE("show_ignored_files" SP <integer> LF)
+                       PKT-LINE("show_untracked_files" SP <integer> LF)
+                       PKT-LINE("show_ignored_directory" SP <integer> LF)
+                       [ PKT-LINE("ignore_submodule_arg" SP <string> LF) ]
+                       PKT-LINE("detect_rename" SP <integer> LF)
+                       PKT-LINE("rename_score" SP <integer> LF)
+                       PKT-LINE("rename_limit" SP <integer> LF)
+                       PKT-LINE("detect_break" SP <integer> LF)
+                       PKT-LINE("sha1_commit" SP <oid> LF)
+                       PKT-LINE("committable" SP <integer> LF)
+                       PKT-LINE("workdir_dirty" SP <integer> LF)
+
+
+V1 Changed Items
+----------------
+
+The v1-changed-item-section lists all of the changed items with one
+item per pkt-line. Each pkt-line contains a binary block of data
+from "struct wt_status_serialize_data_fixed" in a fixed header where
+integers are in network byte order and OIDs are in raw (non-hex) form.
+This is followed by one or two raw pathnames (not c-quoted) with NUL
+terminators (both NULs are always present even if there is no rename).
+
+v1-changed-item-section = PKT-LINE("changed" SP <count> LF)
+                          [ PKT-LINE(<changed_item> LF) ]+
+                          PKT-LINE(<flush>)
+
+changed_item = <byte[4] worktree_status>
+               <byte[4] index_status>
+               <byte[4] stagemask>
+               <byte[4] rename_score>
+               <byte[4] mode_head>
+               <byte[4] mode_index>
+               <byte[4] mode_worktree>
+               <byte[4] dirty_submodule>
+               <byte[4] new_submodule_commits>
+               <byte[20] oid_head>
+               <byte[20] oid_index>
+               <path> NUL
+               [ <rename_source> ]
+               NUL
+
+
+V1 Untracked and Ignored Items
+------------------------------
+
+These sections are simple lists of pathnames. They ARE NOT
+c-quoted.
+
+v1-untracked-item-section = PKT-LINE("untracked" SP <count> LF)
+                            [ PKT-LINE(<pathname> LF) ]+
+                            PKT-LINE(<flush>)
+
+v1-ignored-item-section = PKT-LINE("ignored" SP <count> LF)
+                          [ PKT-LINE(<pathname> LF) ]+
+                          PKT-LINE(<flush>)
diff --git a/Makefile b/Makefile
index fdbe52e9169993..fe36dce720106b 100644
--- a/Makefile
+++ b/Makefile
@@ -1035,6 +1035,8 @@ LIB_OBJS += wrapper.o
 LIB_OBJS += write-or-die.o
 LIB_OBJS += ws.o
 LIB_OBJS += wt-status.o
+LIB_OBJS += wt-status-deserialize.o
+LIB_OBJS += wt-status-serialize.o
 LIB_OBJS += xdiff-interface.o
 LIB_OBJS += zlib.o
 
diff --git a/builtin/commit.c b/builtin/commit.c
index de406e35ba4983..35cf86068b8403 100644
--- a/builtin/commit.c
+++ b/builtin/commit.c
@@ -148,6 +148,70 @@ static int opt_parse_porcelain(const struct option *opt, const char *arg, int un
 	return 0;
 }
 
+static int do_serialize = 0;
+static int do_implicit_deserialize = 0;
+static int do_explicit_deserialize = 0;
+static char *deserialize_path = NULL;
+
+/*
+ * --serialize | --serialize=1 | --serialize=v1
+ *
+ * Request that we serialize our output rather than printing in
+ * any of the established formats. Optionally specify serialization
+ * version.
+ */
+static int opt_parse_serialize(const struct option *opt, const char *arg, int unset)
+{
+	enum wt_status_format *value = (enum wt_status_format *)opt->value;
+	if (unset || !arg)
+		*value = STATUS_FORMAT_SERIALIZE_V1;
+	else if (!strcmp(arg, "v1") || !strcmp(arg, "1"))
+		*value = STATUS_FORMAT_SERIALIZE_V1;
+	else
+		die("unsupported serialize version '%s'", arg);
+
+	if (do_explicit_deserialize)
+		die("cannot mix --serialize and --deserialize");
+	do_implicit_deserialize = 0;
+
+	do_serialize = 1;
+	return 0;
+}
+
+/*
+ * --deserialize | --deserialize=<path> |
+ * --no-deserialize
+ *
+ * Request that we deserialize status data from some existing resource
+ * rather than performing a status scan.
+ *
+ * The input source can come from stdin or a path given here -- or be
+ * inherited from the config settings.
+ */ +static int opt_parse_deserialize(const struct option *opt, const char *arg, int unset) +{ + if (unset) { + do_implicit_deserialize = 0; + do_explicit_deserialize = 0; + } else { + if (do_serialize) + die("cannot mix --serialize and --deserialize"); + if (arg) { + /* override config or stdin */ + free(deserialize_path); + deserialize_path = xstrdup(arg); + } + if (deserialize_path && *deserialize_path + && (access(deserialize_path, R_OK) != 0)) + die("cannot find serialization file '%s'", + deserialize_path); + + do_explicit_deserialize = 1; + } + + return 0; +} + static int opt_parse_m(const struct option *opt, const char *arg, int unset) { struct strbuf *buf = opt->value; @@ -1081,6 +1145,8 @@ static void handle_untracked_files_arg(struct wt_status *s) s->show_untracked_files = SHOW_NORMAL_UNTRACKED_FILES; else if (!strcmp(untracked_files_arg, "all")) s->show_untracked_files = SHOW_ALL_UNTRACKED_FILES; + else if (!strcmp(untracked_files_arg,"complete")) + s->show_untracked_files = SHOW_COMPLETE_UNTRACKED_FILES; /* * Please update $__git_untracked_file_modes in * git-completion.bash when you add new options @@ -1320,6 +1386,19 @@ static int git_status_config(const char *k, const char *v, void *cb) s->relative_paths = git_config_bool(k, v); return 0; } + if (!strcmp(k, "status.deserializepath")) { + /* + * Automatically assume deserialization if this is + * set in the config and the file exists. Do not + * complain if the file does not exist, because we + * silently fall back to normal mode. + */ + if (v && *v && access(v, R_OK) == 0) { + do_implicit_deserialize = 1; + deserialize_path = xstrdup(v); + } + return 0; + } if (!strcmp(k, "status.showuntrackedfiles")) { if (!v) return config_error_nonbool(k); @@ -1362,7 +1441,8 @@ int cmd_status(int argc, const char **argv, const char *prefix) static int show_ignored_directory = 0; static struct wt_status s; unsigned int progress_flag = 0; - int fd; + int try_deserialize; + int fd = -1; struct object_id oid; static struct option builtin_status_options[] = { OPT__VERBOSE(&verbose, N_("be verbose")), @@ -1377,6 +1457,12 @@ int cmd_status(int argc, const char **argv, const char *prefix) OPT_CALLBACK_F(0, "porcelain", &status_format, N_("version"), N_("machine-readable output"), PARSE_OPT_OPTARG, opt_parse_porcelain), + { OPTION_CALLBACK, 0, "serialize", &status_format, + N_("version"), N_("serialize raw status data to stdout"), + PARSE_OPT_OPTARG | PARSE_OPT_NONEG, opt_parse_serialize }, + { OPTION_CALLBACK, 0, "deserialize", NULL, + N_("path"), N_("deserialize raw status data from file"), + PARSE_OPT_OPTARG, opt_parse_deserialize }, OPT_SET_INT(0, "long", &status_format, N_("show status in long format (default)"), STATUS_FORMAT_LONG), @@ -1437,10 +1523,26 @@ int cmd_status(int argc, const char **argv, const char *prefix) s.show_untracked_files == SHOW_NO_UNTRACKED_FILES) die(_("Unsupported combination of ignored and untracked-files arguments")); + if (s.show_untracked_files == SHOW_COMPLETE_UNTRACKED_FILES && + s.show_ignored_mode == SHOW_NO_IGNORED) + die(_("Complete Untracked only supported with ignored files")); + parse_pathspec(&s.pathspec, 0, PATHSPEC_PREFER_FULL, prefix, argv); + /* + * If we want to try to deserialize status data from a cache file, + * we need to re-order the initialization code. The problem is that + * this makes for a very nasty diff and causes merge conflicts as we + * carry it forward. And it easy to mess up the merge, so we + * duplicate some code here to hopefully reduce conflicts. 
+ */ + try_deserialize = (!do_serialize && + (do_implicit_deserialize || do_explicit_deserialize)); + if (try_deserialize) + goto skip_init; + enable_fscache(0); if (status_format != STATUS_FORMAT_PORCELAIN && status_format != STATUS_FORMAT_PORCELAIN_V2) @@ -1455,6 +1557,7 @@ int cmd_status(int argc, const char **argv, const char *prefix) else fd = -1; +skip_init: s.is_initial = get_oid(s.reference, &oid) ? 1 : 0; if (!s.is_initial) oidcpy(&s.oid_commit, &oid); @@ -1471,6 +1574,24 @@ int cmd_status(int argc, const char **argv, const char *prefix) s.rename_score = parse_rename_score(&rename_score_arg); } + if (try_deserialize) { + if (s.relative_paths) + s.prefix = prefix; + + if (wt_status_deserialize(&s, deserialize_path) == DESERIALIZE_OK) + return 0; + + /* deserialize failed, so force the initialization we skipped above. */ + enable_fscache(1); + read_cache_preload(&s.pathspec); + refresh_index(&the_index, REFRESH_QUIET|REFRESH_UNMERGED, &s.pathspec, NULL, NULL); + + if (use_optional_locks()) + fd = hold_locked_index(&index_lock, 0); + else + fd = -1; + } + wt_status_collect(&s); if (0 <= fd) diff --git a/contrib/completion/git-completion.bash b/contrib/completion/git-completion.bash index 70ad04e1b2a8c6..72321ceecdf5c2 100644 --- a/contrib/completion/git-completion.bash +++ b/contrib/completion/git-completion.bash @@ -1439,7 +1439,7 @@ _git_clone () esac } -__git_untracked_file_modes="all no normal" +__git_untracked_file_modes="all no normal complete" _git_commit () { diff --git a/pkt-line.c b/pkt-line.c index a0e87b1e81408e..308ab8d5f78781 100644 --- a/pkt-line.c +++ b/pkt-line.c @@ -187,7 +187,7 @@ int packet_write_fmt_gently(int fd, const char *fmt, ...) return status; } -static int packet_write_gently(const int fd_out, const char *buf, size_t size) +int packet_write_gently(const int fd_out, const char *buf, size_t size) { static char packet_write_buffer[LARGE_PACKET_MAX]; size_t packet_size; diff --git a/pkt-line.h b/pkt-line.h index fef3a0d792d31b..28b7e785784597 100644 --- a/pkt-line.h +++ b/pkt-line.h @@ -31,6 +31,7 @@ void packet_buf_write(struct strbuf *buf, const char *fmt, ...) __attribute__((f void packet_buf_write_len(struct strbuf *buf, const char *data, size_t len); int packet_flush_gently(int fd); int packet_write_fmt_gently(int fd, const char *fmt, ...) __attribute__((format (printf, 2, 3))); +int packet_write_gently(const int fd_out, const char *buf, size_t size); int write_packetized_from_fd(int fd_in, int fd_out); int write_packetized_from_buf(const char *src_in, size_t len, int fd_out); diff --git a/t/t7523-status-complete-untracked.sh b/t/t7523-status-complete-untracked.sh new file mode 100755 index 00000000000000..2f4476654a5570 --- /dev/null +++ b/t/t7523-status-complete-untracked.sh @@ -0,0 +1,39 @@ +#!/bin/sh + +test_description='git status untracked complete tests' + +. ./test-lib.sh + +test_expect_success 'setup' ' + cat >.gitignore <<-\EOF && + *.ign + ignored_dir/ + EOF + + mkdir tracked ignored_dir && + touch tracked_1.txt tracked/tracked_1.txt && + git add . && + test_tick && + git commit -m"Adding original file." && + mkdir untracked && + touch ignored.ign ignored_dir/ignored_2.txt \ + untracked_1.txt untracked/untracked_2.txt untracked/untracked_3.txt +' + +test_expect_success 'verify untracked-files=complete' ' + cat >expect <<-\EOF && + ? expect + ? output + ? untracked/ + ? untracked/untracked_2.txt + ? untracked/untracked_3.txt + ? untracked_1.txt + ! ignored.ign + ! 
ignored_dir/ + EOF + + git status --porcelain=v2 --untracked-files=complete --ignored >output && + test_i18ncmp expect output +' + +test_done diff --git a/t/t7524-serialized-status.sh b/t/t7524-serialized-status.sh new file mode 100755 index 00000000000000..a0fc1bc2daf6ec --- /dev/null +++ b/t/t7524-serialized-status.sh @@ -0,0 +1,140 @@ +#!/bin/sh + +test_description='git serialized status tests' + +. ./test-lib.sh + +# This file includes tests for serializing / deserializing +# status data. These tests cover two basic features: +# +# [1] Because users can request different types of untracked-file +# and ignored file reporting, the cache data generated by +# serialize must use either the same untracked and ignored +# parameters as the later deserialize invocation; otherwise, +# the deserialize invocation must disregard the cached data +# and run a full scan itself. +# +# To increase the number of cases where the cached status can +# be used, we have added a "--untracked-file=complete" option +# that reports a superset or union of the results from the +# "-u normal" and "-u all". We combine this with a filter in +# deserialize to filter the results. +# +# Ignored file reporting is simpler in that is an all or +# nothing; there are no subsets. +# +# The tests here (in addition to confirming that a cache +# file can be generated and used by a subsequent status +# command) need to test this untracked-file filtering. +# +# [2] ensuring the status calls are using data from the status +# cache as expected. This includes verifying cached data +# is used when appropriate as well as falling back to +# performing a new status scan when the data in the cache +# is insufficient/known stale. + +test_expect_success 'setup' ' + cat >.gitignore <<-\EOF && + *.ign + ignored_dir/ + EOF + + mkdir tracked ignored_dir && + touch tracked_1.txt tracked/tracked_1.txt && + git add . && + test_tick && + git commit -m"Adding original file." && + mkdir untracked && + touch ignored.ign ignored_dir/ignored_2.txt \ + untracked_1.txt untracked/untracked_2.txt untracked/untracked_3.txt +' + +test_expect_success 'verify untracked-files=complete with no conversion' ' + test_when_finished "rm serialized_status.dat new_change.txt output" && + cat >expect <<-\EOF && + ? expect + ? serialized_status.dat + ? untracked/ + ? untracked/untracked_2.txt + ? untracked/untracked_3.txt + ? untracked_1.txt + ! ignored.ign + ! ignored_dir/ + EOF + + git status --untracked-files=complete --ignored=matching --serialize >serialized_status.dat && + touch new_change.txt && + + git status --porcelain=v2 --untracked-files=complete --ignored=matching --deserialize=serialized_status.dat >output && + test_i18ncmp expect output +' + +test_expect_success 'verify untracked-files=complete to untracked-files=normal conversion' ' + test_when_finished "rm serialized_status.dat new_change.txt output" && + cat >expect <<-\EOF && + ? expect + ? serialized_status.dat + ? untracked/ + ? untracked_1.txt + EOF + + git status --untracked-files=complete --ignored=matching --serialize >serialized_status.dat && + touch new_change.txt && + + git status --porcelain=v2 --deserialize=serialized_status.dat >output && + test_i18ncmp expect output +' + +test_expect_success 'verify untracked-files=complete to untracked-files=all conversion' ' + test_when_finished "rm serialized_status.dat new_change.txt output" && + cat >expect <<-\EOF && + ? expect + ? serialized_status.dat + ? untracked/untracked_2.txt + ? untracked/untracked_3.txt + ? untracked_1.txt + ! ignored.ign + ! 
ignored_dir/ + EOF + + git status --untracked-files=complete --ignored=matching --serialize >serialized_status.dat && + touch new_change.txt && + + git status --porcelain=v2 --untracked-files=all --ignored=matching --deserialize=serialized_status.dat >output && + test_i18ncmp expect output +' + +test_expect_success 'verify serialized status with non-convertible ignore mode does new scan' ' + test_when_finished "rm serialized_status.dat new_change.txt output" && + cat >expect <<-\EOF && + ? expect + ? new_change.txt + ? output + ? serialized_status.dat + ? untracked/ + ? untracked_1.txt + ! ignored.ign + ! ignored_dir/ + EOF + + git status --untracked-files=complete --ignored=matching --serialize >serialized_status.dat && + touch new_change.txt && + + git status --porcelain=v2 --ignored --deserialize=serialized_status.dat >output && + test_i18ncmp expect output +' + +test_expect_success 'verify serialized status handles path scopes' ' + test_when_finished "rm serialized_status.dat new_change.txt output" && + cat >expect <<-\EOF && + ? untracked/ + EOF + + git status --untracked-files=complete --ignored=matching --serialize >serialized_status.dat && + touch new_change.txt && + + git status --porcelain=v2 --deserialize=serialized_status.dat untracked >output && + test_i18ncmp expect output +' + +test_done diff --git a/wt-status-deserialize.c b/wt-status-deserialize.c new file mode 100644 index 00000000000000..317b88fddc1ec0 --- /dev/null +++ b/wt-status-deserialize.c @@ -0,0 +1,597 @@ +#include "cache.h" +#include "wt-status.h" +#include "pkt-line.h" +#include "trace.h" + +static struct trace_key trace_deserialize = TRACE_KEY_INIT(DESERIALIZE); + +enum deserialize_parse_strategy { + DESERIALIZE_STRATEGY_AS_IS, + DESERIALIZE_STRATEGY_SKIP, + DESERIALIZE_STRATEGY_NORMAL, + DESERIALIZE_STRATEGY_ALL +}; + +static int check_path_contains(const char *out, int out_len, const char *in, int in_len) +{ + return (out_len > 0 && + out_len < in_len && + (out[out_len - 1] == '/') && + !memcmp(out, in, out_len)); +} + +static const char *my_packet_read_line(int fd, int *line_len) +{ + static char buf[LARGE_PACKET_MAX]; + + *line_len = packet_read(fd, NULL, NULL, buf, sizeof(buf), + PACKET_READ_CHOMP_NEWLINE | + PACKET_READ_GENTLE_ON_EOF); + return (*line_len > 0) ? buf : NULL; +} + +/* + * mtime_reported contains the mtime of the index when the + * serialization snapshot was computed. + * + * mtime_observed_on_disk contains the mtime of the index now. + * + * If these 2 times are different, then the .git/index has + * changed since the serialization cache was created and we + * must reject the cache because anything could have changed. + * + * If they are the same, we continue trying to use the cache. 
+ */ +static int my_validate_index(const struct cache_time *mtime_reported) +{ + const char *path = get_index_file(); + struct stat st; + struct cache_time mtime_observed_on_disk; + + if (lstat(path, &st)) { + trace_printf_key(&trace_deserialize, "could not stat index"); + return DESERIALIZE_ERR; + } + mtime_observed_on_disk.sec = st.st_mtime; + mtime_observed_on_disk.nsec = ST_MTIME_NSEC(st); + if ((mtime_observed_on_disk.sec != mtime_reported->sec) || + (mtime_observed_on_disk.nsec != mtime_reported->nsec)) { + trace_printf_key(&trace_deserialize, "index mtime changed [des %d.%d][obs %d.%d]", + mtime_reported->sec, mtime_reported->nsec, + mtime_observed_on_disk.sec, mtime_observed_on_disk.nsec); + return DESERIALIZE_ERR; + } + + return DESERIALIZE_OK; +} + +static int wt_deserialize_v1_header(struct wt_status *s, int fd) +{ + struct cache_time index_mtime; + int line_len, nr_fields; + const char *line; + const char *arg; + + /* + * parse header lines up to the first flush packet. + */ + while ((line = my_packet_read_line(fd, &line_len))) { + + if (skip_prefix(line, "index_mtime ", &arg)) { + nr_fields = sscanf(arg, "%d %d", + &index_mtime.sec, + &index_mtime.nsec); + if (nr_fields != 2) { + trace_printf_key(&trace_deserialize, "invalid index_mtime (%d) '%s'", + nr_fields, line); + return DESERIALIZE_ERR; + } + continue; + } + + if (skip_prefix(line, "is_initial ", &arg)) { + s->is_initial = (int)strtol(arg, NULL, 10); + continue; + } + if (skip_prefix(line, "branch ", &arg)) { + s->branch = xstrdup(arg); + continue; + } + if (skip_prefix(line, "reference ", &arg)) { + s->reference = xstrdup(arg); + continue; + } + /* pathspec */ + /* verbose */ + /* amend */ + if (skip_prefix(line, "whence ", &arg)) { + s->whence = (int)strtol(arg, NULL, 10); + continue; + } + /* nowarn */ + /* use_color */ + /* no_gettext */ + /* display_comment_prefix */ + /* relative_paths */ + /* submodule_summary */ + if (skip_prefix(line, "show_ignored_mode ", &arg)) { + s->show_ignored_mode = (int)strtol(arg, NULL, 10); + continue; + } + if (skip_prefix(line, "show_untracked_files ", &arg)) { + s->show_untracked_files = (int)strtol(arg, NULL, 10); + continue; + } + if (skip_prefix(line, "ignore_submodule_arg ", &arg)) { + s->ignore_submodule_arg = xstrdup(arg); + continue; + } + /* color_palette */ + /* colopts */ + /* null_termination */ + /* commit_template */ + /* show_branch */ + /* show_stash */ + if (skip_prefix(line, "hints ", &arg)) { + s->hints = (int)strtol(arg, NULL, 10); + continue; + } + if (skip_prefix(line, "detect_rename ", &arg)) { + s->detect_rename = (int)strtol(arg, NULL, 10); + continue; + } + if (skip_prefix(line, "rename_score ", &arg)) { + s->rename_score = (int)strtol(arg, NULL, 10); + continue; + } + if (skip_prefix(line, "rename_limit ", &arg)) { + s->rename_limit = (int)strtol(arg, NULL, 10); + continue; + } + /* status_format */ + if (skip_prefix(line, "sha1_commit ", &arg)) { + if (get_oid_hex(arg, &s->oid_commit)) { + trace_printf_key(&trace_deserialize, "invalid sha1_commit"); + return DESERIALIZE_ERR; + } + continue; + } + if (skip_prefix(line, "committable ", &arg)) { + s->committable = (int)strtol(arg, NULL, 10); + continue; + } + if (skip_prefix(line, "workdir_dirty ", &arg)) { + s->workdir_dirty = (int)strtol(arg, NULL, 10); + continue; + } + /* prefix */ + + trace_printf_key(&trace_deserialize, "unexpected line '%s'", line); + return DESERIALIZE_ERR; + } + + return my_validate_index(&index_mtime); +} + +/* + * Build a string-list of (count) lines from the input. 
+ */ +static int wt_deserialize_v1_changed_items(struct wt_status *s, int fd, int count) +{ + struct wt_status_serialize_data *sd; + char *p; + int line_len; + const char *line; + struct string_list_item *item; + + string_list_init(&s->change, 1); + + /* + * + + * + * + * NUL [] NUL + */ + while ((line = my_packet_read_line(fd, &line_len))) { + struct wt_status_change_data *d = xcalloc(1, sizeof(*d)); + sd = (struct wt_status_serialize_data *)line; + + d->worktree_status = ntohl(sd->fixed.worktree_status); + d->index_status = ntohl(sd->fixed.index_status); + d->stagemask = ntohl(sd->fixed.stagemask); + d->rename_score = ntohl(sd->fixed.rename_score); + d->mode_head = ntohl(sd->fixed.mode_head); + d->mode_index = ntohl(sd->fixed.mode_index); + d->mode_worktree = ntohl(sd->fixed.mode_worktree); + d->dirty_submodule = ntohl(sd->fixed.dirty_submodule); + d->new_submodule_commits = ntohl(sd->fixed.new_submodule_commits); + oidcpy(&d->oid_head, &sd->fixed.oid_head); + oidcpy(&d->oid_index, &sd->fixed.oid_index); + + p = sd->variant; + item = string_list_append(&s->change, p); + p += strlen(p) + 1; + if (*p) + d->rename_source = xstrdup(p); + item->util = d; + + trace_printf_key( + &trace_deserialize, + "change: %d %d %d %d %o %o %o %d %d %s %s '%s' '%s'", + d->worktree_status, + d->index_status, + d->stagemask, + d->rename_score, + d->mode_head, + d->mode_index, + d->mode_worktree, + d->dirty_submodule, + d->new_submodule_commits, + oid_to_hex(&d->oid_head), + oid_to_hex(&d->oid_index), + item->string, + (d->rename_source ? d->rename_source : "")); + } + + return DESERIALIZE_OK; +} + +static int wt_deserialize_v1_untracked_items(struct wt_status *s, + int fd, + int count, + enum deserialize_parse_strategy strategy) +{ + int line_len; + const char *line; + char *out = NULL; + int out_len = 0; + + string_list_init(&s->untracked, 1); + + /* + * + + * + */ + while ((line = my_packet_read_line(fd, &line_len))) { + if (strategy == DESERIALIZE_STRATEGY_AS_IS) + string_list_append(&s->untracked, line); + if (strategy == DESERIALIZE_STRATEGY_SKIP) + continue; + if (strategy == DESERIALIZE_STRATEGY_NORMAL) { + + /* Only add "normal" entries to list */ + if (out && + check_path_contains(out, out_len, line, line_len)) { + continue; + } + else { + out = string_list_append(&s->untracked, line)->string; + out_len = line_len; + } + } + if (strategy == DESERIALIZE_STRATEGY_ALL) { + /* Only add "all" entries to list */ + if (line[line_len - 1] != '/') + string_list_append(&s->untracked, line); + } + } + + return DESERIALIZE_OK; +} + +static int wt_deserialize_v1_ignored_items(struct wt_status *s, + int fd, + int count, + enum deserialize_parse_strategy strategy) +{ + int line_len; + const char *line; + + string_list_init(&s->ignored, 1); + + /* + * + + * + */ + while ((line = my_packet_read_line(fd, &line_len))) { + if (strategy == DESERIALIZE_STRATEGY_AS_IS) + string_list_append(&s->ignored, line); + else + continue; + } + + return DESERIALIZE_OK; +} + +static int validate_untracked_files_arg(enum untracked_status_type cmd, + enum untracked_status_type des, + enum deserialize_parse_strategy *strategy) +{ + *strategy = DESERIALIZE_STRATEGY_AS_IS; + + if (cmd == des) { + *strategy = DESERIALIZE_STRATEGY_AS_IS; + } else if (cmd == SHOW_NO_UNTRACKED_FILES) { + *strategy = DESERIALIZE_STRATEGY_SKIP; + } else if (des == SHOW_COMPLETE_UNTRACKED_FILES) { + if (cmd == SHOW_ALL_UNTRACKED_FILES) + *strategy = DESERIALIZE_STRATEGY_ALL; + else if (cmd == SHOW_NORMAL_UNTRACKED_FILES) + *strategy = DESERIALIZE_STRATEGY_NORMAL; 
+ } else { + return DESERIALIZE_ERR; + } + + return DESERIALIZE_OK; +} + +static int validate_ignored_files_arg(enum show_ignored_type cmd, + enum show_ignored_type des, + enum deserialize_parse_strategy *strategy) +{ + *strategy = DESERIALIZE_STRATEGY_AS_IS; + + if (cmd == SHOW_NO_IGNORED) { + *strategy = DESERIALIZE_STRATEGY_SKIP; + } + else if (cmd != des) { + return DESERIALIZE_ERR; + } + + return DESERIALIZE_OK; +} + +static int wt_deserialize_v1(const struct wt_status *cmd_s, struct wt_status *s, int fd) +{ + int line_len; + const char *line; + const char *arg; + int nr_changed = 0; + int nr_untracked = 0; + int nr_ignored = 0; + + enum deserialize_parse_strategy ignored_strategy = DESERIALIZE_STRATEGY_AS_IS, untracked_strategy = DESERIALIZE_STRATEGY_AS_IS; + + if (wt_deserialize_v1_header(s, fd) == DESERIALIZE_ERR) + return DESERIALIZE_ERR; + + /* + * We now have the header parsed. Look at the command args (as passed in), and see how to parse + * the serialized data + */ + if (validate_untracked_files_arg(cmd_s->show_untracked_files, s->show_untracked_files, &untracked_strategy)) { + trace_printf_key(&trace_deserialize, "reject: show_untracked_file: command: %d, serialized : %d", + cmd_s->show_untracked_files, + s->show_untracked_files); + return DESERIALIZE_ERR; + } + + if (validate_ignored_files_arg(cmd_s->show_ignored_mode, s->show_ignored_mode, &ignored_strategy)) { + trace_printf_key(&trace_deserialize, "reject: show_ignored_mode: command: %d, serialized: %d", + cmd_s->show_ignored_mode, + s->show_ignored_mode); + return DESERIALIZE_ERR; + } + + /* + * [ [+] ] + * [ [+] ] + * [ [+] ] + */ + while ((line = my_packet_read_line(fd, &line_len))) { + if (skip_prefix(line, "changed ", &arg)) { + nr_changed = (int)strtol(arg, NULL, 10); + if (wt_deserialize_v1_changed_items(s, fd, nr_changed) + == DESERIALIZE_ERR) + return DESERIALIZE_ERR; + continue; + } + if (skip_prefix(line, "untracked ", &arg)) { + nr_untracked = (int)strtol(arg, NULL, 10); + if (wt_deserialize_v1_untracked_items(s, fd, nr_untracked, untracked_strategy) + == DESERIALIZE_ERR) + return DESERIALIZE_ERR; + continue; + } + if (skip_prefix(line, "ignored ", &arg)) { + nr_ignored = (int)strtol(arg, NULL, 10); + if (wt_deserialize_v1_ignored_items(s, fd, nr_ignored, ignored_strategy) + == DESERIALIZE_ERR) + return DESERIALIZE_ERR; + continue; + } + trace_printf_key(&trace_deserialize, "unexpected line '%s'", line); + return DESERIALIZE_ERR; + } + + return DESERIALIZE_OK; +} + +static int wt_deserialize_parse(const struct wt_status *cmd_s, struct wt_status *s, int fd) +{ + int line_len; + const char *line; + const char *arg; + + memset(s, 0, sizeof(*s)); + + if ((line = my_packet_read_line(fd, &line_len)) && + (skip_prefix(line, "version ", &arg))) { + int version = (int)strtol(arg, NULL, 10); + if (version == 1) + return wt_deserialize_v1(cmd_s, s, fd); + } + trace_printf_key(&trace_deserialize, "missing/unsupported version"); + return DESERIALIZE_ERR; +} + +static inline int my_strcmp_null(const char *a, const char *b) +{ + const char *alt_a = (a) ? a : ""; + const char *alt_b = (b) ? b : ""; + + return strcmp(alt_a, alt_b); +} + +static int wt_deserialize_fd(const struct wt_status *cmd_s, struct wt_status *des_s, int fd) +{ + /* + * Check the path spec on the current command + */ + if (cmd_s->pathspec.nr > 1) { + trace_printf_key(&trace_deserialize, "reject: multiple pathspecs"); + return DESERIALIZE_ERR; + } + + /* + * If we have a pathspec, but it maches the root (e.g. no filtering) + * then this is OK. 
+ */ + if (cmd_s->pathspec.nr == 1 && + my_strcmp_null(cmd_s->pathspec.items[0].match, "")) { + trace_printf_key(&trace_deserialize, "reject: pathspec"); + return DESERIALIZE_ERR; + } + + /* + * Deserialize cached status + */ + if (wt_deserialize_parse(cmd_s, des_s, fd) == DESERIALIZE_ERR) + return DESERIALIZE_ERR; + + /* + * Compare fields in cmd_s with those observed in des_s and + * complain if they are incompatible (such as different "-u" + * or "--ignored" settings). + */ + if (cmd_s->is_initial != des_s->is_initial) { + trace_printf_key(&trace_deserialize, "reject: is_initial"); + return DESERIALIZE_ERR; + } + if (my_strcmp_null(cmd_s->branch, des_s->branch)) { + trace_printf_key(&trace_deserialize, "reject: branch"); + return DESERIALIZE_ERR; + } + if (my_strcmp_null(cmd_s->reference, des_s->reference)) { + trace_printf_key(&trace_deserialize, "reject: reference"); + return DESERIALIZE_ERR; + } + /* verbose */ + /* amend */ + if (cmd_s->whence != des_s->whence) { + trace_printf_key(&trace_deserialize, "reject: whence"); + return DESERIALIZE_ERR; + } + /* nowarn */ + /* use_color */ + /* no_gettext */ + /* display_comment_prefix */ + /* relative_paths */ + /* submodule_summary */ + + /* show_ignored_files - already validated */ + /* show_untrackes_files - already validated */ + + /* + * Submodules are not supported by status serialization. + * The status will not be serialized if it contains submodules, + * and so this check is not needed. + * + * if (my_strcmp_null(cmd_s->ignore_submodule_arg, des_s->ignore_submodule_arg)) { + * trace_printf_key(&trace_deserialize, "reject: ignore_submodule_arg"); + * return DESERIALIZE_ERR; + * } + */ + + /* color_palette */ + /* colopts */ + /* null_termination */ + /* commit_template */ + /* show_branch */ + /* show_stash */ + /* hints */ + if (cmd_s->detect_rename != des_s->detect_rename) { + trace_printf_key(&trace_deserialize, "reject: detect_rename"); + return DESERIALIZE_ERR; + } + if (cmd_s->rename_score != des_s->rename_score) { + trace_printf_key(&trace_deserialize, "reject: rename_score"); + return DESERIALIZE_ERR; + } + if (cmd_s->rename_limit != des_s->rename_limit) { + trace_printf_key(&trace_deserialize, "reject: rename_limit"); + return DESERIALIZE_ERR; + } + /* status_format */ + if (!oideq(&cmd_s->oid_commit, &des_s->oid_commit)) { + trace_printf_key(&trace_deserialize, "reject: sha1_commit"); + return DESERIALIZE_ERR; + } + + /* + * Copy over display-related fields from the current command. + */ + des_s->verbose = cmd_s->verbose; + /* amend */ + /* whence */ + des_s->nowarn = cmd_s->nowarn; + des_s->use_color = cmd_s->use_color; + des_s->no_gettext = cmd_s->no_gettext; + des_s->display_comment_prefix = cmd_s->display_comment_prefix; + des_s->relative_paths = cmd_s->relative_paths; + des_s->submodule_summary = cmd_s->submodule_summary; + memcpy(des_s->color_palette, cmd_s->color_palette, + sizeof(char)*WT_STATUS_MAXSLOT*COLOR_MAXLEN); + des_s->colopts = cmd_s->colopts; + des_s->null_termination = cmd_s->null_termination; + /* commit_template */ + des_s->show_branch = cmd_s->show_branch; + des_s->show_stash = cmd_s->show_stash; + /* hints */ + des_s->status_format = cmd_s->status_format; + des_s->fp = cmd_s->fp; + if (cmd_s->prefix && *cmd_s->prefix) + des_s->prefix = xstrdup(cmd_s->prefix); + + return DESERIALIZE_OK; +} + + +/* + * Read raw serialized status data from the given file + * + * Verify that the args specified in the current command + * are compatible with the deserialized data (such as "-uno"). 
+ * + * Copy display-related fields from the current command + * into the deserialized data (so that the user can request + * long or short as they please). + */ +int wt_status_deserialize(const struct wt_status *cmd_s, + const char *path) +{ + struct wt_status des_s; + int result; + + if (path && *path && strcmp(path, "0")) { + int fd = xopen(path, O_RDONLY); + if (fd == -1) { + trace_printf_key(&trace_deserialize, "could not read '%s'", path); + return DESERIALIZE_ERR; + } + trace_printf_key(&trace_deserialize, "reading serialization file '%s'", path); + result = wt_deserialize_fd(cmd_s, &des_s, fd); + close(fd); + } else { + trace_printf_key(&trace_deserialize, "reading stdin"); + result = wt_deserialize_fd(cmd_s, &des_s, 0); + } + + if (result == DESERIALIZE_OK) { + wt_status_get_state(cmd_s->repo, &des_s.state, des_s.branch && + !strcmp(des_s.branch, "HEAD")); + wt_status_print(&des_s); + } + + return result; +} diff --git a/wt-status-serialize.c b/wt-status-serialize.c new file mode 100644 index 00000000000000..60da2d17d16182 --- /dev/null +++ b/wt-status-serialize.c @@ -0,0 +1,213 @@ +#include "cache.h" +#include "wt-status.h" +#include "pkt-line.h" + +static struct trace_key trace_serialize = TRACE_KEY_INIT(SERIALIZE); + +/* + * Write V1 header fields. + */ +static void wt_serialize_v1_header(struct wt_status *s, int fd) +{ + /* + * Write select fields from the current index to help + * the deserializer recognize a stale data set. + */ + packet_write_fmt(fd, "index_mtime %d %d\n", + s->repo->index->timestamp.sec, + s->repo->index->timestamp.nsec); + + /* + * Write data from wt_status to qualify this status report. + * That is, if this run specified "-uno", the consumer of + * our serialization should know that. + */ + packet_write_fmt(fd, "is_initial %d\n", s->is_initial); + if (s->branch) + packet_write_fmt(fd, "branch %s\n", s->branch); + if (s->reference) + packet_write_fmt(fd, "reference %s\n", s->reference); + /* pathspec */ + /* verbose */ + /* amend */ + packet_write_fmt(fd, "whence %d\n", s->whence); + /* nowarn */ + /* use_color */ + /* no_gettext */ + /* display_comment_prefix */ + /* relative_paths */ + /* submodule_summary */ + packet_write_fmt(fd, "show_ignored_mode %d\n", s->show_ignored_mode); + packet_write_fmt(fd, "show_untracked_files %d\n", s->show_untracked_files); + if (s->ignore_submodule_arg) + packet_write_fmt(fd, "ignore_submodule_arg %s\n", s->ignore_submodule_arg); + /* color_palette */ + /* colopts */ + /* null_termination */ + /* commit_template */ + /* show_branch */ + /* show_stash */ + packet_write_fmt(fd, "hints %d\n", s->hints); + packet_write_fmt(fd, "detect_rename %d\n", s->detect_rename); + packet_write_fmt(fd, "rename_score %d\n", s->rename_score); + packet_write_fmt(fd, "rename_limit %d\n", s->rename_limit); + /* status_format */ + packet_write_fmt(fd, "sha1_commit %s\n", oid_to_hex(&s->oid_commit)); + packet_write_fmt(fd, "committable %d\n", s->committable); + packet_write_fmt(fd, "workdir_dirty %d\n", s->workdir_dirty); + /* prefix */ + packet_flush(fd); +} + +/* + * Print changed/unmerged items. + * We write raw (not c-quoted) pathname(s). The rename_source is only + * set when status computed a rename/copy. + * + * We ALWAYS write a final LF to the packet-line (for debugging) + * even though Linux pathnames allow LFs. 
+ */
+static inline void wt_serialize_v1_changed(struct wt_status *s, int fd,
+					   struct string_list_item *item)
+{
+	struct wt_status_change_data *d = item->util;
+	struct wt_status_serialize_data sd;
+	char *begin;
+	char *end;
+	char *p;
+	int len_path, len_rename_source;
+
+	trace_printf_key(&trace_serialize,
+			 "change: %d %d %d %d %o %o %o %d %d %s %s '%s' '%s'",
+			 d->worktree_status,
+			 d->index_status,
+			 d->stagemask,
+			 d->rename_score,
+			 d->mode_head,
+			 d->mode_index,
+			 d->mode_worktree,
+			 d->dirty_submodule,
+			 d->new_submodule_commits,
+			 oid_to_hex(&d->oid_head),
+			 oid_to_hex(&d->oid_index),
+			 item->string,
+			 (d->rename_source ? d->rename_source : ""));
+
+	sd.fixed.worktree_status = htonl(d->worktree_status);
+	sd.fixed.index_status = htonl(d->index_status);
+	sd.fixed.stagemask = htonl(d->stagemask);
+	sd.fixed.rename_score = htonl(d->rename_score);
+	sd.fixed.mode_head = htonl(d->mode_head);
+	sd.fixed.mode_index = htonl(d->mode_index);
+	sd.fixed.mode_worktree = htonl(d->mode_worktree);
+	sd.fixed.dirty_submodule = htonl(d->dirty_submodule);
+	sd.fixed.new_submodule_commits = htonl(d->new_submodule_commits);
+	oidcpy(&sd.fixed.oid_head, &d->oid_head);
+	oidcpy(&sd.fixed.oid_index, &d->oid_index);
+
+	begin = (char *)&sd;
+	end = begin + sizeof(sd);
+
+	p = sd.variant;
+
+	/*
+	 * Write <path> NUL [ <rename_source> ] NUL LF at the end of the buffer.
+	 */
+	len_path = strlen(item->string);
+	len_rename_source = d->rename_source ? strlen(d->rename_source) : 0;
+
+	/*
+	 * This is a bit of a hack, but I don't want to split the
+	 * status detail record across multiple pkt-lines.
+	 */
+	if (p + len_path + 1 + len_rename_source + 1 + 1 >= end)
+		BUG("path too long to serialize '%s'", item->string);
+
+	memcpy(p, item->string, len_path);
+	p += len_path;
+	*p++ = '\0';
+
+	if (len_rename_source) {
+		memcpy(p, d->rename_source, len_rename_source);
+		p += len_rename_source;
+	}
+	*p++ = '\0';
+	*p++ = '\n';
+
+	if (packet_write_gently(fd, begin, (p - begin)))
+		BUG("cannot serialize '%s'", item->string);
+}
+
+/*
+ * Write raw (not c-quoted) pathname for an untracked item.
+ * We ALWAYS write a final LF to the packet-line (for debugging)
+ * even though Linux pathnames allow LFs. That is, deserialization
+ * should use the packet-line length and omit the final LF.
+ */
+static inline void wt_serialize_v1_untracked(struct wt_status *s, int fd,
+					     struct string_list_item *item)
+{
+	packet_write_fmt(fd, "%s\n", item->string);
+}
+
+/*
+ * Write raw (not c-quoted) pathname for an ignored item.
+ * We ALWAYS write a final LF to the packet-line (for debugging)
+ * even though Linux pathnames allow LFs.
+ */
+static inline void wt_serialize_v1_ignored(struct wt_status *s, int fd,
+					   struct string_list_item *item)
+{
+	packet_write_fmt(fd, "%s\n", item->string);
+}
+
+/*
+ * Serialize the list of changes to stdout. The goal of this
+ * is to just serialize the key fields in wt_status so that a
+ * later command can rebuild it and do the printing.
+ *
+ * We DO NOT include the contents of wt_status_state NOR
+ * current branch info. This info easily gets stale and
+ * is relatively quick for the status consumer to compute
+ * as necessary.
+ */
+void wt_status_serialize_v1(struct wt_status *s)
+{
+	int fd = 1; /* we always write to stdout */
+	struct string_list_item *iter;
+	int k;
+
+	/*
+	 * version header must be first line.
+ */ + packet_write_fmt(fd, "version 1\n"); + + wt_serialize_v1_header(s, fd); + + if (s->change.nr > 0) { + packet_write_fmt(fd, "changed %d\n", s->change.nr); + for (k = 0; k < s->change.nr; k++) { + iter = &(s->change.items[k]); + wt_serialize_v1_changed(s, fd, iter); + } + packet_flush(fd); + } + + if (s->untracked.nr > 0) { + packet_write_fmt(fd, "untracked %d\n", s->untracked.nr); + for (k = 0; k < s->untracked.nr; k++) { + iter = &(s->untracked.items[k]); + wt_serialize_v1_untracked(s, fd, iter); + } + packet_flush(fd); + } + + if (s->ignored.nr > 0) { + packet_write_fmt(fd, "ignored %d\n", s->ignored.nr); + for (k = 0; k < s->ignored.nr; k++) { + iter = &(s->ignored.items[k]); + wt_serialize_v1_ignored(s, fd, iter); + } + packet_flush(fd); + } +} diff --git a/wt-status.c b/wt-status.c index 98dfa6f73f9d7c..68f34b1240d8e4 100644 --- a/wt-status.c +++ b/wt-status.c @@ -707,6 +707,9 @@ static void wt_status_collect_untracked(struct wt_status *s) if (s->show_untracked_files != SHOW_ALL_UNTRACKED_FILES) dir.flags |= DIR_SHOW_OTHER_DIRECTORIES | DIR_HIDE_EMPTY_DIRECTORIES; + if (s->show_untracked_files == SHOW_COMPLETE_UNTRACKED_FILES) + dir.flags |= DIR_KEEP_UNTRACKED_CONTENTS; + if (s->show_ignored_mode) { dir.flags |= DIR_SHOW_IGNORED_TOO; @@ -2388,6 +2391,9 @@ void wt_status_print(struct wt_status *s) case STATUS_FORMAT_LONG: wt_longstatus_print(s); break; + case STATUS_FORMAT_SERIALIZE_V1: + wt_status_serialize_v1(s); + break; } trace2_region_leave("status", "print", s->repo); diff --git a/wt-status.h b/wt-status.h index 73ab5d4da1c0b4..c9ea11033a1849 100644 --- a/wt-status.h +++ b/wt-status.h @@ -4,6 +4,7 @@ #include "string-list.h" #include "color.h" #include "pathspec.h" +#include "pkt-line.h" #include "remote.h" struct repository; @@ -25,7 +26,8 @@ enum color_wt_status { enum untracked_status_type { SHOW_NO_UNTRACKED_FILES, SHOW_NORMAL_UNTRACKED_FILES, - SHOW_ALL_UNTRACKED_FILES + SHOW_ALL_UNTRACKED_FILES, + SHOW_COMPLETE_UNTRACKED_FILES, }; enum show_ignored_type { @@ -73,6 +75,7 @@ enum wt_status_format { STATUS_FORMAT_SHORT, STATUS_FORMAT_PORCELAIN, STATUS_FORMAT_PORCELAIN_V2, + STATUS_FORMAT_SERIALIZE_V1, STATUS_FORMAT_UNSPECIFIED }; @@ -174,4 +177,51 @@ int require_clean_work_tree(struct repository *repo, int ignore_submodules, int gently); +#define DESERIALIZE_OK 0 +#define DESERIALIZE_ERR 1 + +struct wt_status_serialize_data_fixed +{ + uint32_t worktree_status; + uint32_t index_status; + uint32_t stagemask; + uint32_t rename_score; + uint32_t mode_head; + uint32_t mode_index; + uint32_t mode_worktree; + uint32_t dirty_submodule; + uint32_t new_submodule_commits; + struct object_id oid_head; + struct object_id oid_index; +}; + +/* + * Consume the maximum amount of data possible in a + * packet-line record. This is overkill because we + * have at most 2 relative pathnames, but means we + * don't need to allocate a variable length structure. + */ +struct wt_status_serialize_data +{ + struct wt_status_serialize_data_fixed fixed; + char variant[LARGE_PACKET_DATA_MAX + - sizeof(struct wt_status_serialize_data_fixed)]; +}; + +/* + * Serialize computed status scan results using "version 1" format + * to the given file. + */ +void wt_status_serialize_v1(struct wt_status *s); + +/* + * Deserialize existing status results from the given file and + * populate a (new) "struct wt_status". Use the contents of "cmd_s" + * (computed from the command line arguments) to verify that the + * cached data is compatible and overlay various display-related + * fields. 
+ */ +int wt_status_deserialize(const struct wt_status *cmd_s, + const char *path); + #endif /* STATUS_H */ From ecaf77217fc11962dc665ecf6c1e000d9c2b4a65 Mon Sep 17 00:00:00 2001 From: Jameson Miller Date: Wed, 10 Jan 2018 11:56:26 -0500 Subject: [PATCH 019/104] Teach ahead-behind and serialized status to play nicely together --- t/t7524-serialized-status.sh | 26 ++++++++++++++++++++++++++ wt-status-deserialize.c | 3 ++- wt-status-serialize.c | 1 + 3 files changed, 29 insertions(+), 1 deletion(-) diff --git a/t/t7524-serialized-status.sh b/t/t7524-serialized-status.sh index a0fc1bc2daf6ec..6bcd301dbb7913 100755 --- a/t/t7524-serialized-status.sh +++ b/t/t7524-serialized-status.sh @@ -137,4 +137,30 @@ test_expect_success 'verify serialized status handles path scopes' ' test_i18ncmp expect output ' +test_expect_success 'verify no-ahead-behind and serialized status integration' ' + test_when_finished "rm serialized_status.dat new_change.txt output" && + cat >expect <<-\EOF && + # branch.oid 68d4a437ea4c2de65800f48c053d4d543b55c410 + # branch.head alt_branch + # branch.upstream master + # branch.ab +1 -0 + ? expect + ? serialized_status.dat + ? untracked/ + ? untracked_1.txt + EOF + + git checkout -b alt_branch master --track >/dev/null && + touch alt_branch_changes.txt && + git add alt_branch_changes.txt && + test_tick && + git commit -m"New commit on alt branch" && + + git status --untracked-files=complete --ignored=matching --serialize >serialized_status.dat && + touch new_change.txt && + + git -c status.aheadBehind=false status --porcelain=v2 --branch --ahead-behind --deserialize=serialized_status.dat >output && + test_i18ncmp expect output +' + test_done diff --git a/wt-status-deserialize.c b/wt-status-deserialize.c index 317b88fddc1ec0..cc4ae31d7a54b3 100644 --- a/wt-status-deserialize.c +++ b/wt-status-deserialize.c @@ -510,6 +510,7 @@ static int wt_deserialize_fd(const struct wt_status *cmd_s, struct wt_status *de /* show_branch */ /* show_stash */ /* hints */ + /* ahead_behind_flags */ if (cmd_s->detect_rename != des_s->detect_rename) { trace_printf_key(&trace_deserialize, "reject: detect_rename"); return DESERIALIZE_ERR; @@ -548,6 +549,7 @@ static int wt_deserialize_fd(const struct wt_status *cmd_s, struct wt_status *de des_s->show_branch = cmd_s->show_branch; des_s->show_stash = cmd_s->show_stash; /* hints */ + des_s->ahead_behind_flags = cmd_s->ahead_behind_flags; des_s->status_format = cmd_s->status_format; des_s->fp = cmd_s->fp; if (cmd_s->prefix && *cmd_s->prefix) @@ -556,7 +558,6 @@ static int wt_deserialize_fd(const struct wt_status *cmd_s, struct wt_status *de return DESERIALIZE_OK; } - /* * Read raw serialized status data from the given file * diff --git a/wt-status-serialize.c b/wt-status-serialize.c index 60da2d17d16182..a93d2dc9effe4e 100644 --- a/wt-status-serialize.c +++ b/wt-status-serialize.c @@ -48,6 +48,7 @@ static void wt_serialize_v1_header(struct wt_status *s, int fd) /* show_branch */ /* show_stash */ packet_write_fmt(fd, "hints %d\n", s->hints); + /* ahead_behind_flags */ packet_write_fmt(fd, "detect_rename %d\n", s->detect_rename); packet_write_fmt(fd, "rename_score %d\n", s->rename_score); packet_write_fmt(fd, "rename_limit %d\n", s->rename_limit); From 146e74d0c6139f8a449bf0c8155bed686d759569 Mon Sep 17 00:00:00 2001 From: Jeff Hostetler Date: Fri, 2 Feb 2018 14:17:05 -0500 Subject: [PATCH 020/104] status: serialize to path Teach status serialization to take an optional pathname on the command line to direct that cache data be written there rather than to 
stdout. When used this way, normal status results will still be
written to stdout. When no path is given, only binary serialization
data is written to stdout.

Usage: git status --serialize[=<path>]

Signed-off-by: Jeff Hostetler
---
 Documentation/git-status.txt | 10 ++++++----
 builtin/commit.c | 36 +++++++++++++++++++++++++++---------
 t/t7524-serialized-status.sh | 23 +++++++++++++++++++++++
 wt-status-serialize.c | 5 ++---
 wt-status.c | 2 +-
 wt-status.h | 2 +-
 6 files changed, 60 insertions(+), 18 deletions(-)

diff --git a/Documentation/git-status.txt b/Documentation/git-status.txt
index 990bba01a980c7..c5b21372c2f301 100644
--- a/Documentation/git-status.txt
+++ b/Documentation/git-status.txt
@@ -156,10 +156,12 @@ ignored, then the directory is not shown, but all contents are shown.
 update it afterwards if any changes were detected. Defaults to
 `--lock-index`.
---serialize[=<version>]::
- (EXPERIMENTAL) Serialize raw status results to stdout in a
- format suitable for use by `--deserialize`. Valid values for
- `<version>` are "1" and "v1".
+--serialize[=<path>]::
+ (EXPERIMENTAL) Serialize raw status results to a file or stdout
+ in a format suitable for use by `--deserialize`. If a path is
+ given, serialize data will be written to that path *and* normal
+ status output will be written to stdout. If path is omitted,
+ only binary serialization data will be written to stdout.
 --deserialize[=<path>]::
 (EXPERIMENTAL) Deserialize raw status results from a file or
diff --git a/builtin/commit.c b/builtin/commit.c
index 35cf86068b8403..d191f151f1c367 100644
--- a/builtin/commit.c
+++ b/builtin/commit.c
@@ -149,26 +149,34 @@ static int opt_parse_porcelain(const struct option *opt, const char *arg, int un
 }
 static int do_serialize = 0;
+static char *serialize_path = NULL;
+
 static int do_implicit_deserialize = 0;
 static int do_explicit_deserialize = 0;
 static char *deserialize_path = NULL;
 /*
- * --serialize | --serialize=1 | --serialize=v1
+ * --serialize | --serialize=<path>
+ *
+ * Request that we serialize status output rather than or in addition to
+ * printing in any of the established formats.
+ *
+ * Without a path, we write binary serialization data to stdout (and omit
+ * the normal status output).
- * Request that we serialize our output rather than printing in
- * any of the established formats. Optionally specify serialization
- * version.
+ * With a path, we write binary serialization data to the <path> and then
+ * write normal status output.
*/ static int opt_parse_serialize(const struct option *opt, const char *arg, int unset) { enum wt_status_format *value = (enum wt_status_format *)opt->value; if (unset || !arg) *value = STATUS_FORMAT_SERIALIZE_V1; - else if (!strcmp(arg, "v1") || !strcmp(arg, "1")) - *value = STATUS_FORMAT_SERIALIZE_V1; - else - die("unsupported serialize version '%s'", arg); + + if (arg) { + free(serialize_path); + serialize_path = xstrdup(arg); + } if (do_explicit_deserialize) die("cannot mix --serialize and --deserialize"); @@ -1458,7 +1466,7 @@ int cmd_status(int argc, const char **argv, const char *prefix) N_("version"), N_("machine-readable output"), PARSE_OPT_OPTARG, opt_parse_porcelain), { OPTION_CALLBACK, 0, "serialize", &status_format, - N_("version"), N_("serialize raw status data to stdout"), + N_("path"), N_("serialize raw status data to path or stdout"), PARSE_OPT_OPTARG | PARSE_OPT_NONEG, opt_parse_serialize }, { OPTION_CALLBACK, 0, "deserialize", NULL, N_("path"), N_("deserialize raw status data from file"), @@ -1600,6 +1608,16 @@ int cmd_status(int argc, const char **argv, const char *prefix) if (s.relative_paths) s.prefix = prefix; + if (serialize_path) { + int fd_serialize = xopen(serialize_path, + O_WRONLY | O_CREAT | O_TRUNC, 0666); + if (fd_serialize < 0) + die_errno(_("could not serialize to '%s'"), + serialize_path); + wt_status_serialize_v1(fd_serialize, &s); + close(fd_serialize); + } + wt_status_print(&s); wt_status_collect_free_buffers(&s); diff --git a/t/t7524-serialized-status.sh b/t/t7524-serialized-status.sh index 6bcd301dbb7913..d1ca3af3f96546 100755 --- a/t/t7524-serialized-status.sh +++ b/t/t7524-serialized-status.sh @@ -163,4 +163,27 @@ test_expect_success 'verify no-ahead-behind and serialized status integration' ' test_i18ncmp expect output ' +test_expect_success 'verify new --serialize=path mode' ' + #test_when_finished "rm serialized_status.dat expect new_change.txt output.1 output.2" && + cat >expect <<-\EOF && + ? expect + ? output.1 + ? untracked/ + ? untracked_1.txt + EOF + + git checkout -b serialize_path_branch master --track >/dev/null && + touch alt_branch_changes.txt && + git add alt_branch_changes.txt && + test_tick && + git commit -m"New commit on serialize_path_branch" && + + git status --porcelain=v2 --serialize=serialized_status.dat >output.1 && + touch new_change.txt && + + git status --porcelain=v2 --deserialize=serialized_status.dat >output.2 && + test_i18ncmp expect output.1 && + test_i18ncmp expect output.2 +' + test_done diff --git a/wt-status-serialize.c b/wt-status-serialize.c index a93d2dc9effe4e..39d601ebb8b3fb 100644 --- a/wt-status-serialize.c +++ b/wt-status-serialize.c @@ -163,7 +163,7 @@ static inline void wt_serialize_v1_ignored(struct wt_status *s, int fd, } /* - * Serialize the list of changes to stdout. The goal of this + * Serialize the list of changes to the given file. The goal of this * is to just serialize the key fields in wt_status so that a * later command can rebuilt it and do the printing. * @@ -172,9 +172,8 @@ static inline void wt_serialize_v1_ignored(struct wt_status *s, int fd, * is relatively quick for the status consumer to compute * as necessary. 
 */
-void wt_status_serialize_v1(struct wt_status *s)
+void wt_status_serialize_v1(int fd, struct wt_status *s)
 {
- int fd = 1; /* we always write to stdout */
 struct string_list_item *iter;
 int k;
diff --git a/wt-status.c b/wt-status.c
index 68f34b1240d8e4..1e6caf2412ecb1 100644
--- a/wt-status.c
+++ b/wt-status.c
@@ -2392,7 +2392,7 @@ void wt_status_print(struct wt_status *s)
 wt_longstatus_print(s);
 break;
 case STATUS_FORMAT_SERIALIZE_V1:
- wt_status_serialize_v1(s);
+ wt_status_serialize_v1(1, s);
 break;
 }
diff --git a/wt-status.h b/wt-status.h
index c9ea11033a1849..1223543ce9ec58 100644
--- a/wt-status.h
+++ b/wt-status.h
@@ -212,7 +212,7 @@ struct wt_status_serialize_data
 * Serialize computed status scan results using "version 1" format
 * to the given file.
 */
-void wt_status_serialize_v1(struct wt_status *s);
+void wt_status_serialize_v1(int fd, struct wt_status *s);
 /*
 * Deserialize existing status results from the given file and
From 7eb4ebe27c2deae6ffe621b540710e13eeedb51f Mon Sep 17 00:00:00 2001
From: Alejandro Pauly
Date: Mon, 10 Apr 2017 13:26:14 -0400
Subject: [PATCH 021/104] Pass PID of git process to hooks.

Signed-off-by: Alejandro Pauly
---
 git.c | 1 +
 t/t0400-pre-command-hook.sh | 3 ++-
 t/t0401-post-command-hook.sh | 3 ++-
 3 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/git.c b/git.c
index 27a81a83b0c0d1..f5a872a8e4449d 100644
--- a/git.c
+++ b/git.c
@@ -426,6 +426,7 @@ static int run_pre_command_hook(const char **argv)
 /* call the hook proc */
 argv_array_pushv(&sargv, argv);
+ argv_array_pushf(&sargv, "--git-pid=%"PRIuMAX, (uintmax_t)getpid());
 ret = run_hook_argv(NULL, "pre-command", sargv.argv);
 if (!ret)
diff --git a/t/t0400-pre-command-hook.sh b/t/t0400-pre-command-hook.sh
index 83c453c9643eae..f04a55a695bc97 100755
--- a/t/t0400-pre-command-hook.sh
+++ b/t/t0400-pre-command-hook.sh
@@ -13,7 +13,8 @@ test_expect_success 'with no hook' '
 test_expect_success 'with succeeding hook' '
 mkdir -p .git/hooks &&
 write_script .git/hooks/pre-command <<-EOF &&
- echo "\$*" >\$(git rev-parse --git-dir)/pre-command.out
+ echo "\$*" | sed "s/ --git-pid=[0-9]*//" \
+ >\$(git rev-parse --git-dir)/pre-command.out
 EOF
 echo "second" >> file &&
 git add file &&
diff --git a/t/t0401-post-command-hook.sh b/t/t0401-post-command-hook.sh
index 64646f7ad03b57..fcbfc4a0c79c1e 100755
--- a/t/t0401-post-command-hook.sh
+++ b/t/t0401-post-command-hook.sh
@@ -13,7 +13,8 @@ test_expect_success 'with no hook' '
 test_expect_success 'with succeeding hook' '
 mkdir -p .git/hooks &&
 write_script .git/hooks/post-command <<-EOF &&
- echo "\$*" >\$(git rev-parse --git-dir)/post-command.out
+ echo "\$*" | sed "s/ --git-pid=[0-9]*//" \
+ >\$(git rev-parse --git-dir)/post-command.out
 EOF
 echo "second" >> file &&
 git add file &&
From ebe420e1ddb44cdb65cd0e2023816cadc43e652f Mon Sep 17 00:00:00 2001
From: Jeff Hostetler
Date: Wed, 7 Feb 2018 10:59:03 -0500
Subject: [PATCH 022/104] status: reject deserialize in V2 and conflicts

Teach the status deserialize code to reject the status cache when
printing in porcelain V2 and there are unresolved conflicts in the
cache file. A follow-on task might extend the cache format to include
this additional data.

See code for longer explanation.
Signed-off-by: Jeff Hostetler --- t/t7524-serialized-status.sh | 84 +++++++++++++++++++++++++++++++++++- wt-status-deserialize.c | 28 +++++++++++- 2 files changed, 109 insertions(+), 3 deletions(-) diff --git a/t/t7524-serialized-status.sh b/t/t7524-serialized-status.sh index d1ca3af3f96546..2e01b988985a00 100755 --- a/t/t7524-serialized-status.sh +++ b/t/t7524-serialized-status.sh @@ -164,7 +164,7 @@ test_expect_success 'verify no-ahead-behind and serialized status integration' ' ' test_expect_success 'verify new --serialize=path mode' ' - #test_when_finished "rm serialized_status.dat expect new_change.txt output.1 output.2" && + test_when_finished "rm serialized_status.dat expect new_change.txt output.1 output.2" && cat >expect <<-\EOF && ? expect ? output.1 @@ -186,4 +186,86 @@ test_expect_success 'verify new --serialize=path mode' ' test_i18ncmp expect output.2 ' +test_expect_success 'merge conflicts' ' + + # create a merge conflict. + + git init conflicts && + echo x >conflicts/x.txt && + git -C conflicts add x.txt && + git -C conflicts commit -m x && + git -C conflicts branch a && + git -C conflicts branch b && + git -C conflicts checkout a && + echo y >conflicts/x.txt && + git -C conflicts add x.txt && + git -C conflicts commit -m a && + git -C conflicts checkout b && + echo z >conflicts/x.txt && + git -C conflicts add x.txt && + git -C conflicts commit -m b && + test_must_fail git -C conflicts merge --no-commit a && + + # verify that regular status correctly identifies it + # in each format. + + cat >expect.v2 <observed.v2 && + test_cmp expect.v2 observed.v2 && + + cat >expect.long <..." to mark resolution) + both modified: x.txt + +no changes added to commit (use "git add" and/or "git commit -a") +EOF + git -C conflicts status --long >observed.long && + test_i18ncmp expect.long observed.long && + + cat >expect.short <observed.short && + test_cmp expect.short observed.short && + + # save status data in serialized cache. + + git -C conflicts status --serialize >serialized && + + # make some dirt in the worktree so we can tell whether subsequent + # status commands used the cached data or did a fresh status. + + echo dirt >conflicts/dirt.txt && + + # run status using the cached data. + + git -C conflicts status --long --deserialize=../serialized >observed.long && + test_i18ncmp expect.long observed.long && + + git -C conflicts status --short --deserialize=../serialized >observed.short && + test_cmp expect.short observed.short && + + # currently, the cached data does not have enough information about + # merge conflicts for porcelain V2 format. (And V2 format looks at + # the index to get that data, but the whole point of the serialization + # is to avoid reading the index unnecessarily.) So V2 always rejects + # the cached data when there is an unresolved conflict. + + cat >expect.v2.dirty <observed.v2 && + test_cmp expect.v2.dirty observed.v2 + +' + test_done diff --git a/wt-status-deserialize.c b/wt-status-deserialize.c index cc4ae31d7a54b3..a4b5a37d0120dc 100644 --- a/wt-status-deserialize.c +++ b/wt-status-deserialize.c @@ -176,7 +176,8 @@ static int wt_deserialize_v1_header(struct wt_status *s, int fd) /* * Build a string-list of (count) lines from the input. 
*/ -static int wt_deserialize_v1_changed_items(struct wt_status *s, int fd, int count) +static int wt_deserialize_v1_changed_items(const struct wt_status *cmd_s, + struct wt_status *s, int fd, int count) { struct wt_status_serialize_data *sd; char *p; @@ -231,6 +232,29 @@ static int wt_deserialize_v1_changed_items(struct wt_status *s, int fd, int coun oid_to_hex(&d->oid_index), item->string, (d->rename_source ? d->rename_source : "")); + + if (d->stagemask && + cmd_s->status_format == STATUS_FORMAT_PORCELAIN_V2) { + /* + * We have an unresolved conflict and the user wants + * to see porcelain V2 output. The cached status data + * does not contain enough information for V2 (because + * the main status computation does not capture it). + * We only get a single change record for the file with + * a single SHA -- we don't get the stage [123] mode + * and SHA data. The V2 detail-line print code looks + * up this information directly from the index. The + * whole point of this serialization cache is to avoid + * reading the index, so the V2 print code gets zeros. + * So we reject the status cache and let the fallback + * code run. + */ + trace_printf_key( + &trace_deserialize, + "reject: V2 format and umerged file: %s", + item->string); + return DESERIALIZE_ERR; + } } return DESERIALIZE_OK; @@ -381,7 +405,7 @@ static int wt_deserialize_v1(const struct wt_status *cmd_s, struct wt_status *s, while ((line = my_packet_read_line(fd, &line_len))) { if (skip_prefix(line, "changed ", &arg)) { nr_changed = (int)strtol(arg, NULL, 10); - if (wt_deserialize_v1_changed_items(s, fd, nr_changed) + if (wt_deserialize_v1_changed_items(cmd_s, s, fd, nr_changed) == DESERIALIZE_ERR) return DESERIALIZE_ERR; continue; From 5df40b350e8084d71f07139080c00c5b1b3aaa55 Mon Sep 17 00:00:00 2001 From: Johannes Schindelin Date: Tue, 8 Aug 2017 00:27:50 +0200 Subject: [PATCH 023/104] pre-command: always respect core.hooksPath We need to respect that config setting even if we already know that we have a repository, but have not yet read the config. The regression test was written by Alejandro Pauly. 
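As a rough illustration of the behavior this fixes (the hook directory
below is hypothetical), a pre-command hook that lives outside of
.git/hooks should now fire even when core.hooksPath is only supplied on
the command line and the config has not otherwise been read yet:

    mkdir -p /tmp/alternate-hooks &&
    printf '#!/bin/sh\necho alternate >"$(git rev-parse --git-dir)/pre-command.out"\n' \
        >/tmp/alternate-hooks/pre-command &&
    chmod +x /tmp/alternate-hooks/pre-command &&
    git -c core.hooksPath=/tmp/alternate-hooks status &&
    cat "$(git rev-parse --git-dir)/pre-command.out"  # prints "alternate"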
Signed-off-by: Johannes Schindelin --- run-command.c | 13 +++++++++++-- t/t0400-pre-command-hook.sh | 11 +++++++++++ 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/run-command.c b/run-command.c index 29067f9fd9286e..d9b549a1d18e5b 100644 --- a/run-command.c +++ b/run-command.c @@ -1358,9 +1358,18 @@ const char *find_hook(const char *name) static struct strbuf path = STRBUF_INIT; strbuf_reset(&path); - if (have_git_dir()) + if (have_git_dir()) { + static int forced_config; + + if (!forced_config) { + if (!git_hooks_path) + git_config_get_pathname("core.hookspath", + &git_hooks_path); + forced_config = 1; + } + strbuf_git_path(&path, "hooks/%s", name); - else if (!hook_path_early(name, &path)) + } else if (!hook_path_early(name, &path)) return NULL; if (access(path.buf, X_OK) < 0) { diff --git a/t/t0400-pre-command-hook.sh b/t/t0400-pre-command-hook.sh index f04a55a695bc97..f2a9115e299385 100755 --- a/t/t0400-pre-command-hook.sh +++ b/t/t0400-pre-command-hook.sh @@ -55,4 +55,15 @@ test_expect_success 'in a subdirectory, using an alias' ' test_line_count = 2 sub/log ' +test_expect_success 'with core.hooksPath' ' + mkdir -p .git/alternateHooks && + write_script .git/alternateHooks/pre-command <<-EOF && + echo "alternate" >\$(git rev-parse --git-dir)/pre-command.out + EOF + write_script .git/hooks/pre-command <<-EOF && + echo "original" >\$(git rev-parse --git-dir)/pre-command.out + EOF + git -c core.hooksPath=.git/alternateHooks status && + test "alternate" = "$(cat .git/pre-command.out)" +' test_done From a035faeed99f589c1811567c7daf6e235c97bb6b Mon Sep 17 00:00:00 2001 From: Jeff Hostetler Date: Wed, 25 Jul 2018 12:03:22 -0400 Subject: [PATCH 024/104] status: fix rename reporting when using serialization cache Fix "git status --deserialize" to correctly report both pathnames for renames. Add a test case to verify. A change was made upstream that added an additional "rename_status" field to the "struct wt_status_change_data" structure. It is used during the various print routines to decide if 2 pathnames need to be printed. 5134ccde642ae9ed6a244c92864c26734d100f4c wt-status.c: rename rename-related fields in wt_status_change_data The fix here is to add that field to the status cache data. 
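A hedged sketch of the symptom this addresses (file and cache names are
illustrative): before this fix, the deserialized output dropped one side
of the rename because rename_status was not round-tripped through the
cache.

    git mv OLDNAME NEWNAME &&
    git status --serialize=renamed.dat >/dev/null &&
    git status --deserialize=renamed.dat
    # with the fix, both pathnames survive the cache round-trip:
    #   renamed:    OLDNAME -> NEWNAME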
Signed-off-by: Jeff Hostetler --- t/t7524-serialized-status.sh | 12 ++++++++++++ wt-status-deserialize.c | 4 +++- wt-status-serialize.c | 4 +++- wt-status.h | 1 + 4 files changed, 19 insertions(+), 2 deletions(-) diff --git a/t/t7524-serialized-status.sh b/t/t7524-serialized-status.sh index 2e01b988985a00..c0c2a4f2c4dd1a 100755 --- a/t/t7524-serialized-status.sh +++ b/t/t7524-serialized-status.sh @@ -268,4 +268,16 @@ EOF ' +test_expect_success 'renames' ' + git init rename_test && + echo OLDNAME >rename_test/OLDNAME && + git -C rename_test add OLDNAME && + git -C rename_test commit -m OLDNAME && + git -C rename_test mv OLDNAME NEWNAME && + git -C rename_test status --serialize=renamed.dat >output.1 && + echo DIRT >rename_test/DIRT && + git -C rename_test status --deserialize=renamed.dat >output.2 && + test_i18ncmp output.1 output.2 +' + test_done diff --git a/wt-status-deserialize.c b/wt-status-deserialize.c index a4b5a37d0120dc..ee00c1b3a366f8 100644 --- a/wt-status-deserialize.c +++ b/wt-status-deserialize.c @@ -200,6 +200,7 @@ static int wt_deserialize_v1_changed_items(const struct wt_status *cmd_s, d->worktree_status = ntohl(sd->fixed.worktree_status); d->index_status = ntohl(sd->fixed.index_status); d->stagemask = ntohl(sd->fixed.stagemask); + d->rename_status = ntohl(sd->fixed.rename_status); d->rename_score = ntohl(sd->fixed.rename_score); d->mode_head = ntohl(sd->fixed.mode_head); d->mode_index = ntohl(sd->fixed.mode_index); @@ -218,10 +219,11 @@ static int wt_deserialize_v1_changed_items(const struct wt_status *cmd_s, trace_printf_key( &trace_deserialize, - "change: %d %d %d %d %o %o %o %d %d %s %s '%s' '%s'", + "change: %d %d %d %d %d %o %o %o %d %d %s %s '%s' '%s'", d->worktree_status, d->index_status, d->stagemask, + d->rename_status, d->rename_score, d->mode_head, d->mode_index, diff --git a/wt-status-serialize.c b/wt-status-serialize.c index 39d601ebb8b3fb..6e0d85c60b3e74 100644 --- a/wt-status-serialize.c +++ b/wt-status-serialize.c @@ -79,10 +79,11 @@ static inline void wt_serialize_v1_changed(struct wt_status *s, int fd, int len_path, len_rename_source; trace_printf_key(&trace_serialize, - "change: %d %d %d %d %o %o %o %d %d %s %s '%s' '%s'", + "change: %d %d %d %d %d %o %o %o %d %d %s %s '%s' '%s'", d->worktree_status, d->index_status, d->stagemask, + d->rename_status, d->rename_score, d->mode_head, d->mode_index, @@ -97,6 +98,7 @@ static inline void wt_serialize_v1_changed(struct wt_status *s, int fd, sd.fixed.worktree_status = htonl(d->worktree_status); sd.fixed.index_status = htonl(d->index_status); sd.fixed.stagemask = htonl(d->stagemask); + sd.fixed.rename_status = htonl(d->rename_status); sd.fixed.rename_score = htonl(d->rename_score); sd.fixed.mode_head = htonl(d->mode_head); sd.fixed.mode_index = htonl(d->mode_index); diff --git a/wt-status.h b/wt-status.h index 1223543ce9ec58..47c79045aeaee7 100644 --- a/wt-status.h +++ b/wt-status.h @@ -185,6 +185,7 @@ struct wt_status_serialize_data_fixed uint32_t worktree_status; uint32_t index_status; uint32_t stagemask; + uint32_t rename_status; uint32_t rename_score; uint32_t mode_head; uint32_t mode_index; From 240417aef1ce5946fbcb9a1ab114bdd9a8782cd6 Mon Sep 17 00:00:00 2001 From: Kevin Willford Date: Wed, 22 Feb 2017 12:50:43 -0700 Subject: [PATCH 025/104] sparse-checkout: update files with a modify/delete conflict When using the sparse-checkout feature, the file might not be on disk because the skip-worktree bit is on. 
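For background, a quick way to observe the skip-worktree bit that this
change keys on is `git ls-files -t`, where an "S" tag marks entries that
are tracked but may be absent from the working tree (a sketch; the file
names come from the test below):

    git ls-files -t
    # S file.t   <- skip-worktree: tracked, possibly not on disk
    # H a.t      <- checked out as usual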
Signed-off-by: Kevin Willford --- merge-recursive.c | 2 +- t/t7615-merge-sparse-checkout.sh | 30 ++++++++++++++++++++++++++++++ 2 files changed, 31 insertions(+), 1 deletion(-) create mode 100755 t/t7615-merge-sparse-checkout.sh diff --git a/merge-recursive.c b/merge-recursive.c index cb210b4f7a7a6a..22dc2837458391 100644 --- a/merge-recursive.c +++ b/merge-recursive.c @@ -1500,7 +1500,7 @@ static int handle_change_delete(struct merge_options *opt, * path. We could call update_file_flags() with update_cache=0 * and update_wd=0, but that's a no-op. */ - if (change_branch != opt->branch1 || alt_path) + if (change_branch != opt->branch1 || alt_path || !file_exists(update_path)) ret = update_file(opt, 0, changed, update_path); } free(alt_path); diff --git a/t/t7615-merge-sparse-checkout.sh b/t/t7615-merge-sparse-checkout.sh new file mode 100755 index 00000000000000..8e8208a61d39eb --- /dev/null +++ b/t/t7615-merge-sparse-checkout.sh @@ -0,0 +1,30 @@ +#!/bin/sh + +test_description='merge can handle sparse-checkout' + +. ./test-lib.sh + +# merges with conflicts + +test_expect_success 'setup' ' + test_commit a && + test_commit file && + git checkout -b delete-file && + git rm file.t && + test_tick && + git commit -m "remove file" && + git checkout master && + test_commit modify file.t changed +' + +test_expect_success 'merge conflict deleted file and modified' ' + echo "/a.t" >.git/info/sparse-checkout && + test_config core.sparsecheckout true && + git checkout -f && + test_path_is_missing file.t && + test_must_fail git merge delete-file && + test_path_is_file file.t && + test "changed" = "$(cat file.t)" +' + +test_done From b73ece7e62611b4bed0653797b237a5acc0f6f7d Mon Sep 17 00:00:00 2001 From: Jeff Hostetler Date: Wed, 26 Sep 2018 12:29:26 -0400 Subject: [PATCH 026/104] gvfs:trace2:data: add trace2 tracing around read_object_process Add trace2 region around read_object_process to collect time spent waiting for missing objects to be dynamically fetched. 
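As a sketch of how to observe the new region (the log path is
illustrative), the trace2 performance target emits paired events for it;
the exact column layout varies, but the category and label are stable:

    GIT_TRACE2_PERF=/tmp/git-perf.log git log -1 >/dev/null
    # /tmp/git-perf.log will contain lines roughly like:
    #   ... | region_enter | ... | subprocess | ... | label:read_object
    #   ... | region_leave | ... | subprocess | ... | label:read_object | result 0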
Signed-off-by: Jeff Hostetler --- sha1-file.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/sha1-file.c b/sha1-file.c index d15c1d1b460271..0530b99cce2498 100644 --- a/sha1-file.c +++ b/sha1-file.c @@ -916,6 +916,8 @@ static int read_object_process(const struct object_id *oid) start = getnanotime(); + trace2_region_enter("subprocess", "read_object", the_repository); + if (!subprocess_map_initialized) { subprocess_map_initialized = 1; hashmap_init(&subprocess_map, (hashmap_cmp_fn)cmd2process_cmp, @@ -932,13 +934,16 @@ static int read_object_process(const struct object_id *oid) if (subprocess_start(&subprocess_map, &entry->subprocess, cmd, start_read_object_fn)) { free(entry); - return -1; + err = -1; + goto leave_region; } } process = &entry->subprocess.process; - if (!(CAP_GET & entry->supported_capabilities)) - return -1; + if (!(CAP_GET & entry->supported_capabilities)) { + err = -1; + goto leave_region; + } sigchain_push(SIGPIPE, SIG_IGN); @@ -987,6 +992,10 @@ static int read_object_process(const struct object_id *oid) trace_performance_since(start, "read_object_process"); +leave_region: + trace2_region_leave_printf("subprocess", "read_object", the_repository, + "result %d", err); + return err; } From 2d406779fafbb55127847be6192d1e1aa963e78b Mon Sep 17 00:00:00 2001 From: Jeff Hostetler Date: Fri, 20 Jul 2018 12:08:50 -0400 Subject: [PATCH 027/104] serialize-status: serialize global and repo-local exclude file metadata Changes to the global or repo-local excludes files can change the results returned by "git status" for untracked files. Therefore, it is important that the exclude-file values used during serialization are still current at the time of deserialization. Teach "git status --serialize" to report metadata on the user's global exclude file (which defaults to "$XDG_HOME/git/ignore") and for the repo-local excludes file (which is in ".git/info/excludes"). Serialize will record the pathnames and mtimes for these files in the serialization header (next to the mtime data for the .git/index file). Teach "git status --deserialize" to validate this new metadata. If either exclude file has changed since the serialization-cache-file was written, then deserialize will reject the cache file and force a full/normal status run. Signed-off-by: Jeff Hostetler --- wt-status-deserialize.c | 84 ++++++++++++++++++++++++++++ wt-status-serialize.c | 118 ++++++++++++++++++++++++++++++++++++++++ wt-status.h | 8 +++ 3 files changed, 210 insertions(+) diff --git a/wt-status-deserialize.c b/wt-status-deserialize.c index ee00c1b3a366f8..5c47bf756b20da 100644 --- a/wt-status-deserialize.c +++ b/wt-status-deserialize.c @@ -65,12 +65,69 @@ static int my_validate_index(const struct cache_time *mtime_reported) return DESERIALIZE_OK; } +/* + * Use the given key and exclude pathname to compute a serialization header + * reflecting the current contents on disk. See if that matches the value + * computed for this key when the cache was written. Reject the cache if + * anything has changed. + */ +static int my_validate_excludes(const char *path, const char *key, const char *line) +{ + struct strbuf sb = STRBUF_INIT; + int r; + + wt_serialize_compute_exclude_header(&sb, key, path); + + r = (strcmp(line, sb.buf) ? 
DESERIALIZE_ERR : DESERIALIZE_OK);
+
+ if (r == DESERIALIZE_ERR)
+ trace_printf_key(&trace_deserialize,
+ "%s changed [cached '%s'][observed '%s']",
+ key, line, sb.buf);
+
+ strbuf_release(&sb);
+ return r;
+}
+
+static int my_parse_core_excludes(const char *line)
+{
+ /*
+ * In dir.c:setup_standard_excludes() they use either the value of
+ * the "core.excludefile" variable (stored in the global "excludes_file"
+ * variable) -or- the default value "$XDG_HOME/git/ignore". This is done
+ * during wt_status_collect_untracked() which we are hoping to not call.
+ *
+ * Fake the setup here.
+ */
+
+ if (excludes_file) {
+ return my_validate_excludes(excludes_file, "core_excludes", line);
+ } else {
+ char *path = xdg_config_home("ignore");
+ int r = my_validate_excludes(path, "core_excludes", line);
+ free(path);
+ return r;
+ }
+}
+
+static int my_parse_repo_excludes(const char *line)
+{
+ char *path = git_pathdup("info/exclude");
+ int r = my_validate_excludes(path, "repo_excludes", line);
+ free(path);
+
+ return r;
+}
+
 static int wt_deserialize_v1_header(struct wt_status *s, int fd)
 {
 struct cache_time index_mtime;
 int line_len, nr_fields;
 const char *line;
 const char *arg;
+ int have_required_index_mtime = 0;
+ int have_required_core_excludes = 0;
+ int have_required_repo_excludes = 0;

 /*
 * parse header lines up to the first flush packet.
@@ -86,6 +143,20 @@ static int wt_deserialize_v1_header(struct wt_status *s, int fd)
 nr_fields, line);
 return DESERIALIZE_ERR;
 }
+ have_required_index_mtime = 1;
+ continue;
+ }
+
+ if (skip_prefix(line, "core_excludes ", &arg)) {
+ if (my_parse_core_excludes(line) != DESERIALIZE_OK)
+ return DESERIALIZE_ERR;
+ have_required_core_excludes = 1;
+ continue;
+ }
+ if (skip_prefix(line, "repo_excludes ", &arg)) {
+ if (my_parse_repo_excludes(line) != DESERIALIZE_OK)
+ return DESERIALIZE_ERR;
+ have_required_repo_excludes = 1;
 continue;
 }

@@ -170,6 +241,19 @@ static int wt_deserialize_v1_header(struct wt_status *s, int fd)
 return DESERIALIZE_ERR;
 }

+ if (!have_required_index_mtime) {
+ trace_printf_key(&trace_deserialize, "missing '%s'", "index_mtime");
+ return DESERIALIZE_ERR;
+ }
+ if (!have_required_core_excludes) {
+ trace_printf_key(&trace_deserialize, "missing '%s'", "core_excludes");
+ return DESERIALIZE_ERR;
+ }
+ if (!have_required_repo_excludes) {
+ trace_printf_key(&trace_deserialize, "missing '%s'", "repo_excludes");
+ return DESERIALIZE_ERR;
+ }
+
 return my_validate_index(&index_mtime);
 }

diff --git a/wt-status-serialize.c b/wt-status-serialize.c
index 6e0d85c60b3e64..f68235593997e4 100644
--- a/wt-status-serialize.c
+++ b/wt-status-serialize.c
@@ -4,6 +4,122 @@

 static struct trace_key trace_serialize = TRACE_KEY_INIT(SERIALIZE);

+/*
+ * Compute header record for exclude file using format:
+ * <key> SP <flag> SP <data> LF
+ */
+void wt_serialize_compute_exclude_header(struct strbuf *sb,
+ const char *key,
+ const char *path)
+{
+ struct stat st;
+ struct stat_data sd;
+
+ memset(&sd, 0, sizeof(sd));
+
+ strbuf_setlen(sb, 0);
+
+ if (!path || !*path) {
+ strbuf_addf(sb, "%s U (unset)", key);
+ } else if (lstat(path, &st) == -1) {
+ if (is_missing_file_error(errno))
+ strbuf_addf(sb, "%s E (not-found) %s", key, path);
+ else
+ strbuf_addf(sb, "%s E (other) %s", key, path);
+ } else {
+ fill_stat_data(&sd, &st);
+ strbuf_addf(sb, "%s F %d %d %s",
+ key, sd.sd_mtime.sec, sd.sd_mtime.nsec, path);
+ }
+}
+
+static void append_exclude_info(int fd, const char *path, const char *key)
+{
+ struct strbuf sb = STRBUF_INIT;
+
+ wt_serialize_compute_exclude_header(&sb, key, path);
+
+ packet_write_fmt(fd, "%s\n", sb.buf);
+
+ strbuf_release(&sb);
+}
+
+static void append_core_excludes_file_info(int fd)
+{
+ /*
+ * Write pathname and mtime of the core/global excludes file to
+ * the status cache header. Since a change in the global excludes
+ * will/may change the results reported by status, the deserialize
+ * code should be able to reject the status cache if the excludes
+ * file changes since when the cache was written.
+ *
+ * The "core.excludefile" setting defaults to $XDG_HOME/git/ignore
+ * and uses a global variable which should have been set during
+ * wt_status_collect_untracked().
+ *
+ * See dir.c:setup_standard_excludes()
+ */
+ append_exclude_info(fd, excludes_file, "core_excludes");
+}
+
+static void append_repo_excludes_file_info(int fd)
+{
+ /*
+ * Likewise, there is a per-repo excludes file in .git/info/excludes
+ * that can change the results reported by status. And the deserialize
+ * code needs to be able to reject the status cache if this file
+ * changes.
+ *
+ * See dir.c:setup_standard_excludes() and git_path_info_excludes().
+ * We replicate the pathname construction here because of the static
+ * variables/functions used in dir.c.
+ */
+ char *path = git_pathdup("info/exclude");
+
+ append_exclude_info(fd, path, "repo_excludes");
+
+ free(path);
+}
+
+/*
+ * WARNING: The status cache attempts to preserve the essential in-memory
+ * status data after a status scan into a "serialization" (aka "status cache")
+ * file. It allows later "git status --deserialize=<path>" instances to
+ * just print the cached status results without scanning the workdir (and
+ * without reading the index).
+ *
+ * The status cache file is valid as long as:
+ * [1] the set of functional command line options are the same (think "-u").
+ * [2] repo-local and user-global configuration settings are compatible.
+ * [3] nothing in the workdir has changed.
+ *
+ * We rely on:
+ * [1.a] We remember the relevant (functional, non-display) command line
+ * arguments in the status cache header.
+ * [2.a] We use the mtime of the .git/index to detect staging changes.
+ * [2.b] We use the mtimes of the excludes files to detect changes that
+ * might affect untracked file reporting.
+ *
+ * But we need external help to verify [3].
+ * [] This includes changes to tracked files.
+ * [] This includes changes to tracked .gitignore files that might change
+ * untracked file reporting.
+ * [] This includes the creation of new, untracked per-directory .gitignore
+ * files that might change untracked file reporting.
+ *
+ * [3.a] On GVFS repos, we rely on the GVFS service (mount) daemon to
+ * watch the filesystem and invalidate (delete) the status cache
+ * when anything changes inside the workdir.
+ *
+ * [3.b] TODO This problem is not solved for non-GVFS repos.
+ * [] It is possible that the untracked-cache index extension
+ * could help with this but that requires status to read the
+ * index to load the extension.
+ * [] It is possible that the new fsmonitor facility could also
+ * provide this information, but that too requires reading the
+ * index.
+ */
+
 /*
 * Write V1 header fields.
 */
@@ -16,6 +132,8 @@ static void wt_serialize_v1_header(struct wt_status *s, int fd)
 packet_write_fmt(fd, "index_mtime %d %d\n",
 s->repo->index->timestamp.sec,
 s->repo->index->timestamp.nsec);
+ append_core_excludes_file_info(fd);
+ append_repo_excludes_file_info(fd);

 /*
 * Write data from wt_status to qualify this status report.
diff --git a/wt-status.h b/wt-status.h
index 47c79045aeaee7..2a499f20dfc754 100644
--- a/wt-status.h
+++ b/wt-status.h
@@ -225,4 +225,12 @@ void wt_status_serialize_v1(int fd, struct wt_status *s);
 int wt_status_deserialize(const struct wt_status *cmd_s,
 const char *path);

+/*
+ * A helper routine for serialize and deserialize to compute
+ * metadata for the user-global and repo-local excludes files.
+ */
+void wt_serialize_compute_exclude_header(struct strbuf *sb,
+ const char *key,
+ const char *path);
+
 #endif /* STATUS_H */
From 5e4a1154426300d0f2705bb704dc1fc644f339a0 Mon Sep 17 00:00:00 2001
From: Kevin Willford
Date: Wed, 1 Mar 2017 15:17:12 -0800
Subject: [PATCH 028/104] sparse-checkout: avoid writing entries with the
 skip-worktree bit

When using the sparse-checkout feature, git should not write to the
working directory for files with the skip-worktree bit on. With the
skip-worktree bit on, the file may or may not be in the working
directory, and if it is not, we don't want or need to create it by
calling checkout_entry.

There are two callers of checkout_target, both of which check that the
file does not exist before calling it: load_current, which makes a call
to lstat right before calling checkout_target, and check_preimage,
which will only run checkout_target if stat_ret is less than zero.
check_preimage sets stat_ret to zero, and only if !stat->cached will it
lstat the file and set stat_ret to something other than zero.

This patch checks whether the skip-worktree bit is on in
checkout_target and just returns, so that the entry does not end up in
the working directory. This way apply will not create a file in the
working directory, then update the index while failing to keep the
working directory up to date with the changes that happened in the
index.

Signed-off-by: Kevin Willford
---
 apply.c | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/apply.c b/apply.c
index 0533c4f45323b2..5d0e8afed105e6 100644
--- a/apply.c
+++ b/apply.c
@@ -3346,6 +3346,24 @@ static int checkout_target(struct index_state *istate,
 {
 struct checkout costate = CHECKOUT_INIT;

+ /*
+ * Do not check out the entry if the skip-worktree bit is set
+ *
+ * Both callers of this method (check_preimage and load_current)
+ * check for the existence of the file before calling this
+ * method so we know that the file doesn't exist at this point
+ * and we don't need to perform that check again here.
+ * We just need to check the skip-worktree bit and return.
+ *
+ * This is to prevent git from creating a file in the
+ * working directory that has the skip-worktree bit on,
+ * then updating the index from the patch and not keeping
+ * the working directory version up to date with what it
+ * changed the index version to be.
+ */
+ if (ce_skip_worktree(ce))
+ return 0;
+
 costate.refresh_cache = 1;
 costate.istate = istate;
 if (checkout_entry(ce, &costate, NULL, NULL) ||
From 61571d06682bf819d9b1a57de82a2684245bd933 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler
Date: Wed, 26 Sep 2018 11:21:22 -0400
Subject: [PATCH 029/104] gvfs:trace2:data: status deserialization information

Add trace2 region and data events describing attempts to deserialize
status data using a status cache.

A category:status, label:deserialize region is pushed around the
deserialize code.
Deserialization results when reading from a file are:
    category:status, path   = <path>
    category:status, polled = <number-of-attempts>
    category:status, result = "ok" | "reject"

When reading from STDIN are:
    category:status, path   = "STDIN"
    category:status, result = "ok" | "reject"

Status will fallback and run a normal status scan when a "reject" is
reported (unless "--deserialize-wait=fail").

If "ok" is reported, status was able to use the status cache and avoid
scanning the workdir.

Additionally, a cmd_mode is emitted for each step: collection,
deserialization, and serialization. For example, if deserialization is
attempted and fails and status falls back to actually computing the
status, a cmd_mode message containing "deserialize" is issued and then a
cmd_mode for "collect" is issued.

Also, if deserialization fails, a data message containing the rejection
reason is emitted.

Signed-off-by: Jeff Hostetler
---
 builtin/commit.c | 19 +++++++++++-
 wt-status-deserialize.c | 66 ++++++++++++++++++++++++++++++++++++++---
 wt-status.h | 2 ++
 3 files changed, 82 insertions(+), 5 deletions(-)

diff --git a/builtin/commit.c b/builtin/commit.c
index fa83f9ea7b4916..4293d09992586f 100644
--- a/builtin/commit.c
+++ b/builtin/commit.c
@@ -151,6 +151,7 @@ static int opt_parse_porcelain(const struct option *opt, const char *arg, int un

 static int do_serialize = 0;
 static char *serialize_path = NULL;

+static int reject_implicit = 0;
 static int do_implicit_deserialize = 0;
 static int do_explicit_deserialize = 0;
 static char *deserialize_path = NULL;
@@ -214,7 +215,7 @@ static int opt_parse_deserialize(const struct option *opt, const char *arg, int
 }
 if (!deserialize_path || !*deserialize_path)
 do_explicit_deserialize = 1; /* read stdin */
- else if (access(deserialize_path, R_OK) == 0)
+ else if (wt_status_deserialize_access(deserialize_path, R_OK) == 0)
 do_explicit_deserialize = 1; /* can read from this file */
 else {
 /*
@@ -1447,6 +1448,8 @@ static int git_status_config(const char *k, const char *v, void *cb)
 if (v && *v && access(v, R_OK) == 0) {
 do_implicit_deserialize = 1;
 deserialize_path = xstrdup(v);
+ } else {
+ reject_implicit = 1;
 }
 return 0;
 }
@@ -1619,6 +1622,17 @@ int cmd_status(int argc, const char **argv, const char *prefix)
 if (try_deserialize)
 goto skip_init;
+ /*
+ * If we implicitly received a status cache pathname from the config
+ * and the file does not exist, we silently reject it and do the normal
+ * status "collect". Fake up some trace2 messages to reflect this and
+ * help post-processors recognize this case is different.
+ */ + if (!do_serialize && reject_implicit) { + trace2_cmd_mode("implicit-deserialize"); + trace2_data_string("status", the_repository, "deserialize/reject", + "status-cache/access"); + } enable_fscache(0); if (status_format != STATUS_FORMAT_PORCELAIN && @@ -1662,6 +1676,7 @@ int cmd_status(int argc, const char **argv, const char *prefix) if (s.relative_paths) s.prefix = prefix; + trace2_cmd_mode("deserialize"); result = wt_status_deserialize(&s, deserialize_path, dw); if (result == DESERIALIZE_OK) return 0; @@ -1679,6 +1694,7 @@ int cmd_status(int argc, const char **argv, const char *prefix) fd = -1; } + trace2_cmd_mode("collect"); wt_status_collect(&s); if (0 <= fd) @@ -1693,6 +1709,7 @@ int cmd_status(int argc, const char **argv, const char *prefix) if (fd_serialize < 0) die_errno(_("could not serialize to '%s'"), serialize_path); + trace2_cmd_mode("serialize"); wt_status_serialize_v1(fd_serialize, &s); close(fd_serialize); } diff --git a/wt-status-deserialize.c b/wt-status-deserialize.c index 27e3bb0f053f1f..55110a68036260 100644 --- a/wt-status-deserialize.c +++ b/wt-status-deserialize.c @@ -3,6 +3,22 @@ #include "pkt-line.h" #include "trace.h" +static void set_deserialize_reject_reason(const char *reason) +{ + trace2_data_string("status", the_repository, "deserialize/reject", + reason); +} + +int wt_status_deserialize_access(const char *path, int mode) +{ + int a = access(path, mode); + + if (a != 0) + set_deserialize_reject_reason("status-cache/access"); + + return a; +} + static struct trace_key trace_deserialize = TRACE_KEY_INIT(DESERIALIZE); enum deserialize_parse_strategy { @@ -49,6 +65,7 @@ static int my_validate_index(const struct cache_time *mtime_reported) struct cache_time mtime_observed_on_disk; if (lstat(path, &st)) { + set_deserialize_reject_reason("index/not-found"); trace_printf_key(&trace_deserialize, "could not stat index"); return DESERIALIZE_ERR; } @@ -56,6 +73,7 @@ static int my_validate_index(const struct cache_time *mtime_reported) mtime_observed_on_disk.nsec = ST_MTIME_NSEC(st); if ((mtime_observed_on_disk.sec != mtime_reported->sec) || (mtime_observed_on_disk.nsec != mtime_reported->nsec)) { + set_deserialize_reject_reason("index/mtime-changed"); trace_printf_key(&trace_deserialize, "index mtime changed [des %d %d][obs %d %d]", mtime_reported->sec, mtime_reported->nsec, @@ -81,10 +99,12 @@ static int my_validate_excludes(const char *path, const char *key, const char *l r = (strcmp(line, sb.buf) ? 
DESERIALIZE_ERR : DESERIALIZE_OK); - if (r == DESERIALIZE_ERR) + if (r == DESERIALIZE_ERR) { + set_deserialize_reject_reason("excludes/changed"); trace_printf_key(&trace_deserialize, "%s changed [cached '%s'][observed '%s']", key, line, sb.buf); + } strbuf_release(&sb); return r; @@ -140,6 +160,7 @@ static int wt_deserialize_v1_header(struct wt_status *s, int fd) &index_mtime.sec, &index_mtime.nsec); if (nr_fields != 2) { + set_deserialize_reject_reason("v1-header/invalid-index-mtime"); trace_printf_key(&trace_deserialize, "invalid index_mtime (%d) '%s'", nr_fields, line); return DESERIALIZE_ERR; @@ -223,6 +244,7 @@ static int wt_deserialize_v1_header(struct wt_status *s, int fd) /* status_format */ if (skip_prefix(line, "sha1_commit ", &arg)) { if (get_oid_hex(arg, &s->oid_commit)) { + set_deserialize_reject_reason("v1-header/invalid-commit-sha"); trace_printf_key(&trace_deserialize, "invalid sha1_commit"); return DESERIALIZE_ERR; } @@ -238,19 +260,23 @@ static int wt_deserialize_v1_header(struct wt_status *s, int fd) } /* prefix */ + set_deserialize_reject_reason("v1-header/unexpected-line"); trace_printf_key(&trace_deserialize, "unexpected line '%s'", line); return DESERIALIZE_ERR; } if (!have_required_index_mtime) { + set_deserialize_reject_reason("v1-header/missing-index-mtime"); trace_printf_key(&trace_deserialize, "missing '%s'", "index_mtime"); return DESERIALIZE_ERR; } if (!have_required_core_excludes) { + set_deserialize_reject_reason("v1-header/missing-core-excludes"); trace_printf_key(&trace_deserialize, "missing '%s'", "core_excludes"); return DESERIALIZE_ERR; } if (!have_required_repo_excludes) { + set_deserialize_reject_reason("v1-header/missing-repo-excludes"); trace_printf_key(&trace_deserialize, "missing '%s'", "repo_excludes"); return DESERIALIZE_ERR; } @@ -336,6 +362,7 @@ static int wt_deserialize_v1_changed_items(const struct wt_status *cmd_s, * So we reject the status cache and let the fallback * code run. 
*/ + set_deserialize_reject_reason("v1-data/unmerged"); trace_printf_key( &trace_deserialize, "reject: V2 format and umerged file: %s", @@ -475,6 +502,7 @@ static int wt_deserialize_v1(const struct wt_status *cmd_s, struct wt_status *s, * the serialized data */ if (validate_untracked_files_arg(cmd_s->show_untracked_files, &s->show_untracked_files, &untracked_strategy)) { + set_deserialize_reject_reason("args/untracked-files"); trace_printf_key(&trace_deserialize, "reject: show_untracked_file: command: %d, serialized : %d", cmd_s->show_untracked_files, s->show_untracked_files); @@ -482,6 +510,7 @@ static int wt_deserialize_v1(const struct wt_status *cmd_s, struct wt_status *s, } if (validate_ignored_files_arg(cmd_s->show_ignored_mode, s->show_ignored_mode, &ignored_strategy)) { + set_deserialize_reject_reason("args/ignored-mode"); trace_printf_key(&trace_deserialize, "reject: show_ignored_mode: command: %d, serialized: %d", cmd_s->show_ignored_mode, s->show_ignored_mode); @@ -515,6 +544,7 @@ static int wt_deserialize_v1(const struct wt_status *cmd_s, struct wt_status *s, return DESERIALIZE_ERR; continue; } + set_deserialize_reject_reason("v1-data/unexpected-line"); trace_printf_key(&trace_deserialize, "unexpected line '%s'", line); return DESERIALIZE_ERR; } @@ -536,6 +566,7 @@ static int wt_deserialize_parse(const struct wt_status *cmd_s, struct wt_status if (version == 1) return wt_deserialize_v1(cmd_s, s, fd); } + set_deserialize_reject_reason("status-cache/unsupported-version"); trace_printf_key(&trace_deserialize, "missing/unsupported version"); return DESERIALIZE_ERR; } @@ -556,6 +587,7 @@ static int wt_deserialize_fd(const struct wt_status *cmd_s, struct wt_status *de * Check the path spec on the current command */ if (cmd_s->pathspec.nr > 1) { + set_deserialize_reject_reason("args/multiple-pathspecs"); trace_printf_key(&trace_deserialize, "reject: multiple pathspecs"); return DESERIALIZE_ERR; } @@ -566,6 +598,7 @@ static int wt_deserialize_fd(const struct wt_status *cmd_s, struct wt_status *de */ if (cmd_s->pathspec.nr == 1 && my_strcmp_null(cmd_s->pathspec.items[0].match, "")) { + set_deserialize_reject_reason("args/root-pathspec"); trace_printf_key(&trace_deserialize, "reject: pathspec"); return DESERIALIZE_ERR; } @@ -582,20 +615,24 @@ static int wt_deserialize_fd(const struct wt_status *cmd_s, struct wt_status *de * or "--ignored" settings). 
*/ if (cmd_s->is_initial != des_s->is_initial) { + set_deserialize_reject_reason("args/is-initial-changed"); trace_printf_key(&trace_deserialize, "reject: is_initial"); return DESERIALIZE_ERR; } if (my_strcmp_null(cmd_s->branch, des_s->branch)) { + set_deserialize_reject_reason("args/branch-changed"); trace_printf_key(&trace_deserialize, "reject: branch"); return DESERIALIZE_ERR; } if (my_strcmp_null(cmd_s->reference, des_s->reference)) { + set_deserialize_reject_reason("args/reference-changed"); trace_printf_key(&trace_deserialize, "reject: reference"); return DESERIALIZE_ERR; } /* verbose */ /* amend */ if (cmd_s->whence != des_s->whence) { + set_deserialize_reject_reason("args/whence-changed"); trace_printf_key(&trace_deserialize, "reject: whence"); return DESERIALIZE_ERR; } @@ -629,19 +666,23 @@ static int wt_deserialize_fd(const struct wt_status *cmd_s, struct wt_status *de /* hints */ /* ahead_behind_flags */ if (cmd_s->detect_rename != des_s->detect_rename) { + set_deserialize_reject_reason("args/detect-rename-changed"); trace_printf_key(&trace_deserialize, "reject: detect_rename"); return DESERIALIZE_ERR; } if (cmd_s->rename_score != des_s->rename_score) { + set_deserialize_reject_reason("args/rename-score-changed"); trace_printf_key(&trace_deserialize, "reject: rename_score"); return DESERIALIZE_ERR; } if (cmd_s->rename_limit != des_s->rename_limit) { + set_deserialize_reject_reason("args/rename-limit-changed"); trace_printf_key(&trace_deserialize, "reject: rename_limit"); return DESERIALIZE_ERR; } /* status_format */ if (!oideq(&cmd_s->oid_commit, &des_s->oid_commit)) { + set_deserialize_reject_reason("args/commit-changed"); trace_printf_key(&trace_deserialize, "reject: sha1_commit"); return DESERIALIZE_ERR; } @@ -730,15 +771,18 @@ static int try_deserialize_read_from_file(const struct wt_status *cmd_s, enum wt_status_deserialize_wait dw, struct wt_status *des_s) { - int k, limit; + int k = 0; + int limit; int result = DESERIALIZE_ERR; /* * For "fail" or "no", try exactly once to read the status cache. * Return an error if the file is stale. */ - if (dw == DESERIALIZE_WAIT__FAIL || dw == DESERIALIZE_WAIT__NO) - return try_deserialize_read_from_file_1(cmd_s, path, des_s); + if (dw == DESERIALIZE_WAIT__FAIL || dw == DESERIALIZE_WAIT__NO) { + result = try_deserialize_read_from_file_1(cmd_s, path, des_s); + goto done; + } /* * Wait for the status cache file to refresh. Wait duration can @@ -763,6 +807,12 @@ static int try_deserialize_read_from_file(const struct wt_status *cmd_s, sleep_millisec(100); } +done: + trace2_data_string("status", the_repository, "deserialize/path", path); + trace2_data_intmax("status", the_repository, "deserialize/polled", k); + trace2_data_string("status", the_repository, "deserialize/result", + ((result == DESERIALIZE_OK) ? "ok" : "reject")); + trace_printf_key(&trace_deserialize, "wait polled=%d result=%d '%s'", k, result, path); @@ -788,6 +838,8 @@ int wt_status_deserialize(const struct wt_status *cmd_s, struct wt_status des_s; int result; + trace2_region_enter("status", "deserialize", the_repository); + if (path && *path && strcmp(path, "0")) { result = try_deserialize_read_from_file(cmd_s, path, dw, &des_s); } else { @@ -798,8 +850,14 @@ int wt_status_deserialize(const struct wt_status *cmd_s, * term, since we cannot read stdin multiple times. 
*/
 result = wt_deserialize_fd(cmd_s, &des_s, 0);
+
+ trace2_data_string("status", the_repository, "deserialize/path", "STDIN");
+ trace2_data_string("status", the_repository, "deserialize/result",
+ ((result == DESERIALIZE_OK) ? "ok" : "reject"));
 }

+ trace2_region_leave("status", "deserialize", the_repository);
+
 if (result == DESERIALIZE_OK) {
 wt_status_get_state(cmd_s->repo, &des_s.state,
 des_s.branch &&
 !strcmp(des_s.branch, "HEAD"));
diff --git a/wt-status.h b/wt-status.h
index 7a22aa6c9df0d1..cf527e77f70d01 100644
--- a/wt-status.h
+++ b/wt-status.h
@@ -235,6 +235,8 @@ int wt_status_deserialize(const struct wt_status *cmd_s,
 const char *path,
 enum wt_status_deserialize_wait dw);

+int wt_status_deserialize_access(const char *path, int mode);
+
 /*
 * A helper routine for serialize and deserialize to compute
 * metadata for the user-global and repo-local excludes files.
From 1cb226a02ff64aa01f0f35738ba8d2d924f7ccee Mon Sep 17 00:00:00 2001
From: Jeff Hostetler
Date: Wed, 25 Jul 2018 14:49:37 -0400
Subject: [PATCH 030/104] status: deserialization wait

Teach `git status --deserialize` to either wait indefinitely or
immediately fail if the status serialization cache file is stale.

Signed-off-by: Jeff Hostetler
---
 Documentation/config/status.txt | 16 +++++
 builtin/commit.c | 59 +++++++++++++++-
 t/t7524-serialized-status.sh | 52 ++++++++++++++
 wt-status-deserialize.c | 119 +++++++++++++++++++++++++++++---
 wt-status.h | 12 +++-
 5 files changed, 245 insertions(+), 13 deletions(-)

diff --git a/Documentation/config/status.txt b/Documentation/config/status.txt
index 65cecc12e80c80..af043d7e26f269 100644
--- a/Documentation/config/status.txt
+++ b/Documentation/config/status.txt
@@ -81,3 +81,19 @@ status.deserializePath::
 generated by `--serialize`. This will be overridden by
 `--deserialize=<path>` on the command line. If the cache file is
 invalid or stale, git will fall-back and compute status normally.
+
+status.deserializeWait::
+ EXPERIMENTAL. Specifies what `git status --deserialize` should do
+ if the serialization cache file is stale and whether it should
+ fall-back and compute status normally. This will be overridden by
+ `--deserialize-wait=<value>` on the command line.
++
+--
+* `fail` - cause git to exit with an error when the status cache file
+is stale; this is intended for testing and debugging.
+* `block` - cause git to spin and periodically retry the cache file
+every 100 ms; this is intended to help coordinate with another git
+instance concurrently computing the cache file.
+* `no` - immediately fall back if the cache file is stale. This is the default.
+* `<timeout>` - time (in tenths of a second) to spin and retry.
+-- diff --git a/builtin/commit.c b/builtin/commit.c index d191f151f1c367..eeea02a24948ae 100644 --- a/builtin/commit.c +++ b/builtin/commit.c @@ -155,6 +155,9 @@ static int do_implicit_deserialize = 0; static int do_explicit_deserialize = 0; static char *deserialize_path = NULL; +static enum wt_status_deserialize_wait implicit_deserialize_wait = DESERIALIZE_WAIT__UNSET; +static enum wt_status_deserialize_wait explicit_deserialize_wait = DESERIALIZE_WAIT__UNSET; + /* * --serialize | --serialize= * @@ -220,6 +223,40 @@ static int opt_parse_deserialize(const struct option *opt, const char *arg, int return 0; } +static enum wt_status_deserialize_wait parse_dw(const char *arg) +{ + int tenths; + + if (!strcmp(arg, "fail")) + return DESERIALIZE_WAIT__FAIL; + else if (!strcmp(arg, "block")) + return DESERIALIZE_WAIT__BLOCK; + else if (!strcmp(arg, "no")) + return DESERIALIZE_WAIT__NO; + + /* + * Otherwise, assume it is a timeout in tenths of a second. + * If it contains a bogus value, atol() will return zero + * which is OK. + */ + tenths = atol(arg); + if (tenths < 0) + tenths = DESERIALIZE_WAIT__NO; + return tenths; +} + +static int opt_parse_deserialize_wait(const struct option *opt, + const char *arg, + int unset) +{ + if (unset) + explicit_deserialize_wait = DESERIALIZE_WAIT__UNSET; + else + explicit_deserialize_wait = parse_dw(arg); + + return 0; +} + static int opt_parse_m(const struct option *opt, const char *arg, int unset) { struct strbuf *buf = opt->value; @@ -1407,6 +1444,13 @@ static int git_status_config(const char *k, const char *v, void *cb) } return 0; } + if (!strcmp(k, "status.deserializewait")) { + if (!v || !*v) + implicit_deserialize_wait = DESERIALIZE_WAIT__UNSET; + else + implicit_deserialize_wait = parse_dw(v); + return 0; + } if (!strcmp(k, "status.showuntrackedfiles")) { if (!v) return config_error_nonbool(k); @@ -1471,6 +1515,9 @@ int cmd_status(int argc, const char **argv, const char *prefix) { OPTION_CALLBACK, 0, "deserialize", NULL, N_("path"), N_("deserialize raw status data from file"), PARSE_OPT_OPTARG, opt_parse_deserialize }, + { OPTION_CALLBACK, 0, "deserialize-wait", NULL, + N_("fail|block|no"), N_("how to wait if status cache file is invalid"), + PARSE_OPT_OPTARG, opt_parse_deserialize_wait }, OPT_SET_INT(0, "long", &status_format, N_("show status in long format (default)"), STATUS_FORMAT_LONG), @@ -1583,11 +1630,21 @@ int cmd_status(int argc, const char **argv, const char *prefix) } if (try_deserialize) { + int result; + enum wt_status_deserialize_wait dw = implicit_deserialize_wait; + if (explicit_deserialize_wait != DESERIALIZE_WAIT__UNSET) + dw = explicit_deserialize_wait; + if (dw == DESERIALIZE_WAIT__UNSET) + dw = DESERIALIZE_WAIT__NO; + if (s.relative_paths) s.prefix = prefix; - if (wt_status_deserialize(&s, deserialize_path) == DESERIALIZE_OK) + result = wt_status_deserialize(&s, deserialize_path, dw); + if (result == DESERIALIZE_OK) return 0; + if (dw == DESERIALIZE_WAIT__FAIL) + die(_("Rejected status serialization cache")); /* deserialize failed, so force the initialization we skipped above. 
*/ enable_fscache(1); diff --git a/t/t7524-serialized-status.sh b/t/t7524-serialized-status.sh index c0c2a4f2c4dd1a..46eb746fa1e357 100755 --- a/t/t7524-serialized-status.sh +++ b/t/t7524-serialized-status.sh @@ -186,6 +186,58 @@ test_expect_success 'verify new --serialize=path mode' ' test_i18ncmp expect output.2 ' +test_expect_success 'try deserialize-wait feature' ' + test_when_finished "rm -f serialized_status.dat dirt expect.* output.* trace.*" && + + git status --serialize=serialized_status.dat >output.1 && + + # make status cache stale by updating the mtime on the index. confirm that + # deserialize fails when requested. + sleep 1 && + touch .git/index && + test_must_fail git status --deserialize=serialized_status.dat --deserialize-wait=fail && + test_must_fail git -c status.deserializeWait=fail status --deserialize=serialized_status.dat && + + cat >expect.1 <<-\EOF && + ? expect.1 + ? output.1 + ? serialized_status.dat + ? untracked/ + ? untracked_1.txt + EOF + + # refresh the status cache. + git status --porcelain=v2 --serialize=serialized_status.dat >output.1 && + test_cmp expect.1 output.1 && + + # create some dirt. confirm deserialize used the existing status cache. + echo x >dirt && + git status --porcelain=v2 --deserialize=serialized_status.dat >output.2 && + test_cmp output.1 output.2 && + + # make the cache stale and try the timeout feature and wait upto + # 2 tenths of a second. confirm deserialize timed out and rejected + # the status cache and did a normal scan. + + cat >expect.2 <<-\EOF && + ? dirt + ? expect.1 + ? expect.2 + ? output.1 + ? output.2 + ? serialized_status.dat + ? trace.2 + ? untracked/ + ? untracked_1.txt + EOF + + sleep 1 && + touch .git/index && + GIT_TRACE_DESERIALIZE=1 git status --porcelain=v2 --deserialize=serialized_status.dat --deserialize-wait=2 >output.2 2>trace.2 && + test_cmp expect.2 output.2 && + grep "wait polled=2 result=1" trace.2 >trace.2g +' + test_expect_success 'merge conflicts' ' # create a merge conflict. diff --git a/wt-status-deserialize.c b/wt-status-deserialize.c index 5c47bf756b20da..61c8fbc44bffa1 100644 --- a/wt-status-deserialize.c +++ b/wt-status-deserialize.c @@ -56,7 +56,8 @@ static int my_validate_index(const struct cache_time *mtime_reported) mtime_observed_on_disk.nsec = ST_MTIME_NSEC(st); if ((mtime_observed_on_disk.sec != mtime_reported->sec) || (mtime_observed_on_disk.nsec != mtime_reported->nsec)) { - trace_printf_key(&trace_deserialize, "index mtime changed [des %d.%d][obs %d.%d]", + trace_printf_key(&trace_deserialize, + "index mtime changed [des %d %d][obs %d %d]", mtime_reported->sec, mtime_reported->nsec, mtime_observed_on_disk.sec, mtime_observed_on_disk.nsec); return DESERIALIZE_ERR; @@ -545,6 +546,8 @@ static inline int my_strcmp_null(const char *a, const char *b) static int wt_deserialize_fd(const struct wt_status *cmd_s, struct wt_status *des_s, int fd) { + memset(des_s, 0, sizeof(*des_s)); + /* * Check the path spec on the current command */ @@ -668,8 +671,101 @@ static int wt_deserialize_fd(const struct wt_status *cmd_s, struct wt_status *de return DESERIALIZE_OK; } +static struct cache_time deserialize_prev_mtime = { 0, 0 }; + +static int try_deserialize_read_from_file_1(const struct wt_status *cmd_s, + const char *path, + struct wt_status *des_s) +{ + struct stat st; + int result; + int fd; + + /* + * If we are spinning waiting for the status cache to become + * valid, skip re-reading it if the mtime has not changed + * since the last time we read it. 
+ */ + if (lstat(path, &st)) { + trace_printf_key(&trace_deserialize, + "could not lstat '%s'", path); + return DESERIALIZE_ERR; + } + if (st.st_mtime == deserialize_prev_mtime.sec && + ST_MTIME_NSEC(st) == deserialize_prev_mtime.nsec) { + trace_printf_key(&trace_deserialize, + "mtime has not changed '%s'", path); + return DESERIALIZE_ERR; + } + + fd = xopen(path, O_RDONLY); + if (fd == -1) { + trace_printf_key(&trace_deserialize, + "could not read '%s'", path); + return DESERIALIZE_ERR; + } + + deserialize_prev_mtime.sec = st.st_mtime; + deserialize_prev_mtime.nsec = ST_MTIME_NSEC(st); + + trace_printf_key(&trace_deserialize, + "reading serialization file (%d %d) '%s'", + deserialize_prev_mtime.sec, + deserialize_prev_mtime.nsec, + path); + + result = wt_deserialize_fd(cmd_s, des_s, fd); + close(fd); + + return result; +} + +static int try_deserialize_read_from_file(const struct wt_status *cmd_s, + const char *path, + enum wt_status_deserialize_wait dw, + struct wt_status *des_s) +{ + int k, limit; + int result = DESERIALIZE_ERR; + + /* + * For "fail" or "no", try exactly once to read the status cache. + * Return an error if the file is stale. + */ + if (dw == DESERIALIZE_WAIT__FAIL || dw == DESERIALIZE_WAIT__NO) + return try_deserialize_read_from_file_1(cmd_s, path, des_s); + + /* + * Wait for the status cache file to refresh. Wait duration can + * be in tenths of a second or unlimited. Poll every 100ms. + */ + if (dw == DESERIALIZE_WAIT__BLOCK) { + /* + * Convert "unlimited" to 1 day. + */ + limit = 10 * 60 * 60 * 24; + } else { + /* spin for dw tenths of a second */ + limit = dw; + } + for (k = 0; k < limit; k++) { + result = try_deserialize_read_from_file_1( + cmd_s, path, des_s); + + if (result == DESERIALIZE_OK) + break; + + sleep_millisec(100); + } + + trace_printf_key(&trace_deserialize, + "wait polled=%d result=%d '%s'", + k, result, path); + return result; +} + /* - * Read raw serialized status data from the given file + * Read raw serialized status data from the given file (or STDIN). * * Verify that the args specified in the current command * are compatible with the deserialized data (such as "-uno"). @@ -677,24 +773,25 @@ static int wt_deserialize_fd(const struct wt_status *cmd_s, struct wt_status *de * Copy display-related fields from the current command * into the deserialized data (so that the user can request * long or short as they please). + * + * Print status report using cached data. */ int wt_status_deserialize(const struct wt_status *cmd_s, - const char *path) + const char *path, + enum wt_status_deserialize_wait dw) { struct wt_status des_s; int result; if (path && *path && strcmp(path, "0")) { - int fd = xopen(path, O_RDONLY); - if (fd == -1) { - trace_printf_key(&trace_deserialize, "could not read '%s'", path); - return DESERIALIZE_ERR; - } - trace_printf_key(&trace_deserialize, "reading serialization file '%s'", path); - result = wt_deserialize_fd(cmd_s, &des_s, fd); - close(fd); + result = try_deserialize_read_from_file(cmd_s, path, dw, &des_s); } else { trace_printf_key(&trace_deserialize, "reading stdin"); + + /* + * Read status cache data from stdin. Ignore the deserialize-wait + * term, since we cannot read stdin multiple times. 
+	 */
 		result = wt_deserialize_fd(cmd_s, &des_s, 0);
 	}
 
diff --git a/wt-status.h b/wt-status.h
index 2a499f20dfc754..7a22aa6c9df0d1 100644
--- a/wt-status.h
+++ b/wt-status.h
@@ -209,6 +209,15 @@ struct wt_status_serialize_data
 			 - sizeof(struct wt_status_serialize_data_fixed)];
 };
 
+enum wt_status_deserialize_wait
+{
+	DESERIALIZE_WAIT__UNSET = -3,
+	DESERIALIZE_WAIT__FAIL = -2,  /* return error, do not fallback */
+	DESERIALIZE_WAIT__BLOCK = -1, /* unlimited timeout */
+	DESERIALIZE_WAIT__NO = 0,     /* immediately fallback */
+	/* any positive value is a timeout in tenths of a second */
+};
+
 /*
  * Serialize computed status scan results using "version 1" format
  * to the given file.
@@ -223,7 +232,8 @@ void wt_status_serialize_v1(int fd, struct wt_status *s);
  * fields.
  */
 int wt_status_deserialize(const struct wt_status *cmd_s,
-			  const char *path);
+			  const char *path,
+			  enum wt_status_deserialize_wait dw);
 
 /*
  * A helper routine for serialize and deserialize to compute

From 732cc4702f2cecaeb1278b0216dfde81cb04fce9 Mon Sep 17 00:00:00 2001
From: Kevin Willford
Date: Wed, 15 Mar 2017 16:36:53 -0600
Subject: [PATCH 031/104] Fix reset when using the sparse-checkout feature.

When using the sparse-checkout feature, the git reset command adds
entries to the index that have the skip-worktree bit off, but leaves
the working directory empty. File data is lost because the index
version of the files has been changed but nothing is present in the
working directory. This causes the next status call to show "deleted"
for files that were modified or deleted, and nothing at all for files
that were added. Instead, the added files should be shown as untracked
and the modified files as modified.

To fix this, while the reset is running, if a file is absent from the
working directory and it either will be missing with the new index
entry or was not missing in the previous version, we create the
previous index version of the file in the working directory, so that
status reports correctly and the files are available for the user to
deal with.

Signed-off-by: Kevin Willford
---
 builtin/reset.c                  | 34 +++++++++++++++++++
 t/t7114-reset-sparse-checkout.sh | 58 ++++++++++++++++++++++++++++++++
 2 files changed, 92 insertions(+)
 create mode 100755 t/t7114-reset-sparse-checkout.sh

diff --git a/builtin/reset.c b/builtin/reset.c
index 1a77b4e2b4ee34..001b0ad60948ab 100644
--- a/builtin/reset.c
+++ b/builtin/reset.c
@@ -27,6 +27,7 @@
 #include "submodule-config.h"
 #include "strbuf.h"
 #include "quote.h"
+#include "dir.h"
 
 #define REFRESH_INDEX_DELAY_WARNING_IN_MS (2 * 1000)
 
@@ -130,12 +131,45 @@ static void update_index_from_diff(struct diff_queue_struct *q,
 		struct diff_options *opt, void *data)
 {
 	int i;
+	int pos;
 	int intent_to_add = *(int *)data;
 
 	for (i = 0; i < q->nr; i++) {
 		struct diff_filespec *one = q->queue[i]->one;
+		struct diff_filespec *two = q->queue[i]->two;
 		int is_missing = !(one->mode && !is_null_oid(&one->oid));
+		int was_missing = !two->mode && is_null_oid(&two->oid);
 		struct cache_entry *ce;
+		struct cache_entry *ceBefore;
+		struct checkout state = CHECKOUT_INIT;
+
+		/*
+		 * When using the sparse-checkout feature the cache entries that are
+		 * added here will not have the skip-worktree bit set.
+		 * Without this code there is data that is lost because the files that
+		 * would normally be in the working directory are not there and show as
+		 * deleted for the next status or in the case of added files just disappear.
+ * We need to create the previous version of the files in the working + * directory so that they will have the right content and the next + * status call will show modified or untracked files correctly. + */ + if (core_apply_sparse_checkout && !file_exists(two->path)) + { + pos = cache_name_pos(two->path, strlen(two->path)); + if ((pos >= 0 && ce_skip_worktree(active_cache[pos])) && (is_missing || !was_missing)) + { + state.force = 1; + state.refresh_cache = 1; + state.istate = &the_index; + ceBefore = make_cache_entry(&the_index, two->mode, &two->oid, two->path, + 0, 0); + if (!ceBefore) + die(_("make_cache_entry failed for path '%s'"), + two->path); + + checkout_entry(ceBefore, &state, NULL, NULL); + } + } if (is_missing && !intent_to_add) { remove_file_from_cache(one->path); diff --git a/t/t7114-reset-sparse-checkout.sh b/t/t7114-reset-sparse-checkout.sh new file mode 100755 index 00000000000000..c46cbdb64e4ebc --- /dev/null +++ b/t/t7114-reset-sparse-checkout.sh @@ -0,0 +1,58 @@ +#!/bin/sh + +test_description='reset when using a sparse-checkout' + +. ./test-lib.sh + +# reset using a sparse-checkout file + +test_expect_success 'setup' ' + test_tick && + echo "checkout file" >c && + echo "modify file" >m && + echo "delete file" >d && + git add . && + git commit -m "initial commit" && + echo "added file" >a && + echo "modification of a file" >m && + git rm d && + git add . && + git commit -m "second commit" && + git checkout -b endCommit +' + +test_expect_success 'reset when there is a sparse-checkout' ' + echo "/c" >.git/info/sparse-checkout && + test_config core.sparsecheckout true && + git checkout -B resetBranch && + test_path_is_missing m && + test_path_is_missing a && + test_path_is_missing d && + git reset HEAD~1 && + test "checkout file" = "$(cat c)" && + test "modification of a file" = "$(cat m)" && + test "added file" = "$(cat a)" && + test_path_is_missing d +' + +test_expect_success 'reset after deleting file without skip-worktree bit' ' + git checkout -f endCommit && + git clean -xdf && + echo "/c +/m" >.git/info/sparse-checkout && + test_config core.sparsecheckout true && + git checkout -B resetAfterDelete && + test_path_is_file m && + test_path_is_missing a && + test_path_is_missing d && + rm -f m && + git reset HEAD~1 && + test "checkout file" = "$(cat c)" && + test "added file" = "$(cat a)" && + test_path_is_missing m && + test_path_is_missing d +' + + + +test_done From e14a7a7794fd8993f266d93c0792a79f24181536 Mon Sep 17 00:00:00 2001 From: Jeff Hostetler Date: Mon, 7 Jan 2019 12:45:48 -0500 Subject: [PATCH 032/104] gvfs:trace2:data: status serialization Add trace information around status serialization. Signed-off-by: Jeff Hostetler --- wt-status-serialize.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/wt-status-serialize.c b/wt-status-serialize.c index f68235593997e4..54a365e5a020eb 100644 --- a/wt-status-serialize.c +++ b/wt-status-serialize.c @@ -297,6 +297,8 @@ void wt_status_serialize_v1(int fd, struct wt_status *s) struct string_list_item *iter; int k; + trace2_region_enter("status", "serialize", the_repository); + /* * version header must be first line. 
*/ @@ -330,4 +332,6 @@ void wt_status_serialize_v1(int fd, struct wt_status *s) } packet_flush(fd); } + + trace2_region_leave("status", "serialize", the_repository); } From ce3f9318ca03d554ba48f04d8444a9bd9ff5cfb5 Mon Sep 17 00:00:00 2001 From: Johannes Schindelin Date: Tue, 21 May 2019 23:14:48 +0200 Subject: [PATCH 033/104] merge-recursive: avoid confusing logic in was_dirty() It took this developer more than a moment to verify that was_dirty() really returns 0 (i.e. "false") if the file was not even tracked. In other words, the `dirty` variable that was initialized to 1 (i.e. "true") and then negated to be returned was not helping readability. The same holds for the final return: rather than assigning the value to return to `dirty` and then *immediately* returning that, we can simplify it to a single statement. --- merge-recursive.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/merge-recursive.c b/merge-recursive.c index 22dc2837458391..29f83d92e29cb7 100644 --- a/merge-recursive.c +++ b/merge-recursive.c @@ -857,15 +857,13 @@ static int would_lose_untracked(struct merge_options *opt, const char *path) static int was_dirty(struct merge_options *opt, const char *path) { struct cache_entry *ce; - int dirty = 1; if (opt->priv->call_depth || !was_tracked(opt, path)) - return !dirty; + return 0; ce = index_file_exists(opt->priv->unpack_opts.src_index, path, strlen(path), ignore_case); - dirty = verify_uptodate(ce, &opt->priv->unpack_opts) != 0; - return dirty; + return verify_uptodate(ce, &opt->priv->unpack_opts) != 0; } static int make_room_for_path(struct merge_options *opt, const char *path) From 02b3ad134e257f9ee4be4d47450b1ddb30c1d029 Mon Sep 17 00:00:00 2001 From: Kevin Willford Date: Wed, 5 Apr 2017 10:55:32 -0600 Subject: [PATCH 034/104] Do not remove files outside the sparse-checkout Signed-off-by: Kevin Willford --- unpack-trees.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/unpack-trees.c b/unpack-trees.c index 84d170d1eca9f0..d497bfe9b64d3b 100644 --- a/unpack-trees.c +++ b/unpack-trees.c @@ -543,7 +543,9 @@ static int apply_sparse_checkout(struct index_state *istate, ce->ce_flags &= ~CE_SKIP_WORKTREE; return -1; } - ce->ce_flags |= CE_WT_REMOVE; + if (!gvfs_config_is_set(GVFS_NO_DELETE_OUTSIDE_SPARSECHECKOUT)) + ce->ce_flags |= CE_WT_REMOVE; + ce->ce_flags &= ~CE_UPDATE; } if (was_skip_worktree && !ce_skip_worktree(ce)) { From ea4a6014d768819e8a0858240c74a092115fa3ec Mon Sep 17 00:00:00 2001 From: Jeff Hostetler Date: Mon, 19 Nov 2018 16:26:37 -0500 Subject: [PATCH 035/104] gvfs:trace2:data: add vfs stats Report virtual filesystem summary data. 
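As a sketch of how the new region might be inspected (the event target
path and the grep filter here are illustrative, not part of this
change):

	# capture trace2 events for a status run and pick out the
	# "status"/"serialize" region entries
	GIT_TRACE2_EVENT=/tmp/trace.json git status --serialize=out.dat
	grep '"serialize"' /tmp/trace.json
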
Signed-off-by: Jeff Hostetler --- virtualfilesystem.c | 37 +++++++++++++++++++++++++++++++++++-- 1 file changed, 35 insertions(+), 2 deletions(-) diff --git a/virtualfilesystem.c b/virtualfilesystem.c index c93357e1508a51..7a107c3e2e107b 100644 --- a/virtualfilesystem.c +++ b/virtualfilesystem.c @@ -254,6 +254,11 @@ void apply_virtualfilesystem(struct index_state *istate) { char *buf, *entry; int i; + int nr_unknown = 0; + int nr_vfs_dirs = 0; + int nr_vfs_rows = 0; + int nr_bulk_skip = 0; + int nr_explicit_skip = 0; if (!git_config_get_virtualfilesystem()) return; @@ -271,16 +276,21 @@ void apply_virtualfilesystem(struct index_state *istate) if (buf[i] == '\0') { int pos, len; + nr_vfs_rows++; + len = buf + i - entry; /* look for a directory wild card (ie "dir1/") */ if (buf[i - 1] == '/') { + nr_vfs_dirs++; if (ignore_case) adjust_dirname_case(istate, entry); pos = index_name_pos(istate, entry, len); if (pos < 0) { pos = -pos - 1; while (pos < istate->cache_nr && !fspathncmp(istate->cache[pos]->name, entry, len)) { + if (istate->cache[pos]->ce_flags & CE_SKIP_WORKTREE) + nr_bulk_skip++; istate->cache[pos]->ce_flags &= ~CE_SKIP_WORKTREE; pos++; } @@ -288,18 +298,41 @@ void apply_virtualfilesystem(struct index_state *istate) } else { if (ignore_case) { struct cache_entry *ce = index_file_exists(istate, entry, len, ignore_case); - if (ce) + if (ce) { + if (ce->ce_flags & CE_SKIP_WORKTREE) + nr_explicit_skip++; ce->ce_flags &= ~CE_SKIP_WORKTREE; + } + else { + nr_unknown++; + } } else { int pos = index_name_pos(istate, entry, len); - if (pos >= 0) + if (pos >= 0) { + if (istate->cache[pos]->ce_flags & CE_SKIP_WORKTREE) + nr_explicit_skip++; istate->cache[pos]->ce_flags &= ~CE_SKIP_WORKTREE; + } + else { + nr_unknown++; + } } } entry += len + 1; } } + + if (nr_vfs_rows > 0) { + trace2_data_intmax("vfs", the_repository, "apply/tracked", nr_bulk_skip + nr_explicit_skip); + + trace2_data_intmax("vfs", the_repository, "apply/vfs_rows", nr_vfs_rows); + trace2_data_intmax("vfs", the_repository, "apply/vfs_dirs", nr_vfs_dirs); + + trace2_data_intmax("vfs", the_repository, "apply/nr_unknown", nr_unknown); + trace2_data_intmax("vfs", the_repository, "apply/nr_bulk_skip", nr_bulk_skip); + trace2_data_intmax("vfs", the_repository, "apply/nr_explicit_skip", nr_explicit_skip); + } } /* From bfaaaa4145858deadc83aa2d73303aea81f30dc6 Mon Sep 17 00:00:00 2001 From: Johannes Schindelin Date: Tue, 21 May 2019 23:17:46 +0200 Subject: [PATCH 036/104] merge-recursive: add some defensive coding to was_dirty() It took this developer quite a good while to understand why the current code cannot get a `NULL` returned by `index_file_exists()`. To un-confuse readers (and future-proof the code), let's just be safe and check before we dereference the returned pointer. 
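To illustrate the pattern being hardened (a minimal sketch; "istate"
and "opts" stand in for the fields used at the actual call site):

	struct cache_entry *ce;

	ce = index_file_exists(istate, path, strlen(path), ignore_case);
	/* index_file_exists() returns NULL for a path not in the index, */
	/* and verify_uptodate() would then dereference that NULL */
	return !ce || verify_uptodate(ce, &opts) != 0;
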
--- merge-recursive.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/merge-recursive.c b/merge-recursive.c index 29f83d92e29cb7..b6cca3fed65d70 100644 --- a/merge-recursive.c +++ b/merge-recursive.c @@ -863,7 +863,7 @@ static int was_dirty(struct merge_options *opt, const char *path) ce = index_file_exists(opt->priv->unpack_opts.src_index, path, strlen(path), ignore_case); - return verify_uptodate(ce, &opt->priv->unpack_opts) != 0; + return !ce || verify_uptodate(ce, &opt->priv->unpack_opts) != 0; } static int make_room_for_path(struct merge_options *opt, const char *path) From 3e3a826894cd51d098aed5bad0ecb375dc3a0f17 Mon Sep 17 00:00:00 2001 From: Kevin Willford Date: Fri, 14 Apr 2017 10:59:20 -0600 Subject: [PATCH 037/104] gvfs: refactor loading the core.gvfs config value This code change makes sure that the config value for core_gvfs is always loaded before checking it. Signed-off-by: Kevin Willford --- Makefile | 1 + gvfs.c | 38 ++++++++++++++++++++++++++++++++++++++ gvfs.h | 31 ++----------------------------- 3 files changed, 41 insertions(+), 29 deletions(-) create mode 100644 gvfs.c diff --git a/Makefile b/Makefile index 93ea3ab5b61afd..f2cac81e0c51ee 100644 --- a/Makefile +++ b/Makefile @@ -890,6 +890,7 @@ LIB_OBJS += gettext.o LIB_OBJS += gpg-interface.o LIB_OBJS += graph.o LIB_OBJS += grep.o +LIB_OBJS += gvfs.o LIB_OBJS += hashmap.o LIB_OBJS += help.o LIB_OBJS += hex.o diff --git a/gvfs.c b/gvfs.c new file mode 100644 index 00000000000000..7235199c04ac39 --- /dev/null +++ b/gvfs.c @@ -0,0 +1,38 @@ +#include "cache.h" +#include "gvfs.h" +#include "config.h" + +static int gvfs_config_loaded; +static int core_gvfs_is_bool; + +static int early_core_gvfs_config(const char *var, const char *value, void *data) +{ + if (!strcmp(var, "core.gvfs")) + core_gvfs = git_config_bool_or_int("core.gvfs", value, &core_gvfs_is_bool); + return 0; +} + +void gvfs_load_config_value(const char *value) +{ + if (gvfs_config_loaded) + return; + + if (value) + core_gvfs = git_config_bool_or_int("core.gvfs", value, &core_gvfs_is_bool); + else if (startup_info->have_repository == 0) + read_early_config(early_core_gvfs_config, NULL); + else + git_config_get_bool_or_int("core.gvfs", &core_gvfs_is_bool, &core_gvfs); + + /* Turn on all bits if a bool was set in the settings */ + if (core_gvfs_is_bool && core_gvfs) + core_gvfs = -1; + + gvfs_config_loaded = 1; +} + +int gvfs_config_is_set(int mask) +{ + gvfs_load_config_value(0); + return (core_gvfs & mask) == mask; +} diff --git a/gvfs.h b/gvfs.h index 2d6de575bf4a65..7c9367866f502a 100644 --- a/gvfs.h +++ b/gvfs.h @@ -1,8 +1,6 @@ #ifndef GVFS_H #define GVFS_H -#include "cache.h" -#include "config.h" /* * This file is for the specific settings and methods @@ -19,32 +17,7 @@ #define GVFS_FETCH_SKIP_REACHABILITY_AND_UPLOADPACK (1 << 4) #define GVFS_BLOCK_FILTERS_AND_EOL_CONVERSIONS (1 << 6) -static inline int gvfs_config_is_set(int mask) { - return (core_gvfs & mask) == mask; -} - -static inline int gvfs_config_is_set_any(void) { - return core_gvfs > 0; -} - -static inline void gvfs_load_config_value(const char *value) { - int is_bool = 0; - - if (value) - core_gvfs = git_config_bool_or_int("core.gvfs", value, &is_bool); - else - git_config_get_bool_or_int("core.gvfs", &is_bool, &core_gvfs); - - /* Turn on all bits if a bool was set in the settings */ - if (is_bool && core_gvfs) - core_gvfs = -1; -} - - -static inline int gvfs_config_load_and_is_set(int mask) { - gvfs_load_config_value(0); - return gvfs_config_is_set(mask); -} - +void 
gvfs_load_config_value(const char *value); +int gvfs_config_is_set(int mask); #endif /* GVFS_H */ From 59608b9f68b143e453f5bbc259645d9895dad1a1 Mon Sep 17 00:00:00 2001 From: Jeff Hostetler Date: Mon, 15 Apr 2019 13:39:43 -0700 Subject: [PATCH 038/104] trace2: refactor setting process starting time Create trace2_initialize_clock() and call from main() to capture process start time in isolation and before other sub-systems are ready. Signed-off-by: Jeff Hostetler Signed-off-by: Junio C Hamano --- compat/mingw.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/compat/mingw.c b/compat/mingw.c index c8fd4538d7d440..9dd108b2ea92f8 100644 --- a/compat/mingw.c +++ b/compat/mingw.c @@ -3680,6 +3680,8 @@ int wmain(int argc, const wchar_t **wargv) SetConsoleCtrlHandler(handle_ctrl_c, TRUE); + trace2_initialize_clock(); + maybe_redirect_std_handles(); adjust_symlink_flags(); fsync_object_files = 1; From 4ad0ad9c5e1aa4696af7c15803b3838aebfbcbe2 Mon Sep 17 00:00:00 2001 From: Johannes Schindelin Date: Tue, 21 May 2019 23:20:16 +0200 Subject: [PATCH 039/104] merge-recursive: teach was_dirty() about the virtualfilesystem The idea of the virtual file system really is to tell Git to avoid accessing certain paths. This fixes the case where a given path is not yet included in the virtual file system and we are about to write a conflicted version of it. --- merge-recursive.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/merge-recursive.c b/merge-recursive.c index b6cca3fed65d70..833f0e6f6567e6 100644 --- a/merge-recursive.c +++ b/merge-recursive.c @@ -29,6 +29,7 @@ #include "tree-walk.h" #include "unpack-trees.h" #include "xdiff-interface.h" +#include "virtualfilesystem.h" struct merge_options_internal { int call_depth; @@ -858,7 +859,8 @@ static int was_dirty(struct merge_options *opt, const char *path) { struct cache_entry *ce; - if (opt->priv->call_depth || !was_tracked(opt, path)) + if (opt->priv->call_depth || !was_tracked(opt, path) || + is_excluded_from_virtualfilesystem(path, strlen(path), DT_REG) == 1) return 0; ce = index_file_exists(opt->priv->unpack_opts.src_index, From 2b889242058be977efe5abea7f54a207327315ee Mon Sep 17 00:00:00 2001 From: Ben Peart Date: Thu, 11 Jan 2018 16:25:08 -0500 Subject: [PATCH 040/104] Add virtual file system settings and hook proc On index load, clear/set the skip worktree bits based on the virtual file system data. Use virtual file system data to update skip-worktree bit in unpack-trees. Use virtual file system data to exclude files and folders not explicitly requested. Signed-off-by: Ben Peart --- Documentation/config/core.txt | 8 + Documentation/githooks.txt | 20 ++ Makefile | 1 + cache.h | 1 + config.c | 23 ++- config.h | 1 + dir.c | 32 +++- environment.c | 1 + read-cache.c | 2 + t/t1092-virtualfilesystem.sh | 349 ++++++++++++++++++++++++++++++++++ unpack-trees.c | 14 +- virtualfilesystem.c | 312 ++++++++++++++++++++++++++++++ virtualfilesystem.h | 25 +++ 13 files changed, 785 insertions(+), 4 deletions(-) create mode 100755 t/t1092-virtualfilesystem.sh create mode 100644 virtualfilesystem.c create mode 100644 virtualfilesystem.h diff --git a/Documentation/config/core.txt b/Documentation/config/core.txt index c75ae0171e9365..821c62cd3cbd9e 100644 --- a/Documentation/config/core.txt +++ b/Documentation/config/core.txt @@ -79,6 +79,14 @@ core.fsmonitorHookVersion:: something that can be used to determine what files have changed without race conditions. 
+core.virtualFilesystem:: + If set, the value of this variable is used as a command which + will identify all files and directories that are present in + the working directory. Git will only track and update files + listed in the virtual file system. Using the virtual file system + will supersede the sparse-checkout settings which will be ignored. + See the "virtual file system" section of linkgit:githooks[5]. + core.trustctime:: If false, the ctime differences between the index and the working tree are ignored; useful when the inode change time diff --git a/Documentation/githooks.txt b/Documentation/githooks.txt index 81f2a87e88ba5f..2d195d9b883701 100644 --- a/Documentation/githooks.txt +++ b/Documentation/githooks.txt @@ -596,6 +596,26 @@ and "0" meaning they were not. Only one parameter should be set to "1" when the hook runs. The hook running passing "1", "1" should not be possible. +virtualFilesystem +~~~~~~~~~~~~~~~~~~ + +"Virtual File System" allows populating the working directory sparsely. +The projection data is typically automatically generated by an external +process. Git will limit what files it checks for changes as well as which +directories are checked for untracked files based on the path names given. +Git will also only update those files listed in the projection. + +The hook is invoked when the configuration option core.virtualFilesystem +is set. It takes one argument, a version (currently 1). + +The hook should output to stdout the list of all files in the working +directory that git should track. The paths are relative to the root +of the working directory and are separated by a single NUL. Full paths +('dir1/a.txt') as well as directories are supported (ie 'dir1/'). + +The exit status determines whether git will use the data from the +hook. On error, git will abort the command with an error message. 
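+A minimal example hook (illustrative only; any executable that emits
+the NUL-terminated list will do) could look like this:
+
+-------------------------------------------------
+#!/bin/sh
+# version 1 of the hook interface is requested in $1
+test "$1" = 1 || exit 1
+
+# expose two files and everything under dir2/
+printf 'dir1/file1.txt\0'
+printf 'dir1/file2.txt\0'
+printf 'dir2/\0'
+-------------------------------------------------
+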
+ GIT --- Part of the linkgit:git[1] suite diff --git a/Makefile b/Makefile index f2cac81e0c51ee..fdbe52e9169993 100644 --- a/Makefile +++ b/Makefile @@ -1027,6 +1027,7 @@ LIB_OBJS += utf8.o LIB_OBJS += varint.o LIB_OBJS += version.o LIB_OBJS += versioncmp.o +LIB_OBJS += virtualfilesystem.o LIB_OBJS += walker.o LIB_OBJS += wildmatch.o LIB_OBJS += worktree.o diff --git a/cache.h b/cache.h index 523e59417bbf7f..38b269d8100a6e 100644 --- a/cache.h +++ b/cache.h @@ -955,6 +955,7 @@ extern char *git_replace_ref_base; extern int fsync_object_files; extern int core_preload_index; +extern const char *core_virtualfilesystem; extern int core_gvfs; extern int precomposed_unicode; extern int protect_hfs; diff --git a/config.c b/config.c index 7489793a2a5a04..3be44a69a9607a 100644 --- a/config.c +++ b/config.c @@ -1381,7 +1381,11 @@ static int git_default_core_config(const char *var, const char *value, void *cb) } if (!strcmp(var, "core.sparsecheckout")) { - core_apply_sparse_checkout = git_config_bool(var, value); + /* virtual file system relies on the sparse checkout logic so force it on */ + if (core_virtualfilesystem) + core_apply_sparse_checkout = 1; + else + core_apply_sparse_checkout = git_config_bool(var, value); return 0; } @@ -2359,6 +2363,23 @@ int git_config_get_fsmonitor(void) return 0; } +int git_config_get_virtualfilesystem(void) +{ + if (git_config_get_pathname("core.virtualfilesystem", &core_virtualfilesystem)) + core_virtualfilesystem = getenv("GIT_VIRTUALFILESYSTEM_TEST"); + + if (core_virtualfilesystem && !*core_virtualfilesystem) + core_virtualfilesystem = NULL; + + /* virtual file system relies on the sparse checkout logic so force it on */ + if (core_virtualfilesystem) { + core_apply_sparse_checkout = 1; + return 1; + } + + return 0; +} + int git_config_get_index_threads(int *dest) { int is_bool, val; diff --git a/config.h b/config.h index 060874488f413c..dd408a41d40781 100644 --- a/config.h +++ b/config.h @@ -584,6 +584,7 @@ int git_config_get_untracked_cache(void); int git_config_get_split_index(void); int git_config_get_max_percent_split_change(void); int git_config_get_fsmonitor(void); +int git_config_get_virtualfilesystem(void); /* This dies if the configured or default date is in the future */ int git_config_get_expiry(const char *key, const char **output); diff --git a/dir.c b/dir.c index bc5d67ba8b73e1..248bc8de164525 100644 --- a/dir.c +++ b/dir.c @@ -18,6 +18,7 @@ #include "ewah/ewok.h" #include "fsmonitor.h" #include "submodule-config.h" +#include "virtualfilesystem.h" /* * Tells read_directory_recursive how a file or directory should be treated. @@ -1377,6 +1378,17 @@ enum pattern_match_result path_matches_pattern_list( int result = NOT_MATCHED; const char *slash_pos; + /* + * The virtual file system data is used to prevent git from traversing + * any part of the tree that is not in the virtual file system. Return + * 1 to exclude the entry if it is not found in the virtual file system, + * else fall through to the regular excludes logic as it may further exclude. 
+ */ + if (*dtype == DT_UNKNOWN) + *dtype = resolve_dtype(DT_UNKNOWN, istate, pathname, pathlen); + if (is_excluded_from_virtualfilesystem(pathname, pathlen, *dtype) > 0) + return 1; + if (!pl->use_cone_patterns) { pattern = last_matching_pattern_from_list(pathname, pathlen, basename, dtype, pl, istate); @@ -1635,8 +1647,20 @@ struct path_pattern *last_matching_pattern(struct dir_struct *dir, int is_excluded(struct dir_struct *dir, struct index_state *istate, const char *pathname, int *dtype_p) { - struct path_pattern *pattern = - last_matching_pattern(dir, istate, pathname, dtype_p); + struct path_pattern *pattern; + + /* + * The virtual file system data is used to prevent git from traversing + * any part of the tree that is not in the virtual file system. Return + * 1 to exclude the entry if it is not found in the virtual file system, + * else fall through to the regular excludes logic as it may further exclude. + */ + if (*dtype_p == DT_UNKNOWN) + *dtype_p = resolve_dtype(DT_UNKNOWN, istate, pathname, strlen(pathname)); + if (is_excluded_from_virtualfilesystem(pathname, strlen(pathname), *dtype_p) > 0) + return 1; + + pattern = last_matching_pattern(dir, istate, pathname, dtype_p); if (pattern) return pattern->flags & PATTERN_FLAG_NEGATIVE ? 0 : 1; return 0; @@ -2181,6 +2205,8 @@ static enum path_treatment treat_path(struct dir_struct *dir, ignore_case); if (dtype != DT_DIR && has_path_in_index) return path_none; + if (is_excluded_from_virtualfilesystem(path->buf, path->len, dtype) > 0) + return path_excluded; /* * When we are looking at a directory P in the working tree, @@ -2385,6 +2411,8 @@ static void add_path_to_appropriate_result_list(struct dir_struct *dir, /* add the path to the appropriate result list */ switch (state) { case path_excluded: + if (is_excluded_from_virtualfilesystem(path->buf, path->len, DT_DIR) > 0) + break; if (dir->flags & DIR_SHOW_IGNORED) dir_add_name(dir, istate, path->buf, path->len); else if ((dir->flags & DIR_SHOW_IGNORED_TOO) || diff --git a/environment.c b/environment.c index bca4ade1a7e8d6..33ee0ad30a0d48 100644 --- a/environment.c +++ b/environment.c @@ -70,6 +70,7 @@ int grafts_replace_parents = 1; int core_apply_sparse_checkout; int core_sparse_checkout_cone; int core_gvfs; +const char *core_virtualfilesystem; int merge_log_config = -1; int precomposed_unicode = -1; /* see probe_utf8_pathname_composition() */ unsigned long pack_size_limit_cfg; diff --git a/read-cache.c b/read-cache.c index 381802b6dcbd64..fd3853217dc1c5 100644 --- a/read-cache.c +++ b/read-cache.c @@ -25,6 +25,7 @@ #include "fsmonitor.h" #include "thread-utils.h" #include "progress.h" +#include "virtualfilesystem.h" #include "gvfs.h" /* Mask for the name length in ce_flags in the on-disk index */ @@ -1920,6 +1921,7 @@ static void post_read_index_from(struct index_state *istate) tweak_untracked_cache(istate); tweak_split_index(istate); tweak_fsmonitor(istate); + apply_virtualfilesystem(istate); } static size_t estimate_cache_size_from_compressed(unsigned int entries) diff --git a/t/t1092-virtualfilesystem.sh b/t/t1092-virtualfilesystem.sh new file mode 100755 index 00000000000000..88ab4a6a6084df --- /dev/null +++ b/t/t1092-virtualfilesystem.sh @@ -0,0 +1,349 @@ +#!/bin/sh + +test_description='virtual file system tests' + +. 
./test-lib.sh + +clean_repo () { + rm .git/index && + git -c core.virtualfilesystem= reset --hard HEAD && + git -c core.virtualfilesystem= clean -fd && + touch untracked.txt && + touch dir1/untracked.txt && + touch dir2/untracked.txt +} + +test_expect_success 'setup' ' + mkdir -p .git/hooks/ && + cat > .gitignore <<-\EOF && + .gitignore + expect* + actual* + EOF + mkdir -p dir1 && + touch dir1/file1.txt && + touch dir1/file2.txt && + mkdir -p dir2 && + touch dir2/file1.txt && + touch dir2/file2.txt && + git add . && + git commit -m "initial" && + git config --local core.virtualfilesystem .git/hooks/virtualfilesystem +' + +test_expect_success 'test hook parameters and version' ' + clean_repo && + write_script .git/hooks/virtualfilesystem <<-\EOF && + if test "$#" -ne 1 + then + echo "$0: Exactly 1 argument expected" >&2 + exit 2 + fi + + if test "$1" != 1 + then + echo "$0: Unsupported hook version." >&2 + exit 1 + fi + EOF + git status && + write_script .git/hooks/virtualfilesystem <<-\EOF && + exit 3 + EOF + test_must_fail git status +' + +test_expect_success 'verify status is clean' ' + clean_repo && + write_script .git/hooks/virtualfilesystem <<-\EOF && + printf "dir2/file1.txt\0" + EOF + rm -f .git/index && + git checkout -f && + write_script .git/hooks/virtualfilesystem <<-\EOF && + printf "dir2/file1.txt\0" + printf "dir1/file1.txt\0" + printf "dir1/file2.txt\0" + EOF + git status > actual && + cat > expected <<-\EOF && + On branch master + nothing to commit, working tree clean + EOF + test_i18ncmp expected actual +' + +test_expect_success 'verify skip-worktree bit is set for absolute path' ' + clean_repo && + write_script .git/hooks/virtualfilesystem <<-\EOF && + printf "dir1/file1.txt\0" + EOF + git ls-files -v > actual && + cat > expected <<-\EOF && + H dir1/file1.txt + S dir1/file2.txt + S dir2/file1.txt + S dir2/file2.txt + EOF + test_cmp expected actual +' + +test_expect_success 'verify skip-worktree bit is cleared for absolute path' ' + clean_repo && + write_script .git/hooks/virtualfilesystem <<-\EOF && + printf "dir1/file2.txt\0" + EOF + git ls-files -v > actual && + cat > expected <<-\EOF && + S dir1/file1.txt + H dir1/file2.txt + S dir2/file1.txt + S dir2/file2.txt + EOF + test_cmp expected actual +' + +test_expect_success 'verify folder wild cards' ' + clean_repo && + write_script .git/hooks/virtualfilesystem <<-\EOF && + printf "dir1/\0" + EOF + git ls-files -v > actual && + cat > expected <<-\EOF && + H dir1/file1.txt + H dir1/file2.txt + S dir2/file1.txt + S dir2/file2.txt + EOF + test_cmp expected actual +' + +test_expect_success 'verify folders not included are ignored' ' + clean_repo && + write_script .git/hooks/virtualfilesystem <<-\EOF && + printf "dir1/file1.txt\0" + printf "dir1/file2.txt\0" + EOF + mkdir -p dir1/dir2 && + touch dir1/a && + touch dir1/b && + touch dir1/dir2/a && + touch dir1/dir2/b && + git add . && + git ls-files -v > actual && + cat > expected <<-\EOF && + H dir1/file1.txt + H dir1/file2.txt + S dir2/file1.txt + S dir2/file2.txt + EOF + test_cmp expected actual +' + +test_expect_success 'verify including one file doesnt include the rest' ' + clean_repo && + write_script .git/hooks/virtualfilesystem <<-\EOF && + printf "dir1/file1.txt\0" + printf "dir1/file2.txt\0" + printf "dir1/dir2/a\0" + EOF + mkdir -p dir1/dir2 && + touch dir1/a && + touch dir1/b && + touch dir1/dir2/a && + touch dir1/dir2/b && + git add . 
&& + git ls-files -v > actual && + cat > expected <<-\EOF && + H dir1/dir2/a + H dir1/file1.txt + H dir1/file2.txt + S dir2/file1.txt + S dir2/file2.txt + EOF + test_cmp expected actual +' + +test_expect_success 'verify files not listed are ignored by git clean -f -x' ' + clean_repo && + write_script .git/hooks/virtualfilesystem <<-\EOF && + printf "untracked.txt\0" + printf "dir1/\0" + EOF + mkdir -p dir3 && + touch dir3/untracked.txt && + git clean -f -x && + test ! -f untracked.txt && + test -d dir1 && + test -f dir1/file1.txt && + test -f dir1/file2.txt && + test ! -f dir1/untracked.txt && + test -f dir2/file1.txt && + test -f dir2/file2.txt && + test -f dir2/untracked.txt && + test -d dir3 && + test -f dir3/untracked.txt +' + +test_expect_success 'verify files not listed are ignored by git clean -f -d -x' ' + clean_repo && + write_script .git/hooks/virtualfilesystem <<-\EOF && + printf "untracked.txt\0" + printf "dir1/\0" + printf "dir3/\0" + EOF + mkdir -p dir3 && + touch dir3/untracked.txt && + git clean -f -d -x && + test ! -f untracked.txt && + test -d dir1 && + test -f dir1/file1.txt && + test -f dir1/file2.txt && + test ! -f dir1/untracked.txt && + test -f dir2/file1.txt && + test -f dir2/file2.txt && + test -f dir2/untracked.txt && + test ! -d dir3 && + test ! -f dir3/untracked.txt +' + +test_expect_success 'verify folder entries include all files' ' + clean_repo && + write_script .git/hooks/virtualfilesystem <<-\EOF && + printf "dir1/\0" + EOF + mkdir -p dir1/dir2 && + touch dir1/a && + touch dir1/b && + touch dir1/dir2/a && + touch dir1/dir2/b && + git status -su > actual && + cat > expected <<-\EOF && + ?? dir1/a + ?? dir1/b + ?? dir1/untracked.txt + EOF + test_cmp expected actual +' + +test_expect_success 'verify case insensitivity of virtual file system entries' ' + clean_repo && + write_script .git/hooks/virtualfilesystem <<-\EOF && + printf "dir1/a\0" + printf "Dir1/Dir2/a\0" + printf "DIR2/\0" + EOF + mkdir -p dir1/dir2 && + touch dir1/a && + touch dir1/b && + touch dir1/dir2/a && + touch dir1/dir2/b && + git -c core.ignorecase=false status -su > actual && + cat > expected <<-\EOF && + ?? dir1/a + EOF + test_cmp expected actual && + git -c core.ignorecase=true status -su > actual && + cat > expected <<-\EOF && + ?? dir1/a + ?? dir1/dir2/a + ?? dir2/untracked.txt + EOF + test_cmp expected actual +' + +test_expect_success 'on file created' ' + clean_repo && + write_script .git/hooks/virtualfilesystem <<-\EOF && + printf "dir1/file3.txt\0" + EOF + touch dir1/file3.txt && + git add . && + git ls-files -v > actual && + cat > expected <<-\EOF && + S dir1/file1.txt + S dir1/file2.txt + H dir1/file3.txt + S dir2/file1.txt + S dir2/file2.txt + EOF + test_cmp expected actual +' + +test_expect_success 'on file renamed' ' + clean_repo && + write_script .git/hooks/virtualfilesystem <<-\EOF && + printf "dir1/file1.txt\0" + printf "dir1/file3.txt\0" + EOF + mv dir1/file1.txt dir1/file3.txt && + git status -su > actual && + cat > expected <<-\EOF && + D dir1/file1.txt + ?? 
dir1/file3.txt + EOF + test_cmp expected actual +' + +test_expect_success 'on file deleted' ' + clean_repo && + write_script .git/hooks/virtualfilesystem <<-\EOF && + printf "dir1/file1.txt\0" + EOF + rm dir1/file1.txt && + git status -su > actual && + cat > expected <<-\EOF && + D dir1/file1.txt + EOF + test_cmp expected actual +' + +test_expect_success 'on file overwritten' ' + clean_repo && + write_script .git/hooks/virtualfilesystem <<-\EOF && + printf "dir1/file1.txt\0" + EOF + echo "overwritten" > dir1/file1.txt && + git status -su > actual && + cat > expected <<-\EOF && + M dir1/file1.txt + EOF + test_cmp expected actual +' + +test_expect_success 'on folder created' ' + clean_repo && + write_script .git/hooks/virtualfilesystem <<-\EOF && + printf "dir1/dir1/\0" + EOF + mkdir -p dir1/dir1 && + git status -su > actual && + cat > expected <<-\EOF && + EOF + test_cmp expected actual && + git clean -fd && + test ! -d "/dir1/dir1" +' + +test_expect_success 'on folder renamed' ' + clean_repo && + write_script .git/hooks/virtualfilesystem <<-\EOF && + printf "dir3/\0" + printf "dir1/file1.txt\0" + printf "dir1/file2.txt\0" + printf "dir3/file1.txt\0" + printf "dir3/file2.txt\0" + EOF + mv dir1 dir3 && + git status -su > actual && + cat > expected <<-\EOF && + D dir1/file1.txt + D dir1/file2.txt + ?? dir3/file1.txt + ?? dir3/file2.txt + ?? dir3/untracked.txt + EOF + test_cmp expected actual +' + +test_done diff --git a/unpack-trees.c b/unpack-trees.c index d497bfe9b64d3b..c42c809a6e5b69 100644 --- a/unpack-trees.c +++ b/unpack-trees.c @@ -17,6 +17,7 @@ #include "object-store.h" #include "promisor-remote.h" #include "gvfs.h" +#include "virtualfilesystem.h" /* * Error messages expected by scripts out of plumbing commands such as @@ -1428,6 +1429,14 @@ static int clear_ce_flags_1(struct index_state *istate, continue; } + /* if it's not in the virtual file system, exit early */ + if (core_virtualfilesystem) { + if (is_included_in_virtualfilesystem(ce->name, ce->ce_namelen) > 0) + ce->ce_flags &= ~clear_mask; + cache++; + continue; + } + if (prefix->len && strncmp(ce->name, prefix->buf, prefix->len)) break; @@ -1590,7 +1599,10 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options if (!o->skip_sparse_checkout && !o->pl) { memset(&pl, 0, sizeof(pl)); free_pattern_list = 1; - populate_from_existing_patterns(o, &pl); + if (core_virtualfilesystem) + o->pl = &pl; + else + populate_from_existing_patterns(o, &pl); } memset(&o->result, 0, sizeof(o->result)); diff --git a/virtualfilesystem.c b/virtualfilesystem.c new file mode 100644 index 00000000000000..9aa2ca239015dd --- /dev/null +++ b/virtualfilesystem.c @@ -0,0 +1,312 @@ +#include "cache.h" +#include "config.h" +#include "dir.h" +#include "hashmap.h" +#include "run-command.h" +#include "virtualfilesystem.h" + +#define HOOK_INTERFACE_VERSION (1) + +static struct strbuf virtual_filesystem_data = STRBUF_INIT; +static struct hashmap virtual_filesystem_hashmap; +static struct hashmap parent_directory_hashmap; + +struct virtualfilesystem { + struct hashmap_entry ent; /* must be the first member! 
*/ + const char *pattern; + int patternlen; +}; + +static unsigned int(*vfshash)(const void *buf, size_t len); +static int(*vfscmp)(const char *a, const char *b, size_t len); + +static int vfs_hashmap_cmp(const void *unused_cmp_data, + const struct hashmap_entry *he1, + const struct hashmap_entry *he2, + const void *key) +{ + const struct virtualfilesystem *vfs1 = + container_of(he1, const struct virtualfilesystem, ent); + const struct virtualfilesystem *vfs2 = + container_of(he2, const struct virtualfilesystem, ent); + + return vfscmp(vfs1->pattern, vfs2->pattern, vfs1->patternlen); +} + +static void get_virtual_filesystem_data(struct strbuf *vfs_data) +{ + struct child_process cp = CHILD_PROCESS_INIT; + char ver[64]; + const char *argv[3]; + int err; + + strbuf_init(vfs_data, 0); + + snprintf(ver, sizeof(ver), "%d", HOOK_INTERFACE_VERSION); + argv[0] = core_virtualfilesystem; + argv[1] = ver; + argv[2] = NULL; + cp.argv = argv; + cp.use_shell = 1; + + err = capture_command(&cp, vfs_data, 1024); + if (err) + die("unable to load virtual file system"); +} + +static int check_includes_hashmap(struct hashmap *map, const char *pattern, int patternlen) +{ + struct strbuf sb = STRBUF_INIT; + struct virtualfilesystem vfs; + char *slash; + + /* Check straight mapping */ + strbuf_reset(&sb); + strbuf_add(&sb, pattern, patternlen); + vfs.pattern = sb.buf; + vfs.patternlen = sb.len; + hashmap_entry_init(&vfs.ent, vfshash(vfs.pattern, vfs.patternlen)); + if (hashmap_get_entry(map, &vfs, ent, NULL)) { + strbuf_release(&sb); + return 1; + } + + /* + * Check to see if it matches a directory or any path + * underneath it. In other words, 'a/b/foo.txt' will match + * '/', 'a/', and 'a/b/'. + */ + slash = strchr(sb.buf, '/'); + while (slash) { + vfs.pattern = sb.buf; + vfs.patternlen = slash - sb.buf + 1; + hashmap_entry_init(&vfs.ent, vfshash(vfs.pattern, vfs.patternlen)); + if (hashmap_get_entry(map, &vfs, ent, NULL)) { + strbuf_release(&sb); + return 1; + } + slash = strchr(slash + 1, '/'); + } + + strbuf_release(&sb); + return 0; +} + +static void includes_hashmap_add(struct hashmap *map, const char *pattern, const int patternlen) +{ + struct virtualfilesystem *vfs; + + vfs = xmalloc(sizeof(struct virtualfilesystem)); + vfs->pattern = pattern; + vfs->patternlen = patternlen; + hashmap_entry_init(&vfs->ent, vfshash(vfs->pattern, vfs->patternlen)); + hashmap_add(map, &vfs->ent); +} + +static void initialize_includes_hashmap(struct hashmap *map, struct strbuf *vfs_data) +{ + char *buf, *entry; + size_t len; + int i; + + /* + * Build a hashmap of the virtual file system data we can use to look + * for cache entry matches quickly + */ + vfshash = ignore_case ? memihash : memhash; + vfscmp = ignore_case ? strncasecmp : strncmp; + hashmap_init(map, vfs_hashmap_cmp, NULL, 0); + + entry = buf = vfs_data->buf; + len = vfs_data->len; + for (i = 0; i < len; i++) { + if (buf[i] == '\0') { + includes_hashmap_add(map, entry, buf + i - entry); + entry = buf + i + 1; + } + } +} + +/* + * Return 1 if the requested item is found in the virtual file system, + * 0 for not found and -1 for undecided. 
+ */ +int is_included_in_virtualfilesystem(const char *pathname, int pathlen) +{ + if (!core_virtualfilesystem) + return -1; + + if (!virtual_filesystem_hashmap.tablesize && virtual_filesystem_data.len) + initialize_includes_hashmap(&virtual_filesystem_hashmap, &virtual_filesystem_data); + if (!virtual_filesystem_hashmap.tablesize) + return -1; + + return check_includes_hashmap(&virtual_filesystem_hashmap, pathname, pathlen); +} + +static void parent_directory_hashmap_add(struct hashmap *map, const char *pattern, const int patternlen) +{ + char *slash; + struct virtualfilesystem *vfs; + + /* + * Add any directories leading up to the file as the excludes logic + * needs to match directories leading up to the files as well. Detect + * and prevent unnecessary duplicate entries which will be common. + */ + if (patternlen > 1) { + slash = strchr(pattern + 1, '/'); + while (slash) { + vfs = xmalloc(sizeof(struct virtualfilesystem)); + vfs->pattern = pattern; + vfs->patternlen = slash - pattern + 1; + hashmap_entry_init(&vfs->ent, vfshash(vfs->pattern, vfs->patternlen)); + if (hashmap_get_entry(map, vfs, ent, NULL)) + free(vfs); + else + hashmap_add(map, &vfs->ent); + slash = strchr(slash + 1, '/'); + } + } +} + +static void initialize_parent_directory_hashmap(struct hashmap *map, struct strbuf *vfs_data) +{ + char *buf, *entry; + size_t len; + int i; + + /* + * Build a hashmap of the parent directories contained in the virtual + * file system data we can use to look for matches quickly + */ + vfshash = ignore_case ? memihash : memhash; + vfscmp = ignore_case ? strncasecmp : strncmp; + hashmap_init(map, vfs_hashmap_cmp, NULL, 0); + + entry = buf = vfs_data->buf; + len = vfs_data->len; + for (i = 0; i < len; i++) { + if (buf[i] == '\0') { + parent_directory_hashmap_add(map, entry, buf + i - entry); + entry = buf + i + 1; + } + } +} + +static int check_directory_hashmap(struct hashmap *map, const char *pathname, int pathlen) +{ + struct strbuf sb = STRBUF_INIT; + struct virtualfilesystem vfs; + + /* Check for directory */ + strbuf_reset(&sb); + strbuf_add(&sb, pathname, pathlen); + strbuf_addch(&sb, '/'); + vfs.pattern = sb.buf; + vfs.patternlen = sb.len; + hashmap_entry_init(&vfs.ent, vfshash(vfs.pattern, vfs.patternlen)); + if (hashmap_get_entry(map, &vfs, ent, NULL)) { + strbuf_release(&sb); + return 0; + } + + strbuf_release(&sb); + return 1; +} + +/* + * Return 1 for exclude, 0 for include and -1 for undecided. + */ +int is_excluded_from_virtualfilesystem(const char *pathname, int pathlen, int dtype) +{ + if (!core_virtualfilesystem) + return -1; + + if (dtype != DT_REG && dtype != DT_DIR && dtype != DT_LNK) + die(_("is_excluded_from_virtualfilesystem passed unhandled dtype")); + + if (dtype == DT_REG) { + int ret = is_included_in_virtualfilesystem(pathname, pathlen); + if (ret > 0) + return 0; + if (ret == 0) + return 1; + return ret; + } + + if (dtype == DT_DIR || dtype == DT_LNK) { + if (!parent_directory_hashmap.tablesize && virtual_filesystem_data.len) + initialize_parent_directory_hashmap(&parent_directory_hashmap, &virtual_filesystem_data); + if (!parent_directory_hashmap.tablesize) + return -1; + + return check_directory_hashmap(&parent_directory_hashmap, pathname, pathlen); + } + + return -1; +} + +/* + * Update the CE_SKIP_WORKTREE bits based on the virtual file system. 
+ */ +void apply_virtualfilesystem(struct index_state *istate) +{ + char *buf, *entry; + int i; + + if (!git_config_get_virtualfilesystem()) + return; + + if (!virtual_filesystem_data.len) + get_virtual_filesystem_data(&virtual_filesystem_data); + + /* set CE_SKIP_WORKTREE bit on all entries */ + for (i = 0; i < istate->cache_nr; i++) + istate->cache[i]->ce_flags |= CE_SKIP_WORKTREE; + + /* clear CE_SKIP_WORKTREE bit for everything in the virtual file system */ + entry = buf = virtual_filesystem_data.buf; + for (i = 0; i < virtual_filesystem_data.len; i++) { + if (buf[i] == '\0') { + int pos, len; + + len = buf + i - entry; + + /* look for a directory wild card (ie "dir1/") */ + if (buf[i - 1] == '/') { + if (ignore_case) + adjust_dirname_case(istate, entry); + pos = index_name_pos(istate, entry, len - 1); + if (pos < 0) { + pos = -pos - 1; + while (pos < istate->cache_nr && !fspathncmp(istate->cache[pos]->name, entry, len)) { + istate->cache[pos]->ce_flags &= ~CE_SKIP_WORKTREE; + pos++; + } + } + } else { + if (ignore_case) { + struct cache_entry *ce = index_file_exists(istate, entry, len, ignore_case); + if (ce) + ce->ce_flags &= ~CE_SKIP_WORKTREE; + } else { + int pos = index_name_pos(istate, entry, len); + if (pos >= 0) + istate->cache[pos]->ce_flags &= ~CE_SKIP_WORKTREE; + } + } + + entry += len + 1; + } + } +} + +/* + * Free the virtual file system data structures. + */ +void free_virtualfilesystem(void) { + hashmap_free_entries(&virtual_filesystem_hashmap, struct virtualfilesystem, ent); + hashmap_free_entries(&parent_directory_hashmap, struct virtualfilesystem, ent); + strbuf_release(&virtual_filesystem_data); +} diff --git a/virtualfilesystem.h b/virtualfilesystem.h new file mode 100644 index 00000000000000..5e8c5b096df09a --- /dev/null +++ b/virtualfilesystem.h @@ -0,0 +1,25 @@ +#ifndef VIRTUALFILESYSTEM_H +#define VIRTUALFILESYSTEM_H + +/* + * Update the CE_SKIP_WORKTREE bits based on the virtual file system. + */ +void apply_virtualfilesystem(struct index_state *istate); + +/* + * Return 1 if the requested item is found in the virtual file system, + * 0 for not found and -1 for undecided. + */ +int is_included_in_virtualfilesystem(const char *pathname, int pathlen); + +/* + * Return 1 for exclude, 0 for include and -1 for undecided. + */ +int is_excluded_from_virtualfilesystem(const char *pathname, int pathlen, int dtype); + +/* + * Free the virtual file system data structures. 
+ */ +void free_virtualfilesystem(void); + +#endif From 892ddf7bad57d7fe64dc2614914893717aa6475f Mon Sep 17 00:00:00 2001 From: Kevin Willford Date: Fri, 16 Nov 2018 11:28:59 -0700 Subject: [PATCH 041/104] send-pack: do not check for sha1 file when GVFS_MISSING_OK set --- send-pack.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/send-pack.c b/send-pack.c index a91b2502116de0..b73473926d2a0a 100644 --- a/send-pack.c +++ b/send-pack.c @@ -2,6 +2,7 @@ #include "config.h" #include "commit.h" #include "refs.h" +#include "gvfs.h" #include "object-store.h" #include "pkt-line.h" #include "sideband.h" @@ -51,7 +52,7 @@ static int send_pack_config(const char *var, const char *value, void *unused) static void feed_object(const struct object_id *oid, FILE *fh, int negative) { - if (negative && + if (negative && !gvfs_config_is_set(GVFS_MISSING_OK) && !has_object_file_with_flags(oid, OBJECT_INFO_SKIP_FETCH_OBJECT | OBJECT_INFO_QUICK)) From 1b6a05518979ab901cd384130eea7a889c0e1b82 Mon Sep 17 00:00:00 2001 From: Jeff Hostetler Date: Tue, 30 Apr 2019 14:12:51 -0400 Subject: [PATCH 042/104] trace2:gvfs:experiment: clear_ce_flags_1 Signed-off-by: Jeff Hostetler --- unpack-trees.c | 1 + 1 file changed, 1 insertion(+) diff --git a/unpack-trees.c b/unpack-trees.c index c42c809a6e5b69..792fd815fe194d 100644 --- a/unpack-trees.c +++ b/unpack-trees.c @@ -1513,6 +1513,7 @@ static int clear_ce_flags(struct index_state *istate, xsnprintf(label, sizeof(label), "clear_ce_flags(0x%08lx,0x%08lx)", (unsigned long)select_mask, (unsigned long)clear_mask); trace2_region_enter("unpack_trees", label, the_repository); + rval = clear_ce_flags_1(istate, istate->cache, istate->cache_nr, From b78a80352fd8ba4bdf54c5c95979352fb713c6f4 Mon Sep 17 00:00:00 2001 From: Jeff Hostetler Date: Tue, 25 Jun 2019 16:38:50 -0400 Subject: [PATCH 043/104] status: deserialize with -uno does not print correct hint With the "--untracked-files=complete" option status computes a superset of the untracked files. We use this when writing the status cache. If subsequent deserialize commands ask for either the complete set or one of the "no", "normal", or "all" subsets, it can still use the cache file because of filtering in the deserialize parser. When running status with the "-uno" option, the long format status would print a "(use -u to show untracked files)" hint. When deserializing with the "-uno" option and using a cache computed with "-ucomplete", the "nothing to commit, working tree clean" message would be printed instead of the hint. It was easy to miss because the correct hint message was printed if the cache was rejected for any reason (and status did the full fallback). The "struct wt_status des" structure was initialized with the content of the status cache (and thus defaulted to "complete"). This change sets "des.show_untracked_files" to the requested subset from the command-line or config. This allows the long format to print the hint. 
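A sketch of the affected flow (file names here are illustrative):

	# write a status cache using the complete untracked superset
	git status --untracked-files=complete --serialize=status.dat

	# deserializing with -uno now prints the long format hint
	#     "(use -u to show untracked files)"
	# instead of claiming the working tree is clean
	git status --deserialize=status.dat -uno
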
Signed-off-by: Jeff Hostetler --- t/t7524-serialized-status.sh | 55 ++++++++++++++++++++++++++++++++++++ wt-status-deserialize.c | 16 +++++++---- 2 files changed, 65 insertions(+), 6 deletions(-) diff --git a/t/t7524-serialized-status.sh b/t/t7524-serialized-status.sh index 46eb746fa1e357..743ecd59b2827d 100755 --- a/t/t7524-serialized-status.sh +++ b/t/t7524-serialized-status.sh @@ -332,4 +332,59 @@ test_expect_success 'renames' ' test_i18ncmp output.1 output.2 ' +test_expect_success 'hint message when cached with u=complete' ' + git init hint && + echo xxx >hint/xxx && + git -C hint add xxx && + git -C hint commit -m xxx && + + cat >expect.clean <expect.use_u <hint.output_normal && + test_i18ncmp expect.clean hint.output_normal && + + git -C hint status --untracked-files=all >hint.output_all && + test_i18ncmp expect.clean hint.output_all && + + git -C hint status --untracked-files=no >hint.output_no && + test_i18ncmp expect.use_u hint.output_no && + + # Create long format output for "complete" and create status cache. + + git -C hint status --untracked-files=complete --ignored=matching --serialize=../hint.dat >hint.output_complete && + test_i18ncmp expect.clean hint.output_complete && + + # Capture long format output using the status cache and verify + # that the output matches the non-cached version. There are 2 + # ways to specify untracked-files, so do them both. + + git -C hint status --deserialize=../hint.dat -unormal >hint.d1_normal && + test_i18ncmp expect.clean hint.d1_normal && + git -C hint -c status.showuntrackedfiles=normal status --deserialize=../hint.dat >hint.d2_normal && + test_i18ncmp expect.clean hint.d2_normal && + + git -C hint status --deserialize=../hint.dat -uall >hint.d1_all && + test_i18ncmp expect.clean hint.d1_all && + git -C hint -c status.showuntrackedfiles=all status --deserialize=../hint.dat >hint.d2_all && + test_i18ncmp expect.clean hint.d2_all && + + git -C hint status --deserialize=../hint.dat -uno >hint.d1_no && + test_i18ncmp expect.use_u hint.d1_no && + git -C hint -c status.showuntrackedfiles=no status --deserialize=../hint.dat >hint.d2_no && + test_i18ncmp expect.use_u hint.d2_no + +' + test_done diff --git a/wt-status-deserialize.c b/wt-status-deserialize.c index 61c8fbc44bffa1..74eb0aa32e1d40 100644 --- a/wt-status-deserialize.c +++ b/wt-status-deserialize.c @@ -415,20 +415,24 @@ static int wt_deserialize_v1_ignored_items(struct wt_status *s, } static int validate_untracked_files_arg(enum untracked_status_type cmd, - enum untracked_status_type des, + enum untracked_status_type *des, enum deserialize_parse_strategy *strategy) { *strategy = DESERIALIZE_STRATEGY_AS_IS; - if (cmd == des) { + if (cmd == *des) { *strategy = DESERIALIZE_STRATEGY_AS_IS; } else if (cmd == SHOW_NO_UNTRACKED_FILES) { *strategy = DESERIALIZE_STRATEGY_SKIP; - } else if (des == SHOW_COMPLETE_UNTRACKED_FILES) { - if (cmd == SHOW_ALL_UNTRACKED_FILES) + *des = cmd; + } else if (*des == SHOW_COMPLETE_UNTRACKED_FILES) { + if (cmd == SHOW_ALL_UNTRACKED_FILES) { *strategy = DESERIALIZE_STRATEGY_ALL; - else if (cmd == SHOW_NORMAL_UNTRACKED_FILES) + *des = cmd; + } else if (cmd == SHOW_NORMAL_UNTRACKED_FILES) { *strategy = DESERIALIZE_STRATEGY_NORMAL; + *des = cmd; + } } else { return DESERIALIZE_ERR; } @@ -470,7 +474,7 @@ static int wt_deserialize_v1(const struct wt_status *cmd_s, struct wt_status *s, * We now have the header parsed. 
Look at the command args (as passed in), and see how to parse
	 * the serialized data
	 */
-	if (validate_untracked_files_arg(cmd_s->show_untracked_files, s->show_untracked_files, &untracked_strategy)) {
+	if (validate_untracked_files_arg(cmd_s->show_untracked_files, &s->show_untracked_files, &untracked_strategy)) {
 		trace_printf_key(&trace_deserialize, "reject: show_untracked_file: command: %d, serialized : %d",
 				 cmd_s->show_untracked_files,
 				 s->show_untracked_files);

From d5e00e25b93a2d588bf535f58254565ba0a61872 Mon Sep 17 00:00:00 2001
From: Ben Peart
Date: Thu, 7 Jun 2018 13:49:01 -0400
Subject: [PATCH 044/104] Update the virtualfilesystem support

We now specify that the hook needs to be run from the root of the git
work tree. This enables the hook to be found even if the current
working directory is not the root of the repo (like when running 'git
diff' with Beyond Compare configured as the diff tool).

Also simplify how the argv[] parameter is created.

Signed-off-by: Ben Peart
---
 virtualfilesystem.c | 10 +++-------
 1 file changed, 3 insertions(+), 7 deletions(-)

diff --git a/virtualfilesystem.c b/virtualfilesystem.c
index 9aa2ca239015dd..de160cd5ee865f 100644
--- a/virtualfilesystem.c
+++ b/virtualfilesystem.c
@@ -36,18 +36,14 @@ static int vfs_hashmap_cmp(const void *unused_cmp_data,
 static void get_virtual_filesystem_data(struct strbuf *vfs_data)
 {
 	struct child_process cp = CHILD_PROCESS_INIT;
-	char ver[64];
-	const char *argv[3];
 	int err;
 
 	strbuf_init(vfs_data, 0);
 
-	snprintf(ver, sizeof(ver), "%d", HOOK_INTERFACE_VERSION);
-	argv[0] = core_virtualfilesystem;
-	argv[1] = ver;
-	argv[2] = NULL;
-	cp.argv = argv;
+	argv_array_push(&cp.args, core_virtualfilesystem);
+	argv_array_pushf(&cp.args, "%d", HOOK_INTERFACE_VERSION);
 	cp.use_shell = 1;
+	cp.dir = get_git_work_tree();
 
 	err = capture_command(&cp, vfs_data, 1024);
 	if (err)

From 8f48b096dd181aa3146c919b27820da6415ada8d Mon Sep 17 00:00:00 2001
From: Kevin Willford
Date: Mon, 3 Jul 2017 13:39:45 -0600
Subject: [PATCH 045/104] cache-tree: remove use of strbuf_addf in update_one

String formatting can be a performance issue when there are hundreds
of thousands of trees. Change the code to stop using strbuf_addf() and
just add the strings or characters individually. There are a limited
number of modes, so a switch was added for the known ones, with a
default case for anything that comes through that is not a mode known
to git.

In one scenario regarding a huge worktree, this reduces the time
required for a `git checkout ` from 44 seconds to 38 seconds, i.e. it
is a non-negligible performance improvement.
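For illustration, the hot path in update_one() changes roughly like
this (simplified; "buffer", "mode", "path", "baselen" and "entlen" are
as in the surrounding code):

	/* before: printf-style formatting parses "%o %.*s%c" per entry */
	strbuf_addf(&buffer, "%o %.*s%c", mode, entlen, path + baselen, '\0');

	/* after: a known mode appends a constant 7-byte prefix directly */
	strbuf_add(&buffer, "100644 ", 7);
	strbuf_add(&buffer, path + baselen, entlen);
	strbuf_addch(&buffer, '\0');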
Signed-off-by: Kevin Willford --- cache-tree.c | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/cache-tree.c b/cache-tree.c index 9a59dafa26a07d..cc4dfd8c53d402 100644 --- a/cache-tree.c +++ b/cache-tree.c @@ -398,7 +398,29 @@ static int update_one(struct cache_tree *it, continue; strbuf_grow(&buffer, entlen + 100); - strbuf_addf(&buffer, "%o %.*s%c", mode, entlen, path + baselen, '\0'); + + switch (mode) { + case 0100644: + strbuf_add(&buffer, "100644 ", 7); + break; + case 0100664: + strbuf_add(&buffer, "100664 ", 7); + break; + case 0100755: + strbuf_add(&buffer, "100755 ", 7); + break; + case 0120000: + strbuf_add(&buffer, "120000 ", 7); + break; + case 0160000: + strbuf_add(&buffer, "160000 ", 7); + break; + default: + strbuf_addf(&buffer, "%o ", mode); + break; + } + strbuf_add(&buffer, path + baselen, entlen); + strbuf_addch(&buffer, '\0'); strbuf_add(&buffer, oid->hash, the_hash_algo->rawsz); #if DEBUG_CACHE_TREE From 4d00479344f0660afaa374b70304cf9f24959d59 Mon Sep 17 00:00:00 2001 From: Jeff Hostetler Date: Tue, 30 Apr 2019 15:27:40 -0400 Subject: [PATCH 046/104] trace2:gvfs:experiment: traverse_trees Signed-off-by: Jeff Hostetler --- unpack-trees.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/unpack-trees.c b/unpack-trees.c index 792fd815fe194d..7d3b5deee01211 100644 --- a/unpack-trees.c +++ b/unpack-trees.c @@ -1671,7 +1671,9 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options } trace_performance_enter(); + trace2_region_enter("exp", "traverse_trees", the_repository); ret = traverse_trees(o->src_index, len, t, &info); + trace2_region_leave("exp", "traverse_trees", the_repository); trace_performance_leave("traverse_trees"); if (ret < 0) goto return_failed; From 7f49dd21e2cb1b3ee45e29910a6b3cf00c077f83 Mon Sep 17 00:00:00 2001 From: Jeff Hostetler Date: Fri, 10 Apr 2020 19:52:27 -0400 Subject: [PATCH 047/104] wt-status-deserialize: fix crash when -v is used Fix crash in `git status -v` by setting `des_s->repo` to a non-null value. Upstream changes to eliminate use of `the_repository` added a `repo` field to `struct status`. And calls in `wt-status.c` to `repo_init_revisions()` were changed to pass `s->repo` rather than `the_repository`. The status deserialization code was not updated to actually set `s->repo` before common code passed the value to OID routines. This caused a segfault when verbose output was requested. Signed-off-by: Jeff Hostetler --- wt-status-deserialize.c | 1 + 1 file changed, 1 insertion(+) diff --git a/wt-status-deserialize.c b/wt-status-deserialize.c index 74eb0aa32e1d40..27e3bb0f053f1f 100644 --- a/wt-status-deserialize.c +++ b/wt-status-deserialize.c @@ -649,6 +649,7 @@ static int wt_deserialize_fd(const struct wt_status *cmd_s, struct wt_status *de /* * Copy over display-related fields from the current command. */ + des_s->repo = cmd_s->repo; des_s->verbose = cmd_s->verbose; /* amend */ /* whence */ From 513d5eceb4e6131cb93d398850ab97155e24934b Mon Sep 17 00:00:00 2001 From: Ben Peart Date: Wed, 1 Aug 2018 13:26:22 -0400 Subject: [PATCH 048/104] virtualfilesystem: don't run the virtual file system hook if the index has been redirected Fixes #13 Some git commands spawn helpers and redirect the index to a different location. These include "difftool -d" and the sequencer (i.e. `git rebase -i`, `git cherry-pick` and `git revert`) and others. In those instances we don't want to update their temporary index with our virtualization data. 
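For context, a spawned helper redirects the index by exporting the GIT_INDEX_FILE environment variable; the following is a self-contained sketch of the detection idea only (hypothetical function, plain C), while the actual change below compares the_repository->index_file against the default "<gitdir>/index" path.

```c
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Sketch only: a spawned helper such as "difftool -d" redirects the
 * index by exporting GIT_INDEX_FILE, so the virtualization hook
 * should run only when the in-use index is still "<gitdir>/index". */
static int index_is_redirected(const char *gitdir)
{
	const char *env = getenv("GIT_INDEX_FILE");
	char def[PATH_MAX];

	if (!env)
		return 0; /* default index in use */
	snprintf(def, sizeof(def), "%s/index", gitdir);
	return strcmp(env, def) != 0;
}
```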
Helped-by: Johannes Schindelin Signed-off-by: Ben Peart --- config.c | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/config.c b/config.c index 3be44a69a9607a..16626712043e05 100644 --- a/config.c +++ b/config.c @@ -2371,10 +2371,24 @@ int git_config_get_virtualfilesystem(void) if (core_virtualfilesystem && !*core_virtualfilesystem) core_virtualfilesystem = NULL; - /* virtual file system relies on the sparse checkout logic so force it on */ if (core_virtualfilesystem) { + /* + * Some git commands spawn helpers and redirect the index to a different + * location. These include "difftool -d" and the sequencer + * (i.e. `git rebase -i`, `git cherry-pick` and `git revert`) and others. + * In those instances we don't want to update their temporary index with + * our virtualization data. + */ + char *default_index_file = xstrfmt("%s/%s", the_repository->gitdir, "index"); + int should_run_hook = !strcmp(default_index_file, the_repository->index_file); + + free(default_index_file); + if (should_run_hook) { + /* virtual file system relies on the sparse checkout logic so force it on */ + core_apply_sparse_checkout = 1; + return 1; + } + core_virtualfilesystem = NULL; } return 0; From 19044c0eecf5d529dacb057d41918d360c45f62f Mon Sep 17 00:00:00 2001 From: Ben Peart Date: Thu, 6 Dec 2018 11:09:19 -0500 Subject: [PATCH 049/104] gvfs: block unsupported commands when running in a GVFS repo The following commands and options are not currently supported when working in a GVFS repo. Add code to detect and block these commands from executing. 1) fsck 2) gc 3) prune 4) repack 5) submodule 6) update-index --split-index 7) update-index --index-version (other than 4) 8) update-index --[no-]skip-worktree 9) worktree Signed-off-by: Ben Peart --- builtin/gc.c | 4 ++++ builtin/update-index.c | 10 ++++++++ git.c | 15 ++++++++---- gvfs.h | 1 + t/t0402-block-command-on-gvfs.sh | 39 ++++++++++++++++++++++++++++++++ 5 files changed, 64 insertions(+), 5 deletions(-) create mode 100755 t/t0402-block-command-on-gvfs.sh diff --git a/builtin/gc.c b/builtin/gc.c index 8e0b9cf41b3d3f..dfed42eac14ef7 100644 --- a/builtin/gc.c +++ b/builtin/gc.c @@ -28,6 +28,7 @@ #include "blob.h" #include "tree.h" #include "promisor-remote.h" +#include "gvfs.h" #define FAILED_RUN "failed to run %s" @@ -585,6 +586,9 @@ int cmd_gc(int argc, const char **argv, const char *prefix) if (quiet) argv_array_push(&repack, "-q"); + if ((!auto_gc || (auto_gc && gc_auto_threshold > 0)) && gvfs_config_is_set(GVFS_BLOCK_COMMANDS)) + die(_("'git gc' is not supported on a GVFS repo")); + if (auto_gc) { /* * Auto-gc should be least intrusive as possible. diff --git a/builtin/update-index.c b/builtin/update-index.c index 79087bccea4b8b..669c5a411e6b73 100644 --- a/builtin/update-index.c +++ b/builtin/update-index.c @@ -18,6 +18,7 @@ #include "dir.h" #include "split-index.h" #include "fsmonitor.h" +#include "gvfs.h" /* * Default to not allowing changes to the list of files. The @@ -1133,7 +1134,13 @@ int cmd_update_index(int argc, const char **argv, const char *prefix) argc = parse_options_end(&ctx); getline_fn = nul_term_line ?
strbuf_getline_nul : strbuf_getline_lf; + if (mark_skip_worktree_only && gvfs_config_is_set(GVFS_BLOCK_COMMANDS)) + die(_("modifying the skip worktree bit is not supported on a GVFS repo")); + if (preferred_index_format) { + if (preferred_index_format != 4 && gvfs_config_is_set(GVFS_BLOCK_COMMANDS)) + die(_("changing the index version is not supported on a GVFS repo")); + if (preferred_index_format < INDEX_FORMAT_LB || INDEX_FORMAT_UB < preferred_index_format) die("index-version %d not in range: %d..%d", @@ -1169,6 +1176,9 @@ int cmd_update_index(int argc, const char **argv, const char *prefix) } if (split_index > 0) { + if (gvfs_config_is_set(GVFS_BLOCK_COMMANDS)) + die(_("split index is not supported on a GVFS repo")); + if (git_config_get_split_index() == 0) warning(_("core.splitIndex is set to false; " "remove or change it, if you really want to " diff --git a/git.c b/git.c index f5a872a8e4449d..ed4cae3dfbe647 100644 --- a/git.c +++ b/git.c @@ -6,6 +6,7 @@ #include "alias.h" #include "shallow.h" #include "dir.h" +#include "gvfs.h" #define RUN_SETUP (1<<0) #define RUN_SETUP_GENTLY (1<<1) @@ -18,6 +19,7 @@ #define SUPPORT_SUPER_PREFIX (1<<4) #define DELAY_PAGER_CONFIG (1<<5) #define NO_PARSEOPT (1<<6) /* parse-options is not used */ +#define BLOCK_ON_GVFS_REPO (1<<7) /* command not allowed in GVFS repos */ struct cmd_struct { const char *cmd; @@ -498,6 +500,9 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv) if (!help && p->option & NEED_WORK_TREE) setup_work_tree(); + if (!help && p->option & BLOCK_ON_GVFS_REPO && gvfs_config_is_set(GVFS_BLOCK_COMMANDS)) + die("'git %s' is not supported on a GVFS repo", p->cmd); + if (run_pre_command_hook(argv)) die("pre-command hook aborted command"); @@ -574,7 +579,7 @@ static struct cmd_struct commands[] = { { "fmt-merge-msg", cmd_fmt_merge_msg, RUN_SETUP }, { "for-each-ref", cmd_for_each_ref, RUN_SETUP }, { "format-patch", cmd_format_patch, RUN_SETUP }, - { "fsck", cmd_fsck, RUN_SETUP }, + { "fsck", cmd_fsck, RUN_SETUP | BLOCK_ON_GVFS_REPO}, { "fsck-objects", cmd_fsck, RUN_SETUP }, { "gc", cmd_gc, RUN_SETUP }, { "get-tar-commit-id", cmd_get_tar_commit_id, NO_PARSEOPT }, @@ -612,7 +617,7 @@ static struct cmd_struct commands[] = { { "pack-refs", cmd_pack_refs, RUN_SETUP }, { "patch-id", cmd_patch_id, RUN_SETUP_GENTLY | NO_PARSEOPT }, { "pickaxe", cmd_blame, RUN_SETUP }, - { "prune", cmd_prune, RUN_SETUP }, + { "prune", cmd_prune, RUN_SETUP | BLOCK_ON_GVFS_REPO}, { "prune-packed", cmd_prune_packed, RUN_SETUP }, { "pull", cmd_pull, RUN_SETUP | NEED_WORK_TREE }, { "push", cmd_push, RUN_SETUP }, @@ -625,7 +630,7 @@ static struct cmd_struct commands[] = { { "remote", cmd_remote, RUN_SETUP }, { "remote-ext", cmd_remote_ext, NO_PARSEOPT }, { "remote-fd", cmd_remote_fd, NO_PARSEOPT }, - { "repack", cmd_repack, RUN_SETUP }, + { "repack", cmd_repack, RUN_SETUP | BLOCK_ON_GVFS_REPO }, { "replace", cmd_replace, RUN_SETUP }, { "rerere", cmd_rerere, RUN_SETUP }, { "reset", cmd_reset, RUN_SETUP }, @@ -645,7 +650,7 @@ static struct cmd_struct commands[] = { { "stash", cmd_stash, RUN_SETUP | NEED_WORK_TREE }, { "status", cmd_status, RUN_SETUP | NEED_WORK_TREE }, { "stripspace", cmd_stripspace }, - { "submodule--helper", cmd_submodule__helper, RUN_SETUP | SUPPORT_SUPER_PREFIX | NO_PARSEOPT }, + { "submodule--helper", cmd_submodule__helper, RUN_SETUP | SUPPORT_SUPER_PREFIX | NO_PARSEOPT | BLOCK_ON_GVFS_REPO }, { "switch", cmd_switch, RUN_SETUP | NEED_WORK_TREE }, { "symbolic-ref", cmd_symbolic_ref, RUN_SETUP }, { "tag", cmd_tag, RUN_SETUP | 
DELAY_PAGER_CONFIG }, @@ -663,7 +668,7 @@ static struct cmd_struct commands[] = { { "verify-tag", cmd_verify_tag, RUN_SETUP }, { "version", cmd_version }, { "whatchanged", cmd_whatchanged, RUN_SETUP }, - { "worktree", cmd_worktree, RUN_SETUP | NO_PARSEOPT }, + { "worktree", cmd_worktree, RUN_SETUP | NO_PARSEOPT | BLOCK_ON_GVFS_REPO }, { "write-tree", cmd_write_tree, RUN_SETUP }, }; diff --git a/gvfs.h b/gvfs.h index 7c9367866f502a..e193502151467a 100644 --- a/gvfs.h +++ b/gvfs.h @@ -12,6 +12,7 @@ * The list of bits in the core_gvfs setting */ #define GVFS_SKIP_SHA_ON_INDEX (1 << 0) +#define GVFS_BLOCK_COMMANDS (1 << 1) #define GVFS_MISSING_OK (1 << 2) #define GVFS_NO_DELETE_OUTSIDE_SPARSECHECKOUT (1 << 3) #define GVFS_FETCH_SKIP_REACHABILITY_AND_UPLOADPACK (1 << 4) diff --git a/t/t0402-block-command-on-gvfs.sh b/t/t0402-block-command-on-gvfs.sh new file mode 100755 index 00000000000000..3370abad464d50 --- /dev/null +++ b/t/t0402-block-command-on-gvfs.sh @@ -0,0 +1,39 @@ +#!/bin/sh + +test_description='block commands in GVFS repo' + +. ./test-lib.sh + +not_with_gvfs () { + command=$1 && + shift && + test_expect_success "test $command $*" " + test_config alias.g4rbled $command && + test_config core.gvfs true && + test_must_fail git $command $* && + test_must_fail git g4rbled $* && + test_unconfig core.gvfs && + test_must_fail git -c core.gvfs=true $command $* && + test_must_fail git -c core.gvfs=true g4rbled $* + " +} + +not_with_gvfs fsck +not_with_gvfs gc +not_with_gvfs gc --auto +not_with_gvfs prune +not_with_gvfs repack +not_with_gvfs submodule status +not_with_gvfs update-index --index-version 2 +not_with_gvfs update-index --skip-worktree +not_with_gvfs update-index --no-skip-worktree +not_with_gvfs update-index --split-index +not_with_gvfs worktree list + +test_expect_success 'test gc --auto succeeds when disabled via config' ' + test_config core.gvfs true && + test_config gc.auto 0 && + git gc --auto +' + +test_done From 02fecff252ad5d899f3c95d0929766fd7f0063a7 Mon Sep 17 00:00:00 2001 From: Jeff Hostetler Date: Tue, 30 Apr 2019 16:02:39 -0400 Subject: [PATCH 050/104] trace2:gvfs:experiment: report_tracking Signed-off-by: Jeff Hostetler --- builtin/checkout.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/builtin/checkout.c b/builtin/checkout.c index ca620dc0e2afbb..7f14cea9b39555 100644 --- a/builtin/checkout.c +++ b/builtin/checkout.c @@ -933,8 +933,11 @@ static void update_refs_for_switch(const struct checkout_opts *opts, remove_branch_state(the_repository, !opts->quiet); strbuf_release(&msg); if (!opts->quiet && - (new_branch_info->path || (!opts->force_detach && !strcmp(new_branch_info->name, "HEAD")))) + (new_branch_info->path || (!opts->force_detach && !strcmp(new_branch_info->name, "HEAD")))) { + trace2_region_enter("exp", "report_tracking", the_repository); report_tracking(new_branch_info); + trace2_region_leave("exp", "report_tracking", the_repository); + } } static int add_pending_uninteresting_ref(const char *refname, From cbe29945ecca4ebfec8466d172879eab1530f80c Mon Sep 17 00:00:00 2001 From: Kevin Willford Date: Thu, 21 Nov 2019 12:01:04 -0700 Subject: [PATCH 051/104] fsmonitor: check CE_FSMONITOR_VALID in ce_uptodate When using fsmonitor the CE_FSMONITOR_VALID flag should be checked when wanting to know if the entry has been updated. If the flag is set the entry should be considered up to date and the same as if the CE_UPTODATE is set. 
In order to trust the CE_FSMONITOR_VALID flag, the fsmonitor data needs to be refreshed when the fsmonitor bitmap is applied to the index in tweak_fsmonitor. Since the fsmonitor data is kept up to date for every command, some tests needed to be updated to take that into account. Setting istate->untracked->use_fsmonitor used to happen in tweak_fsmonitor when the fsmonitor bitmap data was loaded; it now happens in refresh_fsmonitor, since that is called from tweak_fsmonitor. refresh_fsmonitor will only be called once, and any other callers should set it when refreshing the fsmonitor data, so that code can use the fsmonitor data when checking untracked files. When writing the index, fsmonitor_last_update is used to determine if the fsmonitor bitmap should be created and the extension data written to the index. When running through unpack-trees this is not copied to the result index. That makes the next git command that runs do all the work of lstat()ing every file to determine what is clean, since all index entries are marked dirty when no fsmonitor data was saved in the index extension. Copying fsmonitor_last_update to the result index causes the fsmonitor extension data to be in the index for the next git command to use. Signed-off-by: Kevin Willford --- cache.h | 2 +- fsmonitor.c | 11 +++++++---- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/cache.h b/cache.h index 38b269d8100a6e..9d14e512f90854 100644 --- a/cache.h +++ b/cache.h @@ -239,7 +239,7 @@ static inline unsigned create_ce_flags(unsigned stage) #define ce_namelen(ce) ((ce)->ce_namelen) #define ce_size(ce) cache_entry_size(ce_namelen(ce)) #define ce_stage(ce) ((CE_STAGEMASK & (ce)->ce_flags) >> CE_STAGESHIFT) -#define ce_uptodate(ce) ((ce)->ce_flags & CE_UPTODATE) +#define ce_uptodate(ce) (((ce)->ce_flags & CE_UPTODATE) || ((ce)->ce_flags & CE_FSMONITOR_VALID)) #define ce_skip_worktree(ce) ((ce)->ce_flags & CE_SKIP_WORKTREE) #define ce_mark_uptodate(ce) ((ce)->ce_flags |= CE_UPTODATE) #define ce_intent_to_add(ce) ((ce)->ce_flags & CE_INTENT_TO_ADD) diff --git a/fsmonitor.c b/fsmonitor.c index 932bd9012daf9e..be47d7d7a41b75 100644 --- a/fsmonitor.c +++ b/fsmonitor.c @@ -261,10 +261,13 @@ void refresh_fsmonitor(struct index_state *istate) if (istate->untracked) istate->untracked->use_fsmonitor = 1; } else { - - /* We only want to run the post index changed hook if we've actually changed entries, so keep track - * if we actually changed entries or not */ + /* + * We only want to run the post index changed hook if we've + * actually changed entries, so keep track if we actually + * changed entries or not + */ int is_cache_changed = 0; + /* Mark all entries invalid */ for (i = 0; i < istate->cache_nr; i++) { if (istate->cache[i]->ce_flags & CE_FSMONITOR_VALID) { @@ -273,7 +276,7 @@ void refresh_fsmonitor(struct index_state *istate) } } - /* If we're going to check every file, ensure we save the results */ + /* If we're going to check every file, ensure we save results */ if (is_cache_changed) istate->cache_changed |= FSMONITOR_CHANGED; From 13db71446b81591b8ffe952f02b56ad44fd80152 Mon Sep 17 00:00:00 2001 From: Ben Peart Date: Tue, 25 Sep 2018 16:28:16 -0400 Subject: [PATCH 052/104] virtualfilesystem: fix bug with symlinks being ignored The virtual file system code incorrectly treated symlinks as directories instead of regular files. This meant symlinks were not included even if they were listed in the list of files returned by the core.virtualFilesystem hook proc.
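A reduced sketch of the corrected dispatch (the names here are illustrative; the real function is in the diff below):

```c
#include <dirent.h>

enum vfs_lookup {
	LOOKUP_FILE_LIST,	/* probe the hook's path list */
	LOOKUP_PARENT_DIRS,	/* probe the parent-directory hashmap */
	LOOKUP_UNSUPPORTED
};

/* Illustrative sketch: the hook lists symlinks by path exactly like
 * regular files, so DT_LNK must take the file-list lookup; only
 * DT_DIR should consult the parent-directory hashmap. */
static enum vfs_lookup classify_dtype(int dtype)
{
	switch (dtype) {
	case DT_REG:
	case DT_LNK: return LOOKUP_FILE_LIST;
	case DT_DIR: return LOOKUP_PARENT_DIRS;
	default:     return LOOKUP_UNSUPPORTED;
	}
}
```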
Fixes #25 Signed-off-by: Ben Peart --- virtualfilesystem.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/virtualfilesystem.c b/virtualfilesystem.c index de160cd5ee865f..e2102ca4f803ef 100644 --- a/virtualfilesystem.c +++ b/virtualfilesystem.c @@ -222,7 +222,7 @@ int is_excluded_from_virtualfilesystem(const char *pathname, int pathlen, int dt if (dtype != DT_REG && dtype != DT_DIR && dtype != DT_LNK) die(_("is_excluded_from_virtualfilesystem passed unhandled dtype")); - if (dtype == DT_REG) { + if (dtype == DT_REG || dtype == DT_LNK) { int ret = is_included_in_virtualfilesystem(pathname, pathlen); if (ret > 0) return 0; @@ -231,7 +231,7 @@ int is_excluded_from_virtualfilesystem(const char *pathname, int pathlen, int dt return ret; } - if (dtype == DT_DIR || dtype == DT_LNK) { + if (dtype == DT_DIR) { if (!parent_directory_hashmap.tablesize && virtual_filesystem_data.len) initialize_parent_directory_hashmap(&parent_directory_hashmap, &virtual_filesystem_data); if (!parent_directory_hashmap.tablesize) From 9bf76c97d6074820e8e6dde7d2e5f34b14983980 Mon Sep 17 00:00:00 2001 From: Derrick Stolee Date: Wed, 15 Apr 2020 16:19:31 +0000 Subject: [PATCH 053/104] gvfs: allow overriding core.gvfs We found a user who had set "core.gvfs = false" in their global config. This should not have been necessary, but it also should not have caused a problem. However, it did. The reason is that gvfs_load_config_value() is called from config.c when reading config key/value pairs from all the config files. The local config should override the global config, and this is done by config.c reading the global config first then reading the local config. However, our logic only allowed writing the core_gvfs variable once. Put the guards against multiple assignments of core_gvfs into gvfs_config_is_set() instead, because that will fix the problem _and_ keep multiple calls to gvfs_config_is_set() from slowing down. 
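The essence of the fix is where the "load once" guard lives; here is a self-contained sketch with hypothetical names (not git's actual config machinery):

```c
#include <stdlib.h>
#include <string.h>

static int core_gvfs_bits;
static int gvfs_config_loaded;

/* Hypothetical sketch: this callback runs once per config file,
 * global first and local last, so it must NOT be guarded; the last
 * writer wins, which is how local config overrides global config. */
static void on_core_gvfs(const char *value)
{
	core_gvfs_bits = strcmp(value, "true") ? atoi(value) : -1;
}

/* Only the lazy "no config was seen at all" default is guarded. */
static int gvfs_is_set(int mask)
{
	if (!gvfs_config_loaded)
		core_gvfs_bits = 0; /* stand-in for loading the default */
	gvfs_config_loaded = 1;
	return (core_gvfs_bits & mask) == mask;
}
```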
Signed-off-by: Derrick Stolee --- gvfs.c | 10 ++++------ t/t0021-conversion.sh | 4 ++++ 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/gvfs.c b/gvfs.c index 7235199c04ac39..4360d77075ea0f 100644 --- a/gvfs.c +++ b/gvfs.c @@ -14,9 +14,6 @@ static int early_core_gvfs_config(const char *var, const char *value, void *data void gvfs_load_config_value(const char *value) { - if (gvfs_config_loaded) - return; - if (value) core_gvfs = git_config_bool_or_int("core.gvfs", value, &core_gvfs_is_bool); else if (startup_info->have_repository == 0) @@ -27,12 +24,13 @@ void gvfs_load_config_value(const char *value) /* Turn on all bits if a bool was set in the settings */ if (core_gvfs_is_bool && core_gvfs) core_gvfs = -1; - - gvfs_config_loaded = 1; } int gvfs_config_is_set(int mask) { - gvfs_load_config_value(0); + if (!gvfs_config_loaded) + gvfs_load_config_value(0); + + gvfs_config_loaded = 1; return (core_gvfs & mask) == mask; } diff --git a/t/t0021-conversion.sh b/t/t0021-conversion.sh index 75e5d77435b4f8..75940df651c9a5 100755 --- a/t/t0021-conversion.sh +++ b/t/t0021-conversion.sh @@ -328,6 +328,10 @@ test_expect_success "filter: smudge filters blocked when under GVFS" ' test_config filter.empty-in-repo.smudge "echo smudged && cat" && test_config core.gvfs 64 && + test_must_fail git checkout && + + # ensure the local core.gvfs setting overwrites the global setting + git config --global core.gvfs false && test_must_fail git checkout ' From c049d39cde161174c9189a7850906ce59a8e54d9 Mon Sep 17 00:00:00 2001 From: Jeff Hostetler Date: Fri, 14 Jun 2019 12:38:31 -0400 Subject: [PATCH 054/104] trace2:gvfs:experiment: read_cache: annotate thread usage in read-cache Add trace2_thread_start() and trace2_thread_exit() events to the worker threads used to read the index. This gives per-thread perf data. These workers were introduced in: abb4bb83845 read-cache: load cache extensions on a worker thread 77ff1127a4c read-cache: load cache entries on worker threads Signed-off-by: Jeff Hostetler --- read-cache.c | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/read-cache.c b/read-cache.c index fd3853217dc1c5..6034b337bd2abe 100644 --- a/read-cache.c +++ b/read-cache.c @@ -1994,6 +1994,17 @@ static void *load_index_extensions(void *_data) return NULL; } +static void *load_index_extensions_threadproc(void *_data) +{ + void *result; + + trace2_thread_start("load_index_extensions"); + result = load_index_extensions(_data); + trace2_thread_exit(); + + return result; +} + /* * A helper function that will load the specified range of cache entries * from the memory mapped file and add them to the given index. 
@@ -2069,12 +2080,17 @@ static void *load_cache_entries_thread(void *_data) struct load_cache_entries_thread_data *p = _data; int i; + trace2_thread_start("load_cache_entries"); + /* iterate across all ieot blocks assigned to this thread */ for (i = p->ieot_start; i < p->ieot_start + p->ieot_blocks; i++) { p->consumed += load_cache_entry_block(p->istate, p->ce_mem_pool, p->offset, p->ieot->entries[i].nr, p->mmap, p->ieot->entries[i].offset, NULL); p->offset += p->ieot->entries[i].nr; } + + trace2_thread_exit(); + return NULL; } @@ -2224,7 +2240,7 @@ int do_read_index(struct index_state *istate, const char *path, int must_exist) int err; p.src_offset = extension_offset; - err = pthread_create(&p.pthread, NULL, load_index_extensions, &p); + err = pthread_create(&p.pthread, NULL, load_index_extensions_threadproc, &p); if (err) die(_("unable to create load_index_extensions thread: %s"), strerror(err)); From 563d9c288848d49c569e4a4f035bfdf2b32060aa Mon Sep 17 00:00:00 2001 From: Kevin Willford Date: Thu, 21 Nov 2019 09:24:36 -0700 Subject: [PATCH 055/104] fsmonitor: add script for debugging and update script for tests The fsmonitor script that can be used for running all the git tests using watchman was causing some of the tests to fail because it wrote to stderr and created some files for debugging purposes. Add a new script for debugging and modify the existing script to remove the code that would cause tests to fail. Signed-off-by: Kevin Willford --- t/t7519/fsmonitor-watchman | 22 +----- t/t7519/fsmonitor-watchman-debug | 128 +++++++++++++++++++++++++++++++ 2 files changed, 129 insertions(+), 21 deletions(-) create mode 100755 t/t7519/fsmonitor-watchman-debug diff --git a/t/t7519/fsmonitor-watchman b/t/t7519/fsmonitor-watchman index 264b9daf834ec8..6461f625f64181 100755 --- a/t/t7519/fsmonitor-watchman +++ b/t/t7519/fsmonitor-watchman @@ -17,7 +17,6 @@ use IPC::Open2; # 'git config core.fsmonitor .git/hooks/query-watchman' # my ($version, $time) = @ARGV; -#print STDERR "$0 $version $time\n"; # Check the hook interface version @@ -44,7 +43,7 @@ launch_watchman(); sub launch_watchman { - my $pid = open2(\*CHLD_OUT, \*CHLD_IN, 'watchman -j') + my $pid = open2(\*CHLD_OUT, \*CHLD_IN, 'watchman -j --no-pretty') or die "open2() failed: $!\n" . "Falling back to scanning...\n"; @@ -62,19 +61,11 @@ sub launch_watchman { "fields": ["name"] }] END - - open (my $fh, ">", ".git/watchman-query.json"); - print $fh $query; - close $fh; print CHLD_IN $query; close CHLD_IN; my $response = do {local $/; <CHLD_OUT>}; - open ($fh, ">", ".git/watchman-response.json"); - print $fh $response; - close $fh; - die "Watchman: command returned no output.\n" . "Falling back to scanning...\n" if $response eq ""; die "Watchman: command returned invalid output: $response\n" . @@ -93,7 +84,6 @@ sub launch_watchman { my $o = $json_pkg->new->utf8->decode($response); if ($retry > 0 and $o->{error} and $o->{error} =~ m/unable to resolve root .* directory (.*) is not watched/) { - print STDERR "Adding '$git_work_tree' to watchman's watch list.\n"; $retry--; qx/watchman watch "$git_work_tree"/; die "Failed to make watchman watch '$git_work_tree'.\n" . @@ -103,11 +93,6 @@ sub launch_watchman { # return the fast "everything is dirty" flag to git and do the # Watchman query just to get it over with now so we won't pay # the cost in git to look up each individual file.
- - open ($fh, ">", ".git/watchman-output.out"); - print "/\0"; - close $fh; - print "/\0"; eval { launch_watchman() }; exit 0; @@ -116,11 +101,6 @@ sub launch_watchman { die "Watchman: $o->{error}.\n" . "Falling back to scanning...\n" if $o->{error}; - open ($fh, ">", ".git/watchman-output.out"); - binmode $fh, ":utf8"; - print $fh @{$o->{files}}; - close $fh; - binmode STDOUT, ":utf8"; local $, = "\0"; print @{$o->{files}}; diff --git a/t/t7519/fsmonitor-watchman-debug b/t/t7519/fsmonitor-watchman-debug new file mode 100755 index 00000000000000..d8e7a1e5ba85c0 --- /dev/null +++ b/t/t7519/fsmonitor-watchman-debug @@ -0,0 +1,128 @@ +#!/usr/bin/perl + +use strict; +use warnings; +use IPC::Open2; + +# An example hook script to integrate Watchman +# (https://facebook.github.io/watchman/) with git to speed up detecting +# new and modified files. +# +# The hook is passed a version (currently 1) and a time in nanoseconds +# formatted as a string and outputs to stdout all files that have been +# modified since the given time. Paths must be relative to the root of +# the working tree and separated by a single NUL. +# +# To enable this hook, rename this file to "query-watchman" and set +# 'git config core.fsmonitor .git/hooks/query-watchman' +# +my ($version, $time) = @ARGV; +#print STDERR "$0 $version $time\n"; + +# Check the hook interface version + +if ($version == 1) { + # convert nanoseconds to seconds + # subtract one second to make sure watchman will return all changes + $time = int ($time / 1000000000) - 1; +} else { + die "Unsupported query-fsmonitor hook version '$version'.\n" . + "Falling back to scanning...\n"; +} + +my $git_work_tree; +if ($^O =~ 'msys' || $^O =~ 'cygwin') { + $git_work_tree = Win32::GetCwd(); + $git_work_tree =~ tr/\\/\//; +} else { + require Cwd; + $git_work_tree = Cwd::cwd(); +} + +my $retry = 1; + +launch_watchman(); + +sub launch_watchman { + + my $pid = open2(\*CHLD_OUT, \*CHLD_IN, 'watchman -j') + or die "open2() failed: $!\n" . + "Falling back to scanning...\n"; + + # In the query expression below we're asking for names of files that + # changed since $time but were not transient (ie created after + # $time but no longer exist). + # + # To accomplish this, we're using the "since" generator to use the + # recency index to select candidate nodes and "fields" to limit the + # output to file names only. + + my $query = <<" END"; + ["query", "$git_work_tree", { + "since": $time, + "fields": ["name"] + }] + END + + open (my $fh, ">", ".git/watchman-query.json"); + print $fh $query; + close $fh; + + print CHLD_IN $query; + close CHLD_IN; + my $response = do {local $/; }; + + open ($fh, ">", ".git/watchman-response.json"); + print $fh $response; + close $fh; + + die "Watchman: command returned no output.\n" . + "Falling back to scanning...\n" if $response eq ""; + die "Watchman: command returned invalid output: $response\n" . + "Falling back to scanning...\n" unless $response =~ /^\{/; + + my $json_pkg; + eval { + require JSON::XS; + $json_pkg = "JSON::XS"; + 1; + } or do { + require JSON::PP; + $json_pkg = "JSON::PP"; + }; + + my $o = $json_pkg->new->utf8->decode($response); + + if ($retry > 0 and $o->{error} and $o->{error} =~ m/unable to resolve root .* directory (.*) is not watched/) { + print STDERR "Adding '$git_work_tree' to watchman's watch list.\n"; + $retry--; + qx/watchman watch "$git_work_tree"/; + die "Failed to make watchman watch '$git_work_tree'.\n" . + "Falling back to scanning...\n" if $? 
!= 0; + + # Watchman will always return all files on the first query so + # return the fast "everything is dirty" flag to git and do the + # Watchman query just to get it over with now so we won't pay + # the cost in git to look up each individual file. + + open ($fh, ">", ".git/watchman-output.out"); + print "/\0"; + close $fh; + + print "/\0"; + eval { launch_watchman() }; + exit 0; + } + + die "Watchman: $o->{error}.\n" . + "Falling back to scanning...\n" if $o->{error}; + + open ($fh, ">", ".git/watchman-output.out"); + binmode $fh, ":utf8"; + print $fh @{$o->{files}}; + close $fh; + + binmode STDOUT, ":utf8"; + local $, = "\0"; + print @{$o->{files}}; +} From 1a38880336f3bf0841865999ae49c85526e8965d Mon Sep 17 00:00:00 2001 From: Kevin Willford Date: Tue, 9 Oct 2018 10:19:14 -0600 Subject: [PATCH 056/104] virtualfilesystem: check if directory is included Add a check to see whether a directory is included in the virtualfilesystem before checking the directory hashmap. This allows a directory entry like foo/ to find all untracked files in subdirectories. --- t/t1092-virtualfilesystem.sh | 2 ++ virtualfilesystem.c | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/t/t1092-virtualfilesystem.sh b/t/t1092-virtualfilesystem.sh index 88ab4a6a6084df..280746b7b94167 100755 --- a/t/t1092-virtualfilesystem.sh +++ b/t/t1092-virtualfilesystem.sh @@ -221,6 +221,8 @@ test_expect_success 'verify folder entries include all files' ' cat > expected <<-\EOF && ?? dir1/a ?? dir1/b + ?? dir1/dir2/a + ?? dir1/dir2/b ?? dir1/untracked.txt EOF test_cmp expected actual diff --git a/virtualfilesystem.c b/virtualfilesystem.c index e2102ca4f803ef..c50ee2d29bfdaf 100644 --- a/virtualfilesystem.c +++ b/virtualfilesystem.c @@ -232,6 +232,10 @@ int is_excluded_from_virtualfilesystem(const char *pathname, int pathlen, int dt if (dtype == DT_DIR) { + int ret = is_included_in_virtualfilesystem(pathname, pathlen); + if (ret > 0) + return 0; + if (!parent_directory_hashmap.tablesize && virtual_filesystem_data.len) initialize_parent_directory_hashmap(&parent_directory_hashmap, &virtual_filesystem_data); if (!parent_directory_hashmap.tablesize) From e483af85651303ae33ca8cef3e31d6796e8166c5 Mon Sep 17 00:00:00 2001 From: Kevin Willford Date: Fri, 27 Jul 2018 12:00:44 -0600 Subject: [PATCH 057/104] BRANCHES.md: Add explanation of branches and using forks --- BRANCHES.md | 53 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100644 BRANCHES.md diff --git a/BRANCHES.md b/BRANCHES.md new file mode 100644 index 00000000000000..cecee7cf7ba6ab --- /dev/null +++ b/BRANCHES.md @@ -0,0 +1,53 @@ +Branches used in this repo +========================== + +This document explains the branching structure that we are using in the VFSForGit repository as well as the forking strategy that we have adopted for contributing. + +Repo Branches +------------- + +1. master + + This will track the Git for Windows repository's master branch. + +2. vfs + + We would like to use this branch as an evergreen branch that continually rebases the VFSForGit changes onto an evergreen Git for Windows branch based on core/master, so that we can detect when the VFSForGit patches have issues, and so that we can regenerate this branch whenever a new version of the patches is sent upstream. + +3. vs/master + + This tracks the Git for Windows repository's vs/master branch and contains the generated files for using a Visual Studio solution. + +4.
vfs-# + + These branches are used to track the specific Git for Windows version with the VFSForGit-specific patches on top. When a new version of Git for Windows is released, the VFSForGit patches will be rebased onto that Windows version and a new vfs-# branch will be created to open pull requests against. + + #### Examples + + ``` + vfs-2.20.0 + vfs-2.20.1 + ``` + + The versions of git for VFSForGit are based on the Git for Windows versions; v2.20.0.vfs.1 corresponds to v2.20.0.windows.1 with the VFSForGit-specific patches applied on top. + +Tags +---- + +We are using annotated tags to build the version number for git. The build will look back through the commit history to find the first tag matching `v[0-9]*vfs*` and build the git version number using that tag. + +Forking +------- + +A personal fork of this repository and a branch in that repository should be used for development. + +These branches should be based on the latest vfs-# branch. If you have work-in-progress pull requests based on a previous version branch when a new version branch is created, you will need to rebase your patches onto the new branch to get them into that latest version. + +#### Example + +``` +git clone +git remote add ms https://github.com/Microsoft/git.git +git checkout -b my-changes ms/vfs-2.20.0 --no-track +git push -fu origin HEAD +``` From 7e4b0be2ab342a5e70cb3dc25e39b87308a616bb Mon Sep 17 00:00:00 2001 From: Jeff Hostetler Date: Tue, 9 Jul 2019 14:43:47 -0400 Subject: [PATCH 058/104] trace2:gvfs:experiment: read-cache: time read/write of cache-tree extension Add regions around code to read and write the cache-tree extension when the index is read or written. This is an experiment and may be dropped in future releases if we don't need it anymore. This experiment demonstrates that it takes more time to parse and deserialize the cache-tree extension than it does to read the cache-entries. Commits [1] and [2] spread cache-entry reading across N-1 cores and dedicate a single core to simultaneously read the index extensions. Local testing (on my machine) shows that reading the cache-tree extension takes ~0.28 seconds. The 11 cache-entry threads take ~0.08 seconds. The main thread is blocked for 0.15 to 0.20 seconds waiting for the extension thread to finish. Let's use this commit to gather some telemetry and confirm this. My point is that improvements, such as index V5 which makes the cache entries smaller, may improve performance, but the gains may be limited because of this extension, and we may need to look inside the cache-tree extension to truly improve do_read_index() performance.
[1] abb4bb83845 read-cache: load cache extensions on a worker thread [2] 77ff1127a4c read-cache: load cache entries on worker threads Signed-off-by: Jeff Hostetler --- read-cache.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/read-cache.c b/read-cache.c index 6034b337bd2abe..7d2ceb926ec8a5 100644 --- a/read-cache.c +++ b/read-cache.c @@ -1739,7 +1739,10 @@ static int read_index_extension(struct index_state *istate, { switch (CACHE_EXT(ext)) { case CACHE_EXT_TREE: + trace2_region_enter("index", "read/extension/cache_tree", NULL); istate->cache_tree = cache_tree_read(data, sz); + trace2_data_intmax("index", NULL, "read/extension/cache_tree/bytes", (intmax_t)sz); + trace2_region_leave("index", "read/extension/cache_tree", NULL); break; case CACHE_EXT_RESOLVE_UNDO: istate->resolve_undo = resolve_undo_read(data, sz); @@ -2988,9 +2991,13 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile, if (!strip_extensions && !drop_cache_tree && istate->cache_tree) { struct strbuf sb = STRBUF_INIT; + trace2_region_enter("index", "write/extension/cache_tree", NULL); cache_tree_write(&sb, istate->cache_tree); err = write_index_ext_header(&c, &eoie_c, newfd, CACHE_EXT_TREE, sb.len) < 0 || ce_write(&c, newfd, sb.buf, sb.len) < 0; + trace2_data_intmax("index", NULL, "write/extension/cache_tree/bytes", (intmax_t)sb.len); + trace2_region_leave("index", "write/extension/cache_tree", NULL); + strbuf_release(&sb); if (err) return -1; From d043db00352f1a1dee3fb91309aa9db3483d1d6e Mon Sep 17 00:00:00 2001 From: Jeff Hostetler Date: Fri, 10 Apr 2020 21:14:44 -0400 Subject: [PATCH 059/104] status: disable deserialize when verbose output requested. Disable deserialization when verbose output is requested. Verbose mode causes Git to print diffs for modified files. This requires the index to be loaded to have the currently staged OID values. Without loading the index, verbose output makes it look like everything was deleted. Signed-off-by: Jeff Hostetler --- builtin/commit.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/builtin/commit.c b/builtin/commit.c index eeea02a24948ae..dd3673444aa210 100644 --- a/builtin/commit.c +++ b/builtin/commit.c @@ -1595,6 +1595,22 @@ int cmd_status(int argc, const char **argv, const char *prefix) */ try_deserialize = (!do_serialize && (do_implicit_deserialize || do_explicit_deserialize)); + + /* + * Disable deserialize when verbose is set because it causes us to + * print diffs for each modified file, but that requires us to have + * the index loaded and we don't want to do that (at least not now for + * this seldom used feature). My fear is that would further tangle + * the merge conflict with upstream. + * + * TODO Reconsider this in the future. + */ + if (try_deserialize && verbose) { + trace2_data_string("status", the_repository, "deserialize/reject", + "args/verbose"); + try_deserialize = 0; + } + if (try_deserialize) goto skip_init; From b99bb749f0c3ca1c856f2ad04af51771d21a93b7 Mon Sep 17 00:00:00 2001 From: Jameson Miller Date: Tue, 20 Nov 2018 11:53:53 -0500 Subject: [PATCH 060/104] vfs: fix case where directories not handled correctly The vfs does not correctly handle the case when there is a file that begins with the same prefix as a directory. For example, the following setup would encounter this issue: A directory contains a file named `dir1.sln` and a directory named `dir1/`. The directory `dir1` contains other files.
The directory `dir1` is in the virtual file system list. The contents of `dir1` should be in the virtual file system, but they are not. The contents of this directory do not have the skip worktree bit cleared as expected. The problem is in the `apply_virtualfilesystem(...)` function, where it does not include the trailing slash of the directory name when looking up the position in the index to start clearing the skip worktree bit. The fix is to include the trailing slash when finding the first index entry via `index_name_pos(...)`. --- t/t1092-virtualfilesystem.sh | 19 +++++++++++++++++++ virtualfilesystem.c | 2 +- 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/t/t1092-virtualfilesystem.sh b/t/t1092-virtualfilesystem.sh index 280746b7b94167..ddb54cbc0ffdb8 100755 --- a/t/t1092-virtualfilesystem.sh +++ b/t/t1092-virtualfilesystem.sh @@ -348,4 +348,23 @@ test_expect_success 'on folder renamed' ' test_cmp expected actual ' +test_expect_success 'folder with same prefix as file' ' + clean_repo && + touch dir1.sln && + write_script .git/hooks/virtualfilesystem <<-\EOF && + printf "dir1/\0" + printf "dir1.sln\0" + EOF + git add dir1.sln && + git ls-files -v > actual && + cat > expected <<-\EOF && + H dir1.sln + H dir1/file1.txt + H dir1/file2.txt + S dir2/file1.txt + S dir2/file2.txt + EOF + test_cmp expected actual +' + test_done diff --git a/virtualfilesystem.c b/virtualfilesystem.c index c50ee2d29bfdaf..c93357e1508a51 100644 --- a/virtualfilesystem.c +++ b/virtualfilesystem.c @@ -277,7 +277,7 @@ void apply_virtualfilesystem(struct index_state *istate) if (buf[i - 1] == '/') { if (ignore_case) adjust_dirname_case(istate, entry); - pos = index_name_pos(istate, entry, len - 1); + pos = index_name_pos(istate, entry, len); if (pos < 0) { pos = -pos - 1; while (pos < istate->cache_nr && !fspathncmp(istate->cache[pos]->name, entry, len)) { From 3a8dcbeb9e3f5d9f71d957d1155be3a6fa7c86b3 Mon Sep 17 00:00:00 2001 From: Johannes Schindelin Date: Tue, 4 Feb 2020 19:36:46 +0100 Subject: [PATCH 061/104] vfs: disable `git update-git-for-windows` If a user runs git update-git-for-windows, then they will upgrade to a version that does not support microsoft/vfsforgit or microsoft/scalar. Therefore, let's prevent this.
This addresses https://github.com/microsoft/git/issues/241 --- builtin.h | 1 + git.c | 2 ++ help.c | 16 ++++++++++++++++ t/t0402-block-command-on-gvfs.sh | 7 +++++++ 4 files changed, 26 insertions(+) diff --git a/builtin.h b/builtin.h index a5ae15bfe54b65..3583148a08a44e 100644 --- a/builtin.h +++ b/builtin.h @@ -220,6 +220,7 @@ int cmd_tag(int argc, const char **argv, const char *prefix); int cmd_tar_tree(int argc, const char **argv, const char *prefix); int cmd_unpack_file(int argc, const char **argv, const char *prefix); int cmd_unpack_objects(int argc, const char **argv, const char *prefix); +int cmd_update(int argc, const char **argv, const char *prefix); int cmd_update_index(int argc, const char **argv, const char *prefix); int cmd_update_ref(int argc, const char **argv, const char *prefix); int cmd_update_server_info(int argc, const char **argv, const char *prefix); diff --git a/git.c b/git.c index ed4cae3dfbe647..e202aeb67bbe6a 100644 --- a/git.c +++ b/git.c @@ -656,6 +656,8 @@ static struct cmd_struct commands[] = { { "tag", cmd_tag, RUN_SETUP | DELAY_PAGER_CONFIG }, { "unpack-file", cmd_unpack_file, RUN_SETUP | NO_PARSEOPT }, { "unpack-objects", cmd_unpack_objects, RUN_SETUP | NO_PARSEOPT }, + { "update", cmd_update }, + { "update-git-for-windows", cmd_update }, { "update-index", cmd_update_index, RUN_SETUP }, { "update-ref", cmd_update_ref, RUN_SETUP }, { "update-server-info", cmd_update_server_info, RUN_SETUP }, diff --git a/help.c b/help.c index 1de9c0d589cd9b..b2108949b5c1f4 100644 --- a/help.c +++ b/help.c @@ -719,3 +719,19 @@ NORETURN void help_unknown_ref(const char *ref, const char *cmd, string_list_clear(&suggested_refs, 0); exit(1); } + +int cmd_update(int argc, const char **argv, const char *prefix) +{ + const char * const usage[] = { + N_("git update-git-for-windows []\n" + "\t(not supported in this build of Git for Windows)"), + NULL + }; + struct option options[] = { + OPT_END() + }; + + argc = parse_options(argc, argv, prefix, options, usage, 0); + + die(_("git %s is not supported in VFSforGit"), argv[0]); +} diff --git a/t/t0402-block-command-on-gvfs.sh b/t/t0402-block-command-on-gvfs.sh index 3370abad464d50..fd30430b09accc 100755 --- a/t/t0402-block-command-on-gvfs.sh +++ b/t/t0402-block-command-on-gvfs.sh @@ -36,4 +36,11 @@ test_expect_success 'test gc --auto succeeds when disabled via config' ' git gc --auto ' +test_expect_success 'update-git-for-windows disabled' ' + test_must_fail git update 2>out && + test_i18ngrep VFS out && + test_must_fail git update-git-for-windows 2>out && + test_i18ngrep VFS out +' + test_done From b9df0c9b28e31d3a661be4d58e42993ca4fc5607 Mon Sep 17 00:00:00 2001 From: Jeff Hostetler Date: Mon, 3 Jun 2019 11:53:08 -0400 Subject: [PATCH 062/104] trace2:gvfs:experiment: add prime_cache_tree region Signed-off-by: Jeff Hostetler --- cache-tree.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cache-tree.c b/cache-tree.c index cc4dfd8c53d402..da9132781d6274 100644 --- a/cache-tree.c +++ b/cache-tree.c @@ -757,10 +757,14 @@ void prime_cache_tree(struct repository *r, struct index_state *istate, struct tree *tree) { + trace2_region_enter("cache_tree", "prime_cache_tree", r); + cache_tree_free(&istate->cache_tree); istate->cache_tree = cache_tree(); prime_cache_tree_rec(r, istate->cache_tree, tree); istate->cache_changed |= CACHE_TREE_CHANGED; + + trace2_region_leave("cache_tree", "prime_cache_tree", r); } /* From a4568d6348ab1cc779f589dc78a0cfb7f2aadd3b Mon Sep 17 00:00:00 2001 From: Jeff Hostetler Date: Fri, 10 Apr 2020 21:18:41 -0400 
Subject: [PATCH 063/104] t7524: add test for verbose status deserialization Verify that `git status --deserialize=x -v` does not crash and generates the same output as a normal (scanning) status command. These issues are described in the previous 2 commits. Signed-off-by: Jeff Hostetler --- t/t7524-serialized-status.sh | 39 ++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/t/t7524-serialized-status.sh b/t/t7524-serialized-status.sh index 743ecd59b2827d..358a9b0b9359d9 100755 --- a/t/t7524-serialized-status.sh +++ b/t/t7524-serialized-status.sh @@ -387,4 +387,43 @@ EOF ' +test_expect_success 'ensure deserialize -v does not crash' ' + + git init verbose_test && + touch verbose_test/a && + touch verbose_test/b && + touch verbose_test/c && + git -C verbose_test add a b c && + git -C verbose_test commit -m abc && + + echo green >>verbose_test/a && + git -C verbose_test add a && + echo red_1 >>verbose_test/b && + echo red_2 >verbose_test/dirt && + + git -C verbose_test status >output.ref && + git -C verbose_test status -v >output.ref_v && + + git -C verbose_test --no-optional-locks status --serialize=../verbose_test.dat >output.ser.long && + git -C verbose_test --no-optional-locks status --serialize=../verbose_test.dat_v -v >output.ser.long_v && + + # Verify that serialization does not affect the status output itself. + test_i18ncmp output.ref output.ser.long && + test_i18ncmp output.ref_v output.ser.long_v && + + GIT_TRACE2_PERF="$(pwd)"/verbose_test.log \ + git -C verbose_test status --deserialize=../verbose_test.dat >output.des.long && + + # Verify that normal deserialize was actually used and produces the same result. + test_i18ncmp output.ser.long output.des.long && + grep -q "deserialize/result:ok" verbose_test.log && + + GIT_TRACE2_PERF="$(pwd)"/verbose_test.log_v \ + git -C verbose_test status --deserialize=../verbose_test.dat_v -v >output.des.long_v && + + # Verify that verbose mode produces the same result because verbose was rejected. + test_i18ncmp output.ser.long_v output.des.long_v && + grep -q "deserialize/reject:args/verbose" verbose_test.log_v ' + test_done From 4451a05e3856134b5eaa1f1ca6a9fa3b3f7751a1 Mon Sep 17 00:00:00 2001 From: Johannes Schindelin Date: Tue, 28 May 2019 21:48:08 +0200 Subject: [PATCH 064/104] backwards-compatibility: support the post-indexchanged hook When our patches to support that hook were upstreamed, the hook's name was eliciting some reviewer suggestions, and it was renamed to `post-index-change`. These patches (with the new name) made it into v2.22.0. However, VFSforGit users may very well have checkouts with that hook installed under the original name. To support this, let's just introduce a hack where we look a bit more closely when we just failed to find the `post-index-change` hook, and allow any `post-indexchanged` hook to run instead (if it exists). --- run-command.c | 11 +++++++++++ t/t7113-post-index-change-hook.sh | 30 ++++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+) diff --git a/run-command.c b/run-command.c index d9b549a1d18e5b..32896e9e2ea542 100644 --- a/run-command.c +++ b/run-command.c @@ -1407,6 +1407,17 @@ int run_hook_argv(const char *const *env, const char *name, const char *p; p = find_hook(name); + /* + * Backwards compatibility hack in VFS for Git: when originally + * introduced (and used!), it was called `post-indexchanged`, but this + * name was changed during the review on the Git mailing list.
+ * + * Therefore, when the `post-index-change` hook is not found, let's + * look for a hook with the old name (which would be found in case of + * already-existing checkouts). + */ + if (!p && !strcmp(name, "post-index-change")) + p = find_hook("post-indexchanged"); if (!p) return 0; diff --git a/t/t7113-post-index-change-hook.sh b/t/t7113-post-index-change-hook.sh index f011ad7eece890..d358b92b2db879 100755 --- a/t/t7113-post-index-change-hook.sh +++ b/t/t7113-post-index-change-hook.sh @@ -12,6 +12,36 @@ test_expect_success 'setup' ' git commit -m "initial" ' +test_expect_success 'post-indexchanged' ' + mkdir -p .git/hooks && + test_when_finished "rm -f .git/hooks/post-indexchanged marker" && + write_script .git/hooks/post-indexchanged <<-\EOF && + : >marker + EOF + + : make sure -changed is called if -change does not exist && + test_when_finished "echo testing >dir1/file2.txt && git status" && + echo changed >dir1/file2.txt && + : force index to be dirty && + test-tool chmtime -60 .git/index && + git status && + test_path_is_file marker && + + test_when_finished "rm -f .git/hooks/post-index-change marker2" && + write_script .git/hooks/post-index-change <<-\EOF && + : >marker2 + EOF + + : make sure -changed is not called if -change exists && + rm -f marker marker2 && + echo testing >dir1/file2.txt && + : force index to be dirty && + test-tool chmtime -60 .git/index && + git status && + test_path_is_missing marker && + test_path_is_file marker2 +' + test_expect_success 'test status, add, commit, others trigger hook without flags set' ' mkdir -p .git/hooks && write_script .git/hooks/post-index-change <<-\EOF && From 1fb6b1eef1c2a711c02822166d27f1e2f468ffaf Mon Sep 17 00:00:00 2001 From: Jeff Hostetler Date: Tue, 16 Jul 2019 09:09:53 -0400 Subject: [PATCH 065/104] trace2:gvfs:experiment: add region to apply_virtualfilesystem() Signed-off-by: Jeff Hostetler --- virtualfilesystem.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/virtualfilesystem.c b/virtualfilesystem.c index 7a107c3e2e107b..a7d2ff103a2fc8 100644 --- a/virtualfilesystem.c +++ b/virtualfilesystem.c @@ -263,6 +263,8 @@ void apply_virtualfilesystem(struct index_state *istate) if (!git_config_get_virtualfilesystem()) return; + trace2_region_enter("vfs", "apply", the_repository); + if (!virtual_filesystem_data.len) get_virtual_filesystem_data(&virtual_filesystem_data); @@ -333,6 +335,8 @@ void apply_virtualfilesystem(struct index_state *istate) trace2_data_intmax("vfs", the_repository, "apply/nr_bulk_skip", nr_bulk_skip); trace2_data_intmax("vfs", the_repository, "apply/nr_explicit_skip", nr_explicit_skip); } + + trace2_region_leave("vfs", "apply", the_repository); } /* From f9465bbd0b6335412c9f431615b9731e4856f077 Mon Sep 17 00:00:00 2001 From: Jeff Hostetler Date: Wed, 13 May 2020 17:38:50 -0400 Subject: [PATCH 066/104] deserialize-status: silently fallback if we cannot read cache file Teach Git to not throw a fatal error when an explicitly-specified status-cache file (`git status --deserialize=`) could not be found or opened for reading and silently fallback to a traditional scan. This matches the behavior when the status-cache file is implicitly given via a config setting. 
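The resulting decision can be sketched as a small self-contained helper (illustrative names only; the real logic lives in opt_parse_deserialize() in the diff below):

```c
#include <unistd.h>

enum status_source { FROM_STDIN, FROM_CACHE_FILE, FROM_SCAN };

/* Illustrative sketch of the decision implemented below: an explicit
 * but unreadable cache path no longer dies; it silently degrades to a
 * normal (scanning) status run, matching the implicit config case. */
static enum status_source pick_source(const char *path)
{
	if (!path || !*path)
		return FROM_STDIN;	/* --deserialize with no path */
	if (access(path, R_OK) == 0)
		return FROM_CACHE_FILE;	/* readable status cache */
	return FROM_SCAN;		/* silent fallback */
}
```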
Signed-off-by: Jeff Hostetler --- builtin/commit.c | 18 ++++++++++++------ t/t7524-serialized-status.sh | 16 ++++++++++++++++ 2 files changed, 28 insertions(+), 6 deletions(-) diff --git a/builtin/commit.c b/builtin/commit.c index dd3673444aa210..fa83f9ea7b4916 100644 --- a/builtin/commit.c +++ b/builtin/commit.c @@ -212,12 +212,18 @@ static int opt_parse_deserialize(const struct option *opt, const char *arg, int free(deserialize_path); deserialize_path = xstrdup(arg); } - if (deserialize_path && *deserialize_path - && (access(deserialize_path, R_OK) != 0)) - die("cannot find serialization file '%s'", - deserialize_path); - - do_explicit_deserialize = 1; + if (!deserialize_path || !*deserialize_path) + do_explicit_deserialize = 1; /* read stdin */ + else if (access(deserialize_path, R_OK) == 0) + do_explicit_deserialize = 1; /* can read from this file */ + else { + /* + * otherwise, silently fallback to the normal + * collection scan + */ + do_implicit_deserialize = 0; + do_explicit_deserialize = 0; + } } return 0; diff --git a/t/t7524-serialized-status.sh b/t/t7524-serialized-status.sh index 358a9b0b9359d9..89514a7fa3a0a5 100755 --- a/t/t7524-serialized-status.sh +++ b/t/t7524-serialized-status.sh @@ -426,4 +426,20 @@ test_expect_success 'ensure deserialize -v does not crash' ' grep -q "deserialize/reject:args/verbose" verbose_test.log_v ' +test_expect_success 'fallback when implicit' ' + git init implicit_fallback_test && + git -C implicit_fallback_test -c status.deserializepath=foobar status ' + +test_expect_success 'fallback when explicit' ' + git init explicit_fallback_test && + git -C explicit_fallback_test status --deserialize=foobar ' + +test_expect_success 'deserialize from stdin' ' + git init stdin_test && + git -C stdin_test status --serialize >serialized_status.dat && + cat serialized_status.dat | git -C stdin_test status --deserialize ' + test_done From 91c711ba6023a437ab36622f6db4591b2a440586 Mon Sep 17 00:00:00 2001 From: Jeff Hostetler Date: Tue, 16 Jul 2019 10:08:08 -0400 Subject: [PATCH 067/104] trace2:gvfs:experiment: add region around unpack_trees() Signed-off-by: Jeff Hostetler --- unpack-trees.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/unpack-trees.c b/unpack-trees.c index 7d3b5deee01211..317e7a0aa93596 100644 --- a/unpack-trees.c +++ b/unpack-trees.c @@ -1594,6 +1594,8 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options if (len > MAX_UNPACK_TREES) die("unpack_trees takes at most %d trees", MAX_UNPACK_TREES); + trace2_region_enter("exp", "unpack_trees", NULL); + trace_performance_enter(); if (!core_apply_sparse_checkout || !o->update) o->skip_sparse_checkout = 1; @@ -1762,6 +1764,7 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options if (free_pattern_list) clear_pattern_list(&pl); trace_performance_leave("unpack_trees"); + trace2_region_leave("exp", "unpack_trees", NULL); return ret; return_failed: From 83b1df3690c183b40a72731a57600e7c1f31c14f Mon Sep 17 00:00:00 2001 From: Jeff Hostetler Date: Thu, 3 Oct 2019 13:21:26 -0400 Subject: [PATCH 068/104] credential: set trace2_child_class for credential manager children Signed-off-by: Jeff Hostetler --- credential.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/credential.c b/credential.c index 2acb3bcd9e8133..e014bda2743fac 100644 --- a/credential.c +++ b/credential.c @@ -286,6 +286,8 @@ static int run_credential_helper(struct credential *c, else helper.no_stdout = 1; + helper.trace2_child_class = "cred"; + if (start_command(&helper) < 0)
return -1; From 871e8d04d2d5f8e905597875bbb408ae604d0ecd Mon Sep 17 00:00:00 2001 From: Jeff Hostetler Date: Wed, 18 Sep 2019 10:35:45 -0400 Subject: [PATCH 069/104] sub-process: do not borrow cmd pointer from caller Teach subprocess_start() to use a copy of the passed `cmd` string rather than borrowing the buffer from the caller. Some callers of subprocess_start() pass the value returned from find_hook() which points to a static buffer and therefore is only good until the next call to find_hook(). This could cause problems for the long-running background processes managed by sub-process.c where later calls to subprocess_find_entry() to get an existing process will fail. This could cause more than 1 long-running process to be created. TODO Need to confirm, but if only read_object_hook() uses TODO subprocess_start() in this manner, we could drop this TODO commit when we drop support for read_object_hook(). Signed-off-by: Jeff Hostetler --- sub-process.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/sub-process.c b/sub-process.c index 1b1af9dcbd9599..a49052900e89a6 100644 --- a/sub-process.c +++ b/sub-process.c @@ -80,7 +80,12 @@ int subprocess_start(struct hashmap *hashmap, struct subprocess_entry *entry, co int err; struct child_process *process; - entry->cmd = cmd; + // BUGBUG most callers to subprocess_start() pass in "cmd" the value + // BUGBUG of find_hook() which returns a static buffer (that's only + // BUGBUG good until the next call to find_hook()). + // BUGFIX Defer assignment until we copy the string in our argv. + // entry->cmd = cmd; + process = &entry->process; child_process_init(process); @@ -92,6 +97,8 @@ int subprocess_start(struct hashmap *hashmap, struct subprocess_entry *entry, co process->clean_on_exit_handler = subprocess_exit_handler; process->trace2_child_class = "subprocess"; + entry->cmd = process->args.argv[0]; + err = start_command(process); if (err) { error("cannot fork to run subprocess '%s'", cmd); From c0be9284e29136fcf0c0e8069692902be3f1b076 Mon Sep 17 00:00:00 2001 From: Jeff Hostetler Date: Wed, 18 Sep 2019 10:45:58 -0400 Subject: [PATCH 070/104] sub-process: add subprocess_start_argv() Add function to start a subprocess with an argv. 
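A hypothetical caller might use the new entry point like this (the helper name and handshake function are illustrative placeholders, not part of this series; types are git's argv_array/sub-process API as it exists at this point):

```c
/* Hypothetical usage sketch for subprocess_start_argv(). */
static struct hashmap example_map;

static int example_start_fn(struct subprocess_entry *entry); /* handshake stub */

static int launch_example_helper(struct subprocess_entry *entry)
{
	struct argv_array args = ARGV_ARRAY_INIT;
	int err;

	argv_array_push(&args, "example-helper");   /* illustrative name */
	argv_array_push(&args, "--long-running");
	err = subprocess_start_argv(&example_map, entry,
				    1 /* is_git_cmd */, &args,
				    example_start_fn);
	argv_array_clear(&args);
	return err;
}
```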
Signed-off-by: Jeff Hostetler --- sub-process.c | 47 +++++++++++++++++++++++++++++++++++++++++++++++ sub-process.h | 6 ++++++ 2 files changed, 53 insertions(+) diff --git a/sub-process.c b/sub-process.c index a49052900e89a6..cd05ef28521f3a 100644 --- a/sub-process.c +++ b/sub-process.c @@ -4,6 +4,7 @@ #include "sub-process.h" #include "sigchain.h" #include "pkt-line.h" +#include "quote.h" int cmd2process_cmp(const void *unused_cmp_data, const struct hashmap_entry *eptr, @@ -118,6 +119,52 @@ int subprocess_start(struct hashmap *hashmap, struct subprocess_entry *entry, co return 0; } +int subprocess_start_argv(struct hashmap *hashmap, + struct subprocess_entry *entry, + int is_git_cmd, + const struct argv_array *argv, + subprocess_start_fn startfn) +{ + int err; + int k; + struct child_process *process; + struct strbuf quoted = STRBUF_INIT; + + process = &entry->process; + + child_process_init(process); + for (k = 0; k < argv->argc; k++) + argv_array_push(&process->args, argv->argv[k]); + process->use_shell = 1; + process->in = -1; + process->out = -1; + process->git_cmd = is_git_cmd; + process->clean_on_exit = 1; + process->clean_on_exit_handler = subprocess_exit_handler; + process->trace2_child_class = "subprocess"; + + sq_quote_argv_pretty("ed, argv->argv); + entry->cmd = strbuf_detach("ed, 0); + + err = start_command(process); + if (err) { + error("cannot fork to run subprocess '%s'", entry->cmd); + return err; + } + + hashmap_entry_init(&entry->ent, strhash(entry->cmd)); + + err = startfn(entry); + if (err) { + error("initialization for subprocess '%s' failed", entry->cmd); + subprocess_stop(hashmap, entry); + return err; + } + + hashmap_add(hashmap, &entry->ent); + return 0; +} + static int handshake_version(struct child_process *process, const char *welcome_prefix, int *versions, int *chosen_version) diff --git a/sub-process.h b/sub-process.h index e85f21fa1a7c2b..df91febac6179a 100644 --- a/sub-process.h +++ b/sub-process.h @@ -57,6 +57,12 @@ typedef int(*subprocess_start_fn)(struct subprocess_entry *entry); int subprocess_start(struct hashmap *hashmap, struct subprocess_entry *entry, const char *cmd, subprocess_start_fn startfn); +int subprocess_start_argv(struct hashmap *hashmap, + struct subprocess_entry *entry, + int is_git_cmd, + const struct argv_array *argv, + subprocess_start_fn startfn); + /* Kill a subprocess and remove it from the subprocess hashmap. */ void subprocess_stop(struct hashmap *hashmap, struct subprocess_entry *entry); From 935747e2a8bae8bbb76bb41ab4b1af9b540a6732 Mon Sep 17 00:00:00 2001 From: Jeff Hostetler Date: Tue, 24 Sep 2019 14:31:10 -0400 Subject: [PATCH 071/104] sha1-file: add function to update existing loose object cache Create a function to add a new object to the loose object cache after the existing odb/xx/ directory was scanned. This will be used in a later commit to keep the loose object cache fresh after dynamically fetching an individual object and without requiring the odb/xx/ directory to be rescanned. Signed-off-by: Jeff Hostetler --- object-store.h | 8 ++++++++ sha1-file.c | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+) diff --git a/object-store.h b/object-store.h index d1e490f2035d2f..77999b9176ce25 100644 --- a/object-store.h +++ b/object-store.h @@ -57,6 +57,14 @@ void add_to_alternates_memory(const char *dir); struct oid_array *odb_loose_cache(struct object_directory *odb, const struct object_id *oid); +/* + * Add a new object to the loose object cache (possibly after the + * cache was populated). 
+ * This might be used after dynamically fetching a missing object.
+ */
+void odb_loose_cache_add_new_oid(struct object_directory *odb,
+				 const struct object_id *oid);
+
 /* Empty the loose object cache for the specified object directory. */
 void odb_clear_loose_cache(struct object_directory *odb);

diff --git a/sha1-file.c b/sha1-file.c
index 0530b99cce2498..91e58f144eeb61 100644
--- a/sha1-file.c
+++ b/sha1-file.c
@@ -2544,6 +2544,39 @@ struct oid_array *odb_loose_cache(struct object_directory *odb,
	return &odb->loose_objects_cache[subdir_nr];
 }

+void odb_loose_cache_add_new_oid(struct object_directory *odb,
+				 const struct object_id *oid)
+{
+	int subdir_nr = oid->hash[0];
+
+	if (subdir_nr < 0 ||
+	    subdir_nr >= ARRAY_SIZE(odb->loose_objects_subdir_seen))
+		BUG("subdir_nr out of range");
+
+	/*
+	 * If the loose object cache already has an oid_array covering
+	 * cell [xx], we assume that the cache was loaded *before* the
+	 * new object was created, so we just need to append our new one
+	 * to the existing array.
+	 *
+	 * Otherwise, cause the [xx] cell to be created by scanning the
+	 * directory. And since this happens *after* our caller created
+	 * the loose object, we don't need to explicitly add it to the
+	 * array.
+	 *
+	 * TODO If the subdir has not been seen, we don't technically
+	 * TODO need to force load it now. We could wait and let our
+	 * TODO caller (or whoever requested the missing object) try
+	 * TODO to read the xx/ object and fill the cache.
+	 * TODO Not sure it matters either way.
+	 */
+	if (odb->loose_objects_subdir_seen[subdir_nr])
+		append_loose_object(oid, NULL,
+				    &odb->loose_objects_cache[subdir_nr]);
+	else
+		odb_loose_cache(odb, oid);
+}
+
 void odb_clear_loose_cache(struct object_directory *odb)
 {
	int i;

From dbc6974a3acdcd3361f8c4291ab393a8d16529ac Mon Sep 17 00:00:00 2001
From: Jeff Hostetler
Date: Wed, 25 Sep 2019 13:36:54 -0400
Subject: [PATCH 072/104] packfile: add install_packed_git_and_mru()

Create a function to install a new packfile into the packed-git
list and add it to the head of the MRU list.

This function will be used in a later commit to install packfiles
created by dynamic object fetching.
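The intended call pattern (it mirrors the call added to
gvfs-helper-client.c later in this series) looks roughly like this
sketch, where `idx_path` and `is_local` stand in for values computed by
the caller:

    struct packed_git *p;

    /*
     * Register the freshly created "<name>.idx" and put the pack
     * at the head of the MRU list so lookups try it first.
     */
    p = add_packed_git(idx_path.buf, idx_path.len, is_local);
    if (p)
    	install_packed_git_and_mru(the_repository, p);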
Signed-off-by: Jeff Hostetler --- packfile.c | 6 ++++++ packfile.h | 1 + 2 files changed, 7 insertions(+) diff --git a/packfile.c b/packfile.c index 1cafc811066dab..16376288d3efcc 100644 --- a/packfile.c +++ b/packfile.c @@ -750,6 +750,12 @@ void install_packed_git(struct repository *r, struct packed_git *pack) hashmap_add(&r->objects->pack_map, &pack->packmap_ent); } +void install_packed_git_and_mru(struct repository *r, struct packed_git *pack) +{ + install_packed_git(r, pack); + list_add(&pack->mru, &r->objects->packed_git_mru); +} + void (*report_garbage)(unsigned seen_bits, const char *path); static void report_helper(const struct string_list *list, diff --git a/packfile.h b/packfile.h index 0fe6b054115253..ad08e37b1ce9e2 100644 --- a/packfile.h +++ b/packfile.h @@ -53,6 +53,7 @@ extern void (*report_garbage)(unsigned seen_bits, const char *path); void reprepare_packed_git(struct repository *r); void install_packed_git(struct repository *r, struct packed_git *pack); +void install_packed_git_and_mru(struct repository *r, struct packed_git *pack); struct packed_git *get_packed_git(struct repository *r); struct list_head *get_packed_git_mru(struct repository *r); From d643ee4916c37028989c43ff422b99503ccb42c1 Mon Sep 17 00:00:00 2001 From: Jeff Hostetler Date: Tue, 24 Sep 2019 15:51:16 -0400 Subject: [PATCH 073/104] index-pack: avoid immediate object fetch while parsing packfile Prevent packfile parsing from accidentally dynamically fetching each individual object found in the packfile. When index-pack parses the input packfile, it does a lookup in the ODB to test for conflicts/collisions. This can accidentally cause the object to be individually fetched when gvfs-helper (or read-object-hook or partial-clone) is enabled. Signed-off-by: Jeff Hostetler --- builtin/index-pack.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builtin/index-pack.c b/builtin/index-pack.c index f176dd28c870d5..3de2630fc77f6e 100644 --- a/builtin/index-pack.c +++ b/builtin/index-pack.c @@ -782,7 +782,7 @@ static void sha1_object(const void *data, struct object_entry *obj_entry, if (startup_info->have_repository) { read_lock(); collision_test_needed = - has_object_file_with_flags(oid, OBJECT_INFO_QUICK); + has_object_file_with_flags(oid, OBJECT_INFO_FOR_PREFETCH); read_unlock(); } From 8efa4afb0c5b0981f1b73e774dbbafbcc6e04fef Mon Sep 17 00:00:00 2001 From: Jeff Hostetler Date: Tue, 13 Aug 2019 12:12:08 -0400 Subject: [PATCH 074/104] gvfs-helper: create tool to fetch objects using the GVFS Protocol Create gvfs-helper. This is a helper tool to use the GVFS Protocol REST API to fetch objects and configuration data from a GVFS cache-server or Git server. This tool uses libcurl to send object requests to either server. This tool creates loose objects and/or packfiles. Create gvfs-helper-client. This code resides within git proper and uses the sub-process API to manage gvfs-helper as a long-running background process. 
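In-process callers are expected to drive the client roughly as in this
sketch (the surrounding variables are hypothetical; the entry points are
the ones declared in gvfs-helper-client.h below):

    enum gh_client__created ghc = GHC__CREATED__NOTHING;
    int i;

    /* Batch up missing objects, then fetch them in one POST. */
    for (i = 0; i < oid_nr; i++)
    	gh_client__queue_oid(&oids[i]);
    if (gh_client__drain_queue(&ghc))
    	die("gvfs-helper failed to fetch queued objects");

    /* Or fetch a single object immediately (GET semantics). */
    if (gh_client__get_immediate(&oid, &ghc))
    	die("gvfs-helper failed to fetch %s", oid_to_hex(&oid));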
Signed-off-by: Jeff Hostetler Signed-off-by: Derrick Stolee --- .gitignore | 1 + Documentation/config.txt | 2 + Documentation/config/core.txt | 3 + Documentation/config/gvfs.txt | 5 + Makefile | 7 + cache.h | 3 + config.c | 40 + environment.c | 3 + gvfs-helper-client.c | 391 ++++++ gvfs-helper-client.h | 68 + gvfs-helper.c | 2235 +++++++++++++++++++++++++++++++++ promisor-remote.c | 11 +- sha1-file.c | 31 +- t/helper/.gitignore | 1 + 14 files changed, 2799 insertions(+), 2 deletions(-) create mode 100644 Documentation/config/gvfs.txt create mode 100644 gvfs-helper-client.c create mode 100644 gvfs-helper-client.h create mode 100644 gvfs-helper.c diff --git a/.gitignore b/.gitignore index ee509a2ad26398..97d7327546d9fc 100644 --- a/.gitignore +++ b/.gitignore @@ -73,6 +73,7 @@ /git-gc /git-get-tar-commit-id /git-grep +/git-gvfs-helper /git-hash-object /git-help /git-http-backend diff --git a/Documentation/config.txt b/Documentation/config.txt index 01cc9c0d321a1d..b739895032b177 100644 --- a/Documentation/config.txt +++ b/Documentation/config.txt @@ -374,6 +374,8 @@ include::config/gui.txt[] include::config/guitool.txt[] +include::config/gvfs.txt[] + include::config/help.txt[] include::config/http.txt[] diff --git a/Documentation/config/core.txt b/Documentation/config/core.txt index 821c62cd3cbd9e..5ad4f10004679f 100644 --- a/Documentation/config/core.txt +++ b/Documentation/config/core.txt @@ -672,6 +672,9 @@ core.gvfs:: flag just blocks them from occurring at all. -- +core.useGvfsHelper:: + TODO + core.sparseCheckout:: Enable "sparse checkout" feature. See linkgit:git-sparse-checkout[1] for more information. diff --git a/Documentation/config/gvfs.txt b/Documentation/config/gvfs.txt new file mode 100644 index 00000000000000..6ab221ded36c91 --- /dev/null +++ b/Documentation/config/gvfs.txt @@ -0,0 +1,5 @@ +gvfs.cache-server:: + TODO + +gvfs.sharedcache:: + TODO diff --git a/Makefile b/Makefile index fe36dce720106b..36a607ee573f3c 100644 --- a/Makefile +++ b/Makefile @@ -891,6 +891,7 @@ LIB_OBJS += gpg-interface.o LIB_OBJS += graph.o LIB_OBJS += grep.o LIB_OBJS += gvfs.o +LIB_OBJS += gvfs-helper-client.o LIB_OBJS += hashmap.o LIB_OBJS += help.o LIB_OBJS += hex.o @@ -1379,6 +1380,8 @@ else endif BASIC_CFLAGS += $(CURL_CFLAGS) + PROGRAM_OBJS += gvfs-helper.o + REMOTE_CURL_PRIMARY = git-remote-http$X REMOTE_CURL_ALIASES = git-remote-https$X git-remote-ftp$X git-remote-ftps$X REMOTE_CURL_NAMES = $(REMOTE_CURL_PRIMARY) $(REMOTE_CURL_ALIASES) @@ -2494,6 +2497,10 @@ $(REMOTE_CURL_PRIMARY): remote-curl.o http.o http-walker.o GIT-LDFLAGS $(GITLIBS $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) \ $(CURL_LIBCURL) $(EXPAT_LIBEXPAT) $(LIBS) +git-gvfs-helper$X: gvfs-helper.o http.o GIT-LDFLAGS $(GITLIBS) + $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) \ + $(CURL_LIBCURL) $(EXPAT_LIBEXPAT) $(LIBS) + $(LIB_FILE): $(LIB_OBJS) $(QUIET_AR)$(RM) $@ && $(AR) $(ARFLAGS) $@ $^ diff --git a/cache.h b/cache.h index 9d14e512f90854..ebad31e272ccec 100644 --- a/cache.h +++ b/cache.h @@ -961,6 +961,9 @@ extern int precomposed_unicode; extern int protect_hfs; extern int protect_ntfs; extern const char *core_fsmonitor; +extern int core_use_gvfs_helper; +extern const char *gvfs_cache_server_url; +extern const char *gvfs_shared_cache_pathname; extern int core_apply_sparse_checkout; extern int core_sparse_checkout_cone; diff --git a/config.c b/config.c index 16626712043e05..7b2e5d87b36ffd 100644 --- a/config.c +++ b/config.c @@ -21,6 +21,7 @@ #include "color.h" #include 
"refs.h" #include "gvfs.h" +#include "transport.h" struct config_source { struct config_source *prev; @@ -1380,6 +1381,11 @@ static int git_default_core_config(const char *var, const char *value, void *cb) return 0; } + if (!strcmp(var, "core.usegvfshelper")) { + core_use_gvfs_helper = git_config_bool(var, value); + return 0; + } + if (!strcmp(var, "core.sparsecheckout")) { /* virtual file system relies on the sparse checkout logic so force it on */ if (core_virtualfilesystem) @@ -1505,6 +1511,37 @@ static int git_default_mailmap_config(const char *var, const char *value) return 0; } +static int git_default_gvfs_config(const char *var, const char *value) +{ + if (!strcmp(var, "gvfs.cache-server")) { + const char *v2 = NULL; + + if (!git_config_string(&v2, var, value) && v2 && *v2) + gvfs_cache_server_url = transport_anonymize_url(v2); + free((char*)v2); + return 0; + } + + if (!strcmp(var, "gvfs.sharedcache") && value && *value) { + struct strbuf buf = STRBUF_INIT; + strbuf_addstr(&buf, value); + if (strbuf_normalize_path(&buf) < 0) { + /* + * Pretend it wasn't set. This will cause us to + * fallback to ".git/objects" effectively. + */ + strbuf_release(&buf); + return 0; + } + strbuf_trim_trailing_dir_sep(&buf); + + gvfs_shared_cache_pathname = strbuf_detach(&buf, NULL); + return 0; + } + + return 0; +} + int git_default_config(const char *var, const char *value, void *cb) { if (starts_with(var, "core.")) @@ -1551,6 +1588,9 @@ int git_default_config(const char *var, const char *value, void *cb) return 0; } + if (starts_with(var, "gvfs.")) + return git_default_gvfs_config(var, value); + /* Add other config variables here and to Documentation/config.txt. */ return 0; } diff --git a/environment.c b/environment.c index 33ee0ad30a0d48..398455fb9318b7 100644 --- a/environment.c +++ b/environment.c @@ -87,6 +87,9 @@ int protect_hfs = PROTECT_HFS_DEFAULT; #endif int protect_ntfs = PROTECT_NTFS_DEFAULT; const char *core_fsmonitor; +int core_use_gvfs_helper; +const char *gvfs_cache_server_url; +const char *gvfs_shared_cache_pathname; /* * The character that begins a commented line in user-editable file diff --git a/gvfs-helper-client.c b/gvfs-helper-client.c new file mode 100644 index 00000000000000..653b57dfd01563 --- /dev/null +++ b/gvfs-helper-client.c @@ -0,0 +1,391 @@ +#include "cache.h" +#include "argv-array.h" +#include "trace2.h" +#include "oidset.h" +#include "object.h" +#include "object-store.h" +#include "gvfs-helper-client.h" +#include "sub-process.h" +#include "sigchain.h" +#include "pkt-line.h" +#include "quote.h" +#include "packfile.h" + +static struct oidset gh_client__oidset_queued = OIDSET_INIT; +static unsigned long gh_client__oidset_count; +static int gh_client__includes_immediate; + +struct gh_server__process { + struct subprocess_entry subprocess; /* must be first */ + unsigned int supported_capabilities; +}; + +static int gh_server__subprocess_map_initialized; +static struct hashmap gh_server__subprocess_map; +static struct object_directory *gh_client__chosen_odb; + +#define CAP_GET (1u<<1) + +static int gh_client__start_fn(struct subprocess_entry *subprocess) +{ + static int versions[] = {1, 0}; + static struct subprocess_capability capabilities[] = { + { "get", CAP_GET }, + { NULL, 0 } + }; + + struct gh_server__process *entry = (struct gh_server__process *)subprocess; + + return subprocess_handshake(subprocess, "gvfs-helper", versions, + NULL, capabilities, + &entry->supported_capabilities); +} + +/* + * Send: + * + * get LF + * ( LF)* + * + * + */ +static int 
gh_client__get__send_command(struct child_process *process)
+{
+	struct oidset_iter iter;
+	struct object_id *oid;
+	int err;
+
+	/*
+	 * We assume that all of the packet_ routines call error()
+	 * so that we don't have to.
+	 */
+
+	err = packet_write_fmt_gently(process->in, "get\n");
+	if (err)
+		return err;
+
+	oidset_iter_init(&gh_client__oidset_queued, &iter);
+	while ((oid = oidset_iter_next(&iter))) {
+		err = packet_write_fmt_gently(process->in, "%s\n",
+					      oid_to_hex(oid));
+		if (err)
+			return err;
+	}
+
+	err = packet_flush_gently(process->in);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+/*
+ * Verify that the pathname found in the "odb" response line matches
+ * what we requested.
+ *
+ * Since we ALWAYS send a "--shared-cache=<path>" arg to "gvfs-helper",
+ * we should be able to verify that the value is what we requested.
+ * In particular, I don't see a need to try to search for the response
+ * value in our list of alternates.
+ */
+static void gh_client__verify_odb_line(const char *line)
+{
+	const char *v1_odb_path;
+
+	if (!skip_prefix(line, "odb ", &v1_odb_path))
+		BUG("verify_odb_line: invalid line '%s'", line);
+
+	if (!gh_client__chosen_odb ||
+	    strcmp(v1_odb_path, gh_client__chosen_odb->path))
+		BUG("verify_odb_line: unexpected odb path '%s' vs '%s'",
+		    v1_odb_path, gh_client__chosen_odb->path);
+}
+
+/*
+ * Update the loose object cache to include the newly created
+ * object.
+ */
+static void gh_client__update_loose_cache(const char *line)
+{
+	const char *v1_oid;
+	struct object_id oid;
+
+	if (!skip_prefix(line, "loose ", &v1_oid))
+		BUG("update_loose_cache: invalid line '%s'", line);
+
+	if (get_oid_hex(v1_oid, &oid))
+		BUG("update_loose_cache: invalid oid in line '%s'", line);
+
+	odb_loose_cache_add_new_oid(gh_client__chosen_odb, &oid);
+}
+
+/*
+ * Update the packed-git list to include the newly created packfile.
+ */
+static void gh_client__update_packed_git(const char *line)
+{
+	struct strbuf path = STRBUF_INIT;
+	const char *v1_filename;
+	struct packed_git *p;
+	int is_local;
+
+	if (!skip_prefix(line, "packfile ", &v1_filename))
+		BUG("update_packed_git: invalid line '%s'", line);
+
+	/*
+	 * ODB[0] is the local .git/objects. All others are alternates.
+	 */
+	is_local = (gh_client__chosen_odb == the_repository->objects->odb);
+
+	strbuf_addf(&path, "%s/pack/%s",
+		    gh_client__chosen_odb->path, v1_filename);
+	strbuf_strip_suffix(&path, ".pack");
+	strbuf_addstr(&path, ".idx");
+
+	p = add_packed_git(path.buf, path.len, is_local);
+	if (p)
+		install_packed_git_and_mru(the_repository, p);
+}
+
+/*
+ * We expect:
+ *
+ *    <odb>
+ *    <data>*
+ *    <status>
+ *    <flush>
+ *
+ * Where:
+ *
+ * <odb> ::= odb SP <directory> LF
+ *
+ * <data> ::= <packfile> / <loose>
+ *
+ * <packfile> ::= packfile SP <filename> LF
+ *
+ * <loose> ::= loose SP <oid> LF
+ *
+ * <status> ::= ok LF
+ *            / partial LF
+ *            / error SP <message> LF
+ *
+ * Note that `gvfs-helper` controls how/if it chunks the request when
+ * it talks to the cache-server and/or main Git server. So it is
+ * possible for us to receive many packfiles and/or loose objects *AND
+ * THEN* get a hard network error or a 404 on an individual object.
+ *
+ * If we get a partial result, we can let the caller try to continue
+ * -- for example, maybe an immediate request for a tree object was
+ * grouped with a queued request for a blob. The tree-walk *might* be
+ * able to continue and let the 404 blob be handled later.
+ */
+static int gh_client__get__receive_response(
+	struct child_process *process,
+	enum gh_client__created *p_ghc,
+	int *p_nr_loose, int *p_nr_packfile)
+{
+	enum gh_client__created ghc = GHC__CREATED__NOTHING;
+	const char *v1;
+	char *line;
+	int len;
+	int err = 0;
+
+	while (1) {
+		/*
+		 * Warning: packet_read_line_gently() calls die()
+		 * despite the _gently moniker.
+		 */
+		len = packet_read_line_gently(process->out, NULL, &line);
+		if ((len < 0) || !line)
+			break;
+
+		if (starts_with(line, "odb")) {
+			gh_client__verify_odb_line(line);
+		}
+
+		else if (starts_with(line, "packfile")) {
+			gh_client__update_packed_git(line);
+			ghc |= GHC__CREATED__PACKFILE;
+			*p_nr_packfile += 1;
+		}
+
+		else if (starts_with(line, "loose")) {
+			gh_client__update_loose_cache(line);
+			ghc |= GHC__CREATED__LOOSE;
+			*p_nr_loose += 1;
+		}
+
+		else if (starts_with(line, "ok"))
+			;
+		else if (starts_with(line, "partial"))
+			;
+		else if (skip_prefix(line, "error ", &v1)) {
+			error("gvfs-helper error: '%s'", v1);
+			err = -1;
+		}
+	}
+
+	*p_ghc = ghc;
+
+	return err;
+}
+
+static void gh_client__choose_odb(void)
+{
+	if (gh_client__chosen_odb)
+		return;
+
+	prepare_alt_odb(the_repository);
+	gh_client__chosen_odb = the_repository->objects->odb;
+}
+
+static int gh_client__get(enum gh_client__created *p_ghc)
+{
+	struct gh_server__process *entry;
+	struct child_process *process;
+	struct argv_array argv = ARGV_ARRAY_INIT;
+	struct strbuf quoted = STRBUF_INIT;
+	int nr_loose = 0;
+	int nr_packfile = 0;
+	int err = 0;
+
+	trace2_region_enter("gh-client", "get", the_repository);
+
+	gh_client__choose_odb();
+
+	/*
+	 * TODO decide what defaults we want.
+	 */
+	argv_array_push(&argv, "gvfs-helper");
+	argv_array_push(&argv, "--fallback");
+	argv_array_push(&argv, "--cache-server=trust");
+	argv_array_pushf(&argv, "--shared-cache=%s",
+			 gh_client__chosen_odb->path);
+	argv_array_push(&argv, "server");
+
+	sq_quote_argv_pretty(&quoted, argv.argv);
+
+	if (!gh_server__subprocess_map_initialized) {
+		gh_server__subprocess_map_initialized = 1;
+		hashmap_init(&gh_server__subprocess_map,
+			     (hashmap_cmp_fn)cmd2process_cmp, NULL, 0);
+		entry = NULL;
+	} else
+		entry = (struct gh_server__process *)subprocess_find_entry(
+			&gh_server__subprocess_map, quoted.buf);
+
+	if (!entry) {
+		entry = xmalloc(sizeof(*entry));
+		entry->supported_capabilities = 0;
+
+		err = subprocess_start_argv(
+			&gh_server__subprocess_map, &entry->subprocess, 1,
+			&argv, gh_client__start_fn);
+		if (err) {
+			free(entry);
+			goto leave_region;
+		}
+	}
+
+	process = &entry->subprocess.process;
+
+	if (!(CAP_GET & entry->supported_capabilities)) {
+		error("gvfs-helper: does not support GET");
+		subprocess_stop(&gh_server__subprocess_map,
+				(struct subprocess_entry *)entry);
+		free(entry);
+		err = -1;
+		goto leave_region;
+	}
+
+	sigchain_push(SIGPIPE, SIG_IGN);
+
+	err = gh_client__get__send_command(process);
+	if (!err)
+		err = gh_client__get__receive_response(process, p_ghc,
+						       &nr_loose, &nr_packfile);
+
+	sigchain_pop(SIGPIPE);
+
+	if (err) {
+		subprocess_stop(&gh_server__subprocess_map,
+				(struct subprocess_entry *)entry);
+		free(entry);
+	}
+
+leave_region:
+	argv_array_clear(&argv);
+	strbuf_release(&quoted);
+
+	trace2_data_intmax("gh-client", the_repository,
+			   "get/immediate", gh_client__includes_immediate);
+
+	trace2_data_intmax("gh-client", the_repository,
+			   "get/nr_objects", gh_client__oidset_count);
+
+	if (nr_loose)
+		trace2_data_intmax("gh-client", the_repository,
+				   "get/nr_loose", nr_loose);
+
+	if (nr_packfile)
+		trace2_data_intmax("gh-client", the_repository,
+				   "get/nr_packfile", nr_packfile);
+
+	if (err)
+		trace2_data_intmax("gh-client", the_repository,
+				   "get/error", err);
+
+	trace2_region_leave("gh-client", "get", the_repository);
+
+	oidset_clear(&gh_client__oidset_queued);
+	gh_client__oidset_count = 0;
+	gh_client__includes_immediate = 0;
+
+	return err;
+}
+
+void gh_client__queue_oid(const struct object_id *oid)
+{
+	// TODO consider removing this trace2. it is useful for interactive
+	// TODO debugging, but may generate way too much noise for a data
+	// TODO event.
+	trace2_printf("gh_client__queue_oid: %s", oid_to_hex(oid));
+
+	if (!oidset_insert(&gh_client__oidset_queued, oid))
+		gh_client__oidset_count++;
+}
+
+/*
+ * This routine should actually take a "const struct oid_array *"
+ * rather than the component parts, but fetch_objects() uses
+ * this model (because of the call in sha1-file.c).
+ */
+void gh_client__queue_oid_array(const struct object_id *oids, int oid_nr)
+{
+	int k;
+
+	for (k = 0; k < oid_nr; k++)
+		gh_client__queue_oid(&oids[k]);
+}
+
+int gh_client__drain_queue(enum gh_client__created *p_ghc)
+{
+	*p_ghc = GHC__CREATED__NOTHING;
+
+	if (!gh_client__oidset_count)
+		return 0;
+
+	return gh_client__get(p_ghc);
+}
+
+int gh_client__get_immediate(const struct object_id *oid,
+			     enum gh_client__created *p_ghc)
+{
+	gh_client__includes_immediate = 1;
+
+	// TODO consider removing this trace2. it is useful for interactive
+	// TODO debugging, but may generate way too much noise for a data
+	// TODO event.
+	trace2_printf("gh_client__get_immediate: %s", oid_to_hex(oid));
+
+	if (!oidset_insert(&gh_client__oidset_queued, oid))
+		gh_client__oidset_count++;
+
+	return gh_client__drain_queue(p_ghc);
+}
diff --git a/gvfs-helper-client.h b/gvfs-helper-client.h
new file mode 100644
index 00000000000000..a5a951ff5b5bfe
--- /dev/null
+++ b/gvfs-helper-client.h
@@ -0,0 +1,68 @@
+#ifndef GVFS_HELPER_CLIENT_H
+#define GVFS_HELPER_CLIENT_H
+
+struct repository;
+struct commit;
+
+enum gh_client__created {
+	/*
+	 * The _get_ operation did not create anything. It doesn't
+	 * matter if `gvfs-helper` had errors or not -- just that
+	 * nothing was created.
+	 */
+	GHC__CREATED__NOTHING = 0,
+
+	/*
+	 * The _get_ operation created one or more packfiles.
+	 */
+	GHC__CREATED__PACKFILE = 1<<1,
+
+	/*
+	 * The _get_ operation created one or more loose objects.
+	 * (Not necessarily for the individual OID you requested.)
+	 */
+	GHC__CREATED__LOOSE = 1<<2,
+
+	/*
+	 * The _get_ operation created one or more packfiles *and*
+	 * one or more loose objects.
+	 */
+	GHC__CREATED__PACKFILE_AND_LOOSE = (GHC__CREATED__PACKFILE |
+					    GHC__CREATED__LOOSE),
+};
+
+/*
+ * Ask `gvfs-helper server` to immediately fetch a single object
+ * using "/gvfs/objects" GET semantics.
+ *
+ * A long-running background process is used to make subsequent
+ * requests more efficient.
+ *
+ * A loose object will be created in the shared-cache ODB and
+ * in-memory cache updated.
+ */
+int gh_client__get_immediate(const struct object_id *oid,
+			     enum gh_client__created *p_ghc);
+
+/*
+ * Queue this OID for a future fetch using `gvfs-helper service`.
+ * It does not wait.
+ *
+ * Callers should not rely on the queued object being on disk until
+ * the queue has been drained.
+ */
+void gh_client__queue_oid(const struct object_id *oid);
+void gh_client__queue_oid_array(const struct object_id *oids, int oid_nr);
+
+/*
+ * Ask `gvfs-helper server` to fetch the set of queued OIDs using
+ * "/gvfs/objects" POST semantics.
+ *
+ * A long-running background process is used to make subsequent
+ * requests more efficient.
+ *
+ * One or more packfiles will be created in the shared-cache ODB.
+ */
+int gh_client__drain_queue(enum gh_client__created *p_ghc);
+
+#endif /* GVFS_HELPER_CLIENT_H */
diff --git a/gvfs-helper.c b/gvfs-helper.c
new file mode 100644
index 00000000000000..60ae7a8a62fa89
--- /dev/null
+++ b/gvfs-helper.c
@@ -0,0 +1,2235 @@
+// TODO Write a man page. Here are some notes for dogfooding.
+// TODO
+//
+// Usage: git gvfs-helper [<main_options>] <sub-command> [<sub-command-options>]
+//
+// <main_options>:
+//
+//     --remote=<remote-name>         // defaults to "origin"
+//
+//     --fallback                     // boolean. defaults to off
+//
+//            When a fetch from the cache-server fails, automatically
+//            fallback to the main Git server. This option has no effect
+//            if no cache-server is defined.
+//
+//     --cache-server=<use>           // defaults to "verify"
+//
+//            verify   := lookup the set of defined cache-servers using
+//                        "gvfs/config" and confirm that the selected
+//                        cache-server is well-known. Silently disable the
+//                        cache-server if not. (See security notes later.)
+//
+//            error    := verify cache-server and abort if not well-known.
+//
+//            trust    := do not verify cache-server. just use it.
+//
+//            disable  := disable the cache-server and always use the main
+//                        Git server.
+//
+//     --shared-cache=<odb-directory>
+//
+//            A relative or absolute pathname to the ODB directory to store
+//            fetched objects.
+//
+//            If this option is not specified, we default to the value
+//            in the "gvfs.sharedcache" config setting and then to the
+//            local ".git/objects" directory.
+//
+// <sub-command>:
+//
+//     config
+//
+//            Fetch the "gvfs/config" string from the main Git server.
+//            (The cache-server setting is ignored because cache-servers
+//            do not support this REST API.)
+//
+//     get
+//
+//            Fetch 1 or more objects. If a cache-server is configured,
+//            try it first. Optionally fallback to the main Git server.
+//
+//            The set of objects is given on stdin and is assumed to be
+//            a list of <oid>, one per line.
+//
+//            <get-options>:
+//
+//                 --block-size=<n>      // defaults to "4000"
+//
+//                       Request objects from server in batches of at
+//                       most n objects (not bytes).
+//
+//                 --depth=<depth>       // defaults to "1"
+//
+//     server
+//
+//            Interactive/sub-process mode. Listen for a series of commands
+//            and data on stdin and return results on stdout. This command
+//            uses pkt-line format [1] and implements the long-running process
+//            protocol [2] to communicate with the foreground/parent process.
+//
+//            <server-options>:
+//
+//                 --block-size=<n>      // defaults to "4000"
+//
+//                       Request objects from server in batches of at
+//                       most n objects (not bytes).
+//
+//                 --depth=<depth>       // defaults to "1"
+//
+//            Interactive verb: get
+//
+//                 Fetch 1 or more objects. If a cache-server is configured,
+//                 try it first. Optionally fallback to the main Git server.
+//                 Create 1 or more loose objects and/or packfiles in the
+//                 requested shared-cache directory (given on the command
+//                 line and which is reported at the beginning of the
+//                 response).
+//
+//                 git> get
+//                 git> <oid>
+//                 git> <oid>
+//                 git> ...
+//                 git> <oid>
+//                 git> 0000
+//
+//                 git< odb <directory>
+//                 git< loose <oid> | packfile <filename>
+//                 git< loose <oid> | packfile <filename>
+//                 git< ...
+//                 git< loose <oid> | packfile <filename>
+//                 git< ok | partial | error <message>
+//                 git< 0000
+//
+// [1] Documentation/technical/protocol-common.txt
+// [2] Documentation/technical/long-running-process-protocol.txt
+// [3] See GIT_TRACE_PACKET
+//
+// Example:
+//
+// $ git -c core.virtualizeobjects=false -c core.usegvfshelper=false
+//           rev-list --objects --no-walk --missing=print HEAD
+//      | grep "^?"
+//      | sed 's/^?//'
+//      | git gvfs-helper get-missing
+//
+// Note: In this example, we need to turn off "core.virtualizeobjects" and
+// "core.usegvfshelper" when building the list of objects. This prevents
+// rev-list (in oid_object_info_extended()) from automatically fetching
+// them with read-object-hook or "gvfs-helper server" sub-process (and
+// defeating the whole purpose of this example).
+//
+//////////////////////////////////////////////////////////////////
+
+#include "cache.h"
+#include "config.h"
+#include "remote.h"
+#include "connect.h"
+#include "strbuf.h"
+#include "walker.h"
+#include "http.h"
+#include "exec-cmd.h"
+#include "run-command.h"
+#include "pkt-line.h"
+#include "string-list.h"
+#include "sideband.h"
+#include "argv-array.h"
+#include "credential.h"
+#include "oid-array.h"
+#include "send-pack.h"
+#include "protocol.h"
+#include "quote.h"
+#include "transport.h"
+#include "parse-options.h"
+#include "object-store.h"
+#include "json-writer.h"
+#include "tempfile.h"
+#include "oidset.h"
+#include "dir.h"
+#include "progress.h"
+
+static const char * const main_usage[] = {
+	N_("git gvfs-helper [<main_options>] config [<options>]"),
+	N_("git gvfs-helper [<main_options>] get [<options>]"),
+	N_("git gvfs-helper [<main_options>] server [<options>]"),
+	NULL
+};
+
+static const char *const get_usage[] = {
+	N_("git gvfs-helper [<main_options>] get [<options>]"),
+	NULL
+};
+
+static const char *const server_usage[] = {
+	N_("git gvfs-helper [<main_options>] server [<options>]"),
+	NULL
+};
+
+#define GH__DEFAULT_BLOCK_SIZE 4000
+
+/*
+ * Our exit-codes.
+ */
+enum gh__error_code {
+	GH__ERROR_CODE__USAGE = -1, /* will be mapped to usage() */
+	GH__ERROR_CODE__OK = 0,
+	GH__ERROR_CODE__ERROR = 1, /* unspecified */
+//	GH__ERROR_CODE__CACHE_SERVER_NOT_FOUND = 2,
+	GH__ERROR_CODE__CURL_ERROR = 3,
+	GH__ERROR_CODE__HTTP_401 = 4,
+	GH__ERROR_CODE__HTTP_404 = 5,
+	GH__ERROR_CODE__HTTP_UNEXPECTED_CODE = 6,
+	GH__ERROR_CODE__UNEXPECTED_CONTENT_TYPE = 7,
+	GH__ERROR_CODE__COULD_NOT_CREATE_TEMPFILE = 8,
+	GH__ERROR_CODE__COULD_NOT_INSTALL_LOOSE = 9,
+	GH__ERROR_CODE__COULD_NOT_INSTALL_PACKFILE = 10,
+	GH__ERROR_CODE__SUBPROCESS_SYNTAX = 11,
+};
+
+enum gh__cache_server_mode {
+	/* verify URL. disable if unknown. */
+	GH__CACHE_SERVER_MODE__VERIFY_DISABLE = 0,
+	/* verify URL. error if unknown. */
+	GH__CACHE_SERVER_MODE__VERIFY_ERROR,
+	/* disable the cache-server, if defined */
+	GH__CACHE_SERVER_MODE__DISABLE,
+	/* trust any cache-server */
+	GH__CACHE_SERVER_MODE__TRUST_WITHOUT_VERIFY,
+};
+
+/*
+ * The set of command line, config, and environment variables
+ * that we use as input to decide how we should operate.
+ */
+static struct gh__cmd_opts {
+	const char *remote_name;
+
+	int try_fallback; /* to git server if cache-server fails */
+	int show_progress;
+
+	int depth;
+	int block_size;
+
+	enum gh__cache_server_mode cache_server_mode;
+} gh__cmd_opts;
+
+/*
+ * The chosen global state derived from the inputs in gh__cmd_opts.
+ */
+static struct gh__global {
+	struct remote *remote;
+
+	struct credential main_creds;
+	struct credential cache_creds;
+
+	const char *main_url;
+	const char *cache_server_url;
+
+	struct strbuf buf_odb_path;
+
+	int http_is_initialized;
+	int cache_server_is_initialized; /* did sub-command look for one */
+	int main_creds_need_approval; /* try to only approve them once */
+
+} gh__global;
+
+/*
+ * Stolen from http.c
+ */
+static CURLcode gh__curlinfo_strbuf(CURL *curl, CURLINFO info, struct strbuf *buf)
+{
+	char *ptr;
+	CURLcode ret;
+
+	strbuf_reset(buf);
+	ret = curl_easy_getinfo(curl, info, &ptr);
+	if (!ret && ptr)
+		strbuf_addstr(buf, ptr);
+	return ret;
+}
+
+enum gh__progress_state {
+	GH__PROGRESS_STATE__START = 0,
+	GH__PROGRESS_STATE__PHASE1,
+	GH__PROGRESS_STATE__PHASE2,
+	GH__PROGRESS_STATE__PHASE3,
+};
+
+/*
+ * Parameters to drive an HTTP request (with any necessary retries).
+ */
+struct gh__request_params {
+	int b_is_post; /* POST=1 or GET=0 */
+	int b_write_to_file; /* write to file=1 or strbuf=0 */
+	int b_no_cache_server; /* force main server only */
+
+	unsigned long object_count; /* number of objects being fetched */
+
+	const struct strbuf *post_payload; /* POST body to send */
+
+	struct curl_slist *headers; /* additional http headers to send */
+	struct tempfile *tempfile; /* for response content when file */
+	struct strbuf *buffer; /* for response content when strbuf */
+	struct strbuf label; /* for trace2 regions */
+
+	struct strbuf loose_path;
+
+	/*
+	 * Note that I am putting all of the progress-related instance data
+	 * inside the request-params in the hope that we can eventually
+	 * do multi-threaded/concurrent HTTP requests when chunking
+	 * large requests. However, the underlying "struct progress" API
+	 * is not thread safe (that is, it doesn't allow concurrent progress
+	 * reports (since that might require multiple lines on the screen
+	 * or something)).
+	 */
+	enum gh__progress_state progress_state;
+	struct strbuf progress_base_phase2_msg;
+	struct strbuf progress_base_phase3_msg;
+
+	/*
+	 * The buffer for the formatted progress message is shared by the
+	 * "struct progress" API and must remain valid for the duration of
+	 * the start_progress..stop_progress lifespan.
+	 */
+	struct strbuf progress_msg;
+	struct progress *progress;
+};
+
+#define GH__REQUEST_PARAMS_INIT { \
+	.b_is_post = 0, \
+	.b_write_to_file = 0, \
+	.b_no_cache_server = 0, \
+	.object_count = 0, \
+	.post_payload = NULL, \
+	.headers = NULL, \
+	.tempfile = NULL, \
+	.buffer = NULL, \
+	.label = STRBUF_INIT, \
+	.loose_path = STRBUF_INIT, \
+	.progress_state = GH__PROGRESS_STATE__START, \
+	.progress_base_phase2_msg = STRBUF_INIT, \
+	.progress_base_phase3_msg = STRBUF_INIT, \
+	.progress_msg = STRBUF_INIT, \
+	.progress = NULL, \
+	}
+
+static void gh__request_params__release(struct gh__request_params *params)
+{
+	if (!params)
+		return;
+
+	params->post_payload = NULL; /* we do not own this */
+
+	curl_slist_free_all(params->headers);
+	params->headers = NULL;
+
+	delete_tempfile(&params->tempfile);
+
+	params->buffer = NULL; /* we do not own this */
+
+	strbuf_release(&params->label);
+	strbuf_release(&params->loose_path);
+
+	strbuf_release(&params->progress_base_phase2_msg);
+	strbuf_release(&params->progress_base_phase3_msg);
+	strbuf_release(&params->progress_msg);
+
+	stop_progress(&params->progress);
+	params->progress = NULL;
+}
+
+/*
+ * Bucket to describe the results of an HTTP request (may be
+ * overwritten during retries so that it describes the final attempt).
+ */ +struct gh__response_status { + struct strbuf error_message; + struct strbuf content_type; + long response_code; /* http response code */ + CURLcode curl_code; + enum gh__error_code ec; + intmax_t bytes_received; +}; + +#define GH__RESPONSE_STATUS_INIT { \ + .error_message = STRBUF_INIT, \ + .content_type = STRBUF_INIT, \ + .response_code = 0, \ + .curl_code = CURLE_OK, \ + .ec = GH__ERROR_CODE__OK, \ + .bytes_received = 0, \ + } + +static void gh__response_status__zero(struct gh__response_status *s) +{ + strbuf_setlen(&s->error_message, 0); + strbuf_setlen(&s->content_type, 0); + s->response_code = 0; + s->curl_code = CURLE_OK; + s->ec = GH__ERROR_CODE__OK; + s->bytes_received = 0; +} + +/* + * Create a single normalized 'ec' error-code from the status we + * received from the HTTP request. Map a few of the expected HTTP + * status code to 'ec', but don't get too crazy here. + */ +static void gh__response_status__set_from_slot( + struct gh__request_params *params, + struct gh__response_status *status, + const struct active_request_slot *slot) +{ + status->curl_code = slot->results->curl_result; + gh__curlinfo_strbuf(slot->curl, CURLINFO_CONTENT_TYPE, + &status->content_type); + curl_easy_getinfo(slot->curl, CURLINFO_RESPONSE_CODE, + &status->response_code); + + strbuf_setlen(&status->error_message, 0); + + if (status->response_code == 200) + status->ec = GH__ERROR_CODE__OK; + + else if (status->response_code == 401) { + strbuf_addstr(&status->error_message, "401 Not Authorized"); + status->ec = GH__ERROR_CODE__HTTP_401; + + } else if (status->response_code == 404) { + strbuf_addstr(&status->error_message, "404 Not Found"); + status->ec = GH__ERROR_CODE__HTTP_404; + + } else if (status->curl_code != CURLE_OK) { + strbuf_addf(&status->error_message, "%s (curl)", + curl_easy_strerror(status->curl_code)); + status->ec = GH__ERROR_CODE__CURL_ERROR; + } else { + strbuf_addf(&status->error_message, "HTTP %ld Unexpected", + status->response_code); + status->ec = GH__ERROR_CODE__HTTP_UNEXPECTED_CODE; + } + + if (status->ec != GH__ERROR_CODE__OK) + status->bytes_received = 0; + else if (params->b_write_to_file) + status->bytes_received = (intmax_t)ftell(params->tempfile->fp); + else + status->bytes_received = (intmax_t)params->buffer->len; +} + +static void gh__response_status__release(struct gh__response_status *status) +{ + if (!status) + return; + strbuf_release(&status->error_message); + strbuf_release(&status->content_type); +} + +/* + * The cache-server sends a somewhat bogus 400 instead of + * the normal 401 when AUTH is required. Fixup the status + * to hide that. + */ +static void fixup_cache_server_400_to_401(struct gh__response_status *status) +{ + if (status->response_code != 400) + return; + + /* + * TODO Technically, the cache-server could send a 400 + * TODO for many reasons, not just for their bogus + * TODO pseudo-401, but we're going to assume it is a + * TODO 401 for now. We should confirm the expected + * TODO error message in the response-body. + */ + status->response_code = 401; +} + +static int gh__curl_progress_cb(void *clientp, + curl_off_t dltotal, curl_off_t dlnow, + curl_off_t ultotal, curl_off_t ulnow) +{ + struct gh__request_params *params = clientp; + + /* + * From what I can tell, CURL progress arrives in 3 phases. + * + * [1] An initial connection setup phase where we get [0,0] [0,0]. + * [2] An upload phase where we start sending the request headers + * and body. ulnow will be > 0. ultotal may or may not be 0. 
+	 * [3] A download phase where we start receiving the response
+	 *     headers and payload body. dlnow will be > 0. dltotal may
+	 *     or may not be 0.
+	 *
+	 * If we pass zero for the total to the "struct progress" API, we
+	 * get simple numbers rather than percentages. So our progress
+	 * output format may vary depending on the phase.
+	 *
+	 * It is unclear if CURL will give us a final callback after
+	 * everything is finished, so we leave the progress handle open
+	 * and let the caller issue the final stop_progress().
+	 *
+	 * There is a bit of a mismatch between the CURL API and the
+	 * "struct progress" API. The latter requires us to set the
+	 * progress message when we call one of the start_progress
+	 * methods. We cannot change the progress message while we are
+	 * showing progress state. And we cannot change the denominator
+	 * (total) after we start. CURL may or may not give us the total
+	 * sizes for each phase.
+	 *
+	 * Also be advised that the "struct progress" API eats messages
+	 * so that the screen is only updated every second or so. And
+	 * may not print anything if the start..stop happen in less than
+	 * 2 seconds. Whereas CURL calls this callback very frequently.
+	 * The net-net is that we may not actually see this progress
+	 * message for small/fast HTTP requests.
+	 */
+
+	switch (params->progress_state) {
+	case GH__PROGRESS_STATE__START: /* first callback */
+		if (dlnow == 0 && ulnow == 0)
+			goto enter_phase_1;
+
+		if (ulnow)
+			goto enter_phase_2;
+		else
+			goto enter_phase_3;
+
+	case GH__PROGRESS_STATE__PHASE1:
+		if (dlnow == 0 && ulnow == 0)
+			return 0;
+
+		if (ulnow)
+			goto enter_phase_2;
+		else
+			goto enter_phase_3;
+
+	case GH__PROGRESS_STATE__PHASE2:
+		display_progress(params->progress, ulnow);
+		if (dlnow == 0)
+			return 0;
+
+		stop_progress(&params->progress);
+		goto enter_phase_3;
+
+	case GH__PROGRESS_STATE__PHASE3:
+		display_progress(params->progress, dlnow);
+		return 0;
+
+	default:
+		return 0;
+	}
+
+enter_phase_1:
+	/*
+	 * Don't bother to create a progress handle during phase [1].
+	 * Because we get [0,0,0,0], we don't have any data to report
+	 * and would just have to synthesize some type of progress.
+	 * From my testing, phase [1] is fairly quick (probably just
+	 * the SSL handshake), so the "struct progress" API will most
+	 * likely completely eat any messages that we did produce.
+	 */
+	params->progress_state = GH__PROGRESS_STATE__PHASE1;
+	return 0;
+
+enter_phase_2:
+	strbuf_setlen(&params->progress_msg, 0);
+	if (params->progress_base_phase2_msg.len) {
+		strbuf_addf(&params->progress_msg, "%s (bytes sent)",
+			    params->progress_base_phase2_msg.buf);
+		params->progress = start_progress(params->progress_msg.buf, ultotal);
+		display_progress(params->progress, ulnow);
+	}
+	params->progress_state = GH__PROGRESS_STATE__PHASE2;
+	return 0;
+
+enter_phase_3:
+	strbuf_setlen(&params->progress_msg, 0);
+	if (params->progress_base_phase3_msg.len) {
+		strbuf_addf(&params->progress_msg, "%s (bytes received)",
+			    params->progress_base_phase3_msg.buf);
+		params->progress = start_progress(params->progress_msg.buf, dltotal);
+		display_progress(params->progress, dlnow);
+	}
+	params->progress_state = GH__PROGRESS_STATE__PHASE3;
+	return 0;
+}
+
+/*
+ * Run the request without using "run_one_slot()" because we
+ * don't want the post-request normalization, error handling,
+ * and auto-reauth handling in http.c.
+ */
+static void gh__run_one_slot(struct active_request_slot *slot,
+			     struct gh__request_params *params,
+			     struct gh__response_status *status)
+{
+	trace2_region_enter("gvfs-helper", params->label.buf, NULL);
+
+	if (!start_active_slot(slot)) {
+		status->curl_code = CURLE_FAILED_INIT; /* a bit of a lie */
+		strbuf_addstr(&status->error_message,
+			      "failed to start HTTP request");
+	} else {
+		run_active_slot(slot);
+		if (params->b_write_to_file)
+			fflush(params->tempfile->fp);
+
+		gh__response_status__set_from_slot(params, status, slot);
+
+		if (status->ec == GH__ERROR_CODE__OK) {
+			int old_len = params->label.len;
+
+			strbuf_addstr(&params->label, "/nr_objects");
+			trace2_data_intmax("gvfs-helper", NULL,
+					   params->label.buf,
+					   params->object_count);
+			strbuf_setlen(&params->label, old_len);
+
+			strbuf_addstr(&params->label, "/nr_bytes");
+			trace2_data_intmax("gvfs-helper", NULL,
+					   params->label.buf,
+					   status->bytes_received);
+			strbuf_setlen(&params->label, old_len);
+		}
+	}
+
+	if (params->progress)
+		stop_progress(&params->progress);
+
+	trace2_region_leave("gvfs-helper", params->label.buf, NULL);
+}
+
+static int option_parse_cache_server_mode(const struct option *opt,
+					  const char *arg, int unset)
+{
+	if (unset) /* should not happen */
+		return error(_("missing value for switch '%s'"),
+			     opt->long_name);
+
+	else if (!strcmp(arg, "verify"))
+		gh__cmd_opts.cache_server_mode =
+			GH__CACHE_SERVER_MODE__VERIFY_DISABLE;
+
+	else if (!strcmp(arg, "error"))
+		gh__cmd_opts.cache_server_mode =
+			GH__CACHE_SERVER_MODE__VERIFY_ERROR;
+
+	else if (!strcmp(arg, "disable"))
+		gh__cmd_opts.cache_server_mode =
+			GH__CACHE_SERVER_MODE__DISABLE;
+
+	else if (!strcmp(arg, "trust"))
+		gh__cmd_opts.cache_server_mode =
+			GH__CACHE_SERVER_MODE__TRUST_WITHOUT_VERIFY;
+
+	else
+		return error(_("invalid value for switch '%s'"),
+			     opt->long_name);
+
+	return 0;
+}
+
+/*
+ * Let command line args override "gvfs.sharedcache" config setting.
+ *
+ * It would be nice to move this to parse-options.c as an
+ * OPTION_PATHNAME handler. And maybe have flags for exists()
+ * and is_directory().
+ */
+static int option_parse_shared_cache_directory(const struct option *opt,
+					       const char *arg, int unset)
+{
+	if (unset) /* should not happen */
+		return error(_("missing value for switch '%s'"),
+			     opt->long_name);
+
+	if (!is_directory(arg))
+		return error(_("value for switch '%s' is not a directory: '%s'"),
+			     opt->long_name, arg);
+
+	gvfs_shared_cache_pathname = arg;
+
+	return 0;
+}
+
+/*
+ * Lookup the URL for this remote (defaults to 'origin').
+ */
+static void lookup_main_url(void)
+{
+	/*
+	 * Both VFS and Scalar only work with 'origin', so we expect this.
+	 * The command line arg is mainly for debugging.
+	 */
+	if (!gh__cmd_opts.remote_name || !*gh__cmd_opts.remote_name)
+		gh__cmd_opts.remote_name = "origin";
+
+	gh__global.remote = remote_get(gh__cmd_opts.remote_name);
+	if (!gh__global.remote->url[0] || !*gh__global.remote->url[0])
+		die("unknown remote '%s'", gh__cmd_opts.remote_name);
+
+	/*
+	 * Strip out any in-line auth in the origin server URL so that
+	 * we can control which creds we fetch.
+	 *
+	 * Azure DevOps has been known to suggest https URLs of the
+	 * form "https://<account>@dev.azure.com/<account>/<path>".
+	 *
+	 * Break that so that we can force the use of a PAT.
+	 */
+	gh__global.main_url = transport_anonymize_url(gh__global.remote->url[0]);
+
+	trace2_data_string("gvfs-helper", NULL, "remote/url", gh__global.main_url);
+}
+
+static void do__gvfs_config(struct gh__response_status *status,
+			    struct strbuf *config_data);
+
+/*
+ * Find the URL of the cache-server, if we have one.
+ *
+ * This routine is called by the initialization code and is allowed
+ * to call die() rather than returning an 'ec'.
+ */
+static void select_cache_server(void)
+{
+	struct gh__response_status status = GH__RESPONSE_STATUS_INIT;
+	struct strbuf config_data = STRBUF_INIT;
+	const char *match = NULL;
+
+	/*
+	 * This only indicates that the sub-command actually called
+	 * this routine. We rely on gh__global.cache_server_url to tell
+	 * us if we actually have a cache-server configured.
+	 */
+	gh__global.cache_server_is_initialized = 1;
+	gh__global.cache_server_url = NULL;
+
+	if (gh__cmd_opts.cache_server_mode == GH__CACHE_SERVER_MODE__DISABLE) {
+		trace2_data_string("gvfs-helper", NULL, "cache/url", "disabled");
+		return;
+	}
+
+	/*
+	 * If the cache-server and main Git server have the same URL, we
+	 * can silently disable the cache-server (by NOT setting the field
+	 * in gh__global) and explicitly disable the fallback logic.
+	 */
+	if (!strcmp(gvfs_cache_server_url, gh__global.main_url)) {
+		gh__cmd_opts.try_fallback = 0;
+		trace2_data_string("gvfs-helper", NULL, "cache/url", "same");
+		return;
+	}
+
+	if (gh__cmd_opts.cache_server_mode ==
+	    GH__CACHE_SERVER_MODE__TRUST_WITHOUT_VERIFY) {
+		gh__global.cache_server_url = gvfs_cache_server_url;
+		trace2_data_string("gvfs-helper", NULL, "cache/url",
+				   gvfs_cache_server_url);
+		return;
+	}
+
+	/*
+	 * GVFS cache-servers use the main Git server's creds rather
+	 * than having their own creds. This feels like a security
+	 * hole. For example, if the cache-server URL is pointed to a
+	 * bad site, we'll happily send them our creds to the main Git
+	 * server with each request to the cache-server. This would
+	 * allow an attacker to later use our creds to impersonate us
+	 * on the main Git server.
+	 *
+	 * So we optionally verify that the URL to the cache-server is
+	 * well-known by the main Git server.
+	 */
+
+	do__gvfs_config(&status, &config_data);
+
+	if (status.ec == GH__ERROR_CODE__OK) {
+		/*
+		 * The gvfs/config response is in JSON, but I don't think
+		 * we need to parse it and all that. Let's just do a simple
+		 * strstr() and assume it is sufficient.
+		 *
+		 * We do add some context to the pattern to guard against
+		 * some attacks.
+		 */
+		struct strbuf pattern = STRBUF_INIT;
+
+		strbuf_addf(&pattern, "\"Url\":\"%s\"", gvfs_cache_server_url);
+		match = strstr(config_data.buf, pattern.buf);
+
+		strbuf_release(&pattern);
+	}
+
+	strbuf_release(&config_data);
+
+	if (match) {
+		gh__global.cache_server_url = gvfs_cache_server_url;
+		trace2_data_string("gvfs-helper", NULL, "cache/url",
+				   gvfs_cache_server_url);
+	}
+
+	else if (gh__cmd_opts.cache_server_mode ==
+		 GH__CACHE_SERVER_MODE__VERIFY_ERROR) {
+		if (status.ec != GH__ERROR_CODE__OK)
+			die("could not verify cache-server '%s': %s",
+			    gvfs_cache_server_url,
+			    status.error_message.buf);
+		else
+			die("could not verify cache-server '%s'",
+			    gvfs_cache_server_url);
+	}
+
+	else if (gh__cmd_opts.cache_server_mode ==
+		 GH__CACHE_SERVER_MODE__VERIFY_DISABLE) {
+		if (status.ec != GH__ERROR_CODE__OK)
+			warning("could not verify cache-server '%s': %s",
+				gvfs_cache_server_url,
+				status.error_message.buf);
+		else
+			warning("could not verify cache-server '%s'",
+				gvfs_cache_server_url);
+		trace2_data_string("gvfs-helper", NULL, "cache/url",
+				   "disabled");
+	}
+
+	gh__response_status__release(&status);
+}
+
+/*
+ * Read stdin until EOF (or a blank line) and add the desired OIDs
+ * to the oidset.
+ *
+ * Stdin should contain a list of OIDs. It may have additional
+ * decoration that we need to strip out.
+ *
+ * We expect:
+ *     <hex-oid> [<path>]   // present OIDs
+ */
+static unsigned long read_stdin_from_rev_list(struct oidset *oids)
+{
+	struct object_id oid;
+	struct strbuf buf_stdin = STRBUF_INIT;
+	unsigned long count = 0;
+
+	do {
+		if (strbuf_getline(&buf_stdin, stdin) == EOF || !buf_stdin.len)
+			break;
+
+		if (get_oid_hex(buf_stdin.buf, &oid))
+			continue; /* just silently eat it */
+
+		if (!oidset_insert(oids, &oid))
+			count++;
+	} while (1);
+
+	return count;
+}
+
+/*
+ * Build a complete JSON payload for a gvfs/objects POST request
+ * containing the first n OIDs in an OIDSET, indexed by the iterator.
+ *
+ * https://github.com/microsoft/VFSForGit/blob/master/Protocol.md
+ */
+static unsigned long build_json_payload__gvfs_objects(
+	struct json_writer *jw_req,
+	struct oidset_iter *iter,
+	unsigned long nr_in_block)
+{
+	unsigned long k;
+	const struct object_id *oid;
+
+	k = 0;
+
+	jw_init(jw_req);
+	jw_object_begin(jw_req, 0);
+	jw_object_intmax(jw_req, "commitDepth", gh__cmd_opts.depth);
+	jw_object_inline_begin_array(jw_req, "objectIds");
+	while (k < nr_in_block && (oid = oidset_iter_next(iter))) {
+		jw_array_string(jw_req, oid_to_hex(oid));
+		k++;
+	}
+	jw_end(jw_req);
+	jw_end(jw_req);
+
+	return k;
+}
+
+/*
+ * Lookup the creds for the main/origin Git server.
+ */
+static void lookup_main_creds(void)
+{
+	if (gh__global.main_creds.username && *gh__global.main_creds.username)
+		return;
+
+	credential_from_url(&gh__global.main_creds, gh__global.main_url);
+	credential_fill(&gh__global.main_creds);
+	gh__global.main_creds_need_approval = 1;
+}
+
+/*
+ * If we have a set of creds for the main Git server, tell the credential
+ * manager to throw them away and ask it to reacquire them.
+ */
+static void refresh_main_creds(void)
+{
+	if (gh__global.main_creds.username && *gh__global.main_creds.username)
+		credential_reject(&gh__global.main_creds);
+
+	lookup_main_creds();
+
+	// TODO should we compare before and after values of u/p and
+	// TODO shortcut reauth if we already know it will fail?
+	// TODO if so, return a bool if same/different.
+}
+
+static void approve_main_creds(void)
+{
+	if (!gh__global.main_creds_need_approval)
+		return;
+
+	credential_approve(&gh__global.main_creds);
+	gh__global.main_creds_need_approval = 0;
+}
+
+/*
+ * Build a set of creds for the cache-server based upon the main Git
+ * server (assuming we have a cache-server configured).
+ *
+ * That is, we NEVER fill them directly for the cache-server -- we
+ * only synthesize them from the filled main creds.
+ */
+static void synthesize_cache_server_creds(void)
+{
+	if (!gh__global.cache_server_is_initialized)
+		BUG("sub-command did not initialize cache-server vars");
+
+	if (!gh__global.cache_server_url)
+		return;
+
+	if (gh__global.cache_creds.username && *gh__global.cache_creds.username)
+		return;
+
+	/*
+	 * Get the main Git server creds so we can borrow the username
+	 * and password when we talk to the cache-server.
+	 */
+	lookup_main_creds();
+	gh__global.cache_creds.username = xstrdup(gh__global.main_creds.username);
+	gh__global.cache_creds.password = xstrdup(gh__global.main_creds.password);
+}
+
+/*
+ * Flush and refresh the cache-server creds. Because the cache-server
+ * does not do 401s (or manage creds), we have to reload the main Git
+ * server creds first.
+ *
+ * That is, we NEVER reject them directly because we never filled them.
+ */
+static void refresh_cache_server_creds(void)
+{
+	credential_clear(&gh__global.cache_creds);
+
+	refresh_main_creds();
+	synthesize_cache_server_creds();
+}
+
+/*
+ * We NEVER approve cache-server creds directly because we never directly
+ * filled them. However, we should be able to infer that the main ones
+ * are valid and can approve them if necessary.
+ */
+static void approve_cache_server_creds(void)
+{
+	approve_main_creds();
+}
+
+/*
+ * Select the ODB directory where we will write objects that we
+ * download. If a shared-cache directory was given on the command line
+ * or defined in the config, use it; otherwise, use the local ODB (in
+ * ".git/objects").
+ */
+static void select_odb(void)
+{
+	const char *odb_path = NULL;
+
+	strbuf_init(&gh__global.buf_odb_path, 0);
+
+	if (gvfs_shared_cache_pathname && *gvfs_shared_cache_pathname)
+		odb_path = gvfs_shared_cache_pathname;
+	else {
+		prepare_alt_odb(the_repository);
+		odb_path = the_repository->objects->odb->path;
+	}
+
+	strbuf_addstr(&gh__global.buf_odb_path, odb_path);
+}
+
+/*
+ * Create a tempfile to stream the packfile into.
+ *
+ * We create a tempfile in the chosen ODB directory and let CURL
+ * automatically stream data to the file. If successful, we can
+ * later rename it to a proper .pack and run "git index-pack" on
+ * it to create the corresponding .idx file.
+ *
+ * TODO I would rather just stream the packfile directly into
+ * TODO "git index-pack --stdin" (and save some I/O) because it
+ * TODO will automatically take care of the rename of both files
+ * TODO and any other cleanup. BUT INDEX-PACK WILL ONLY WRITE
+ * TODO TO THE PRIMARY ODB -- it will not write into the alternates
+ * TODO (this is considered bad form). So we would need to add
+ * TODO an option to index-pack to handle this. I don't want to
+ * TODO deal with this issue right now.
+ *
+ * TODO Consider using lockfile for this rather than naked tempfile.
+ */
+static struct tempfile *create_tempfile_for_packfile(void)
+{
+	static unsigned int nth = 0;
+	static struct timeval tv = {0};
+	static struct tm tm = {0};
+	static time_t secs = 0;
+	static char tbuf[32] = {0};
+
+	struct tempfile *tempfile = NULL;
+	struct strbuf buf_path = STRBUF_INIT;
+
+	if (!nth) {
+		gettimeofday(&tv, NULL);
+		secs = tv.tv_sec;
+		gmtime_r(&secs, &tm);
+
+		xsnprintf(tbuf, sizeof(tbuf), "%4d%02d%02d-%02d%02d%02d-%06ld",
+			  tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
+			  tm.tm_hour, tm.tm_min, tm.tm_sec,
+			  (long)tv.tv_usec);
+	}
+
+	// TODO should this be in the "<odb>/pack/tempPacks/"
+	// TODO directory instead? YES
+
+	strbuf_addbuf(&buf_path, &gh__global.buf_odb_path);
+	strbuf_complete(&buf_path, '/');
+	strbuf_addf(&buf_path, "pack/vfs-%s-%04d.temp", tbuf, nth++);
+
+	tempfile = create_tempfile(buf_path.buf);
+	fdopen_tempfile(tempfile, "w");
+
+	strbuf_release(&buf_path);
+
+	return tempfile;
+}
+
+/*
+ * Create a tempfile to stream a loose object into.
+ *
+ * We create a tempfile in the chosen ODB directory and let CURL
+ * automatically stream data to the file.
+ *
+ * We put it directly in the "<odb>/xx/" directory.
+ */
+static void create_tempfile_for_loose(
+	struct gh__request_params *params,
+	struct gh__response_status *status,
+	const struct object_id *oid)
+{
+	struct strbuf buf_path = STRBUF_INIT;
+	const char *hex;
+
+	gh__response_status__zero(status);
+
+	hex = oid_to_hex(oid);
+
+	strbuf_addbuf(&buf_path, &gh__global.buf_odb_path);
+	strbuf_complete(&buf_path, '/');
+	strbuf_add(&buf_path, hex, 2);
+
+	if (!file_exists(buf_path.buf) && mkdir(buf_path.buf, 0777) == -1) {
+		strbuf_addf(&status->error_message,
+			    "cannot create directory for loose object '%s'",
+			    buf_path.buf);
+		status->ec = GH__ERROR_CODE__COULD_NOT_CREATE_TEMPFILE;
+		goto cleanup;
+	}
+
+	strbuf_addch(&buf_path, '/');
+	strbuf_addstr(&buf_path, hex+2);
+
+	/* Remember the full path of the final destination. */
+	strbuf_setlen(&params->loose_path, 0);
+	strbuf_addbuf(&params->loose_path, &buf_path);
+
+	/*
+	 * Build a unique tempfile pathname based upon it. We avoid
+	 * using lockfiles to avoid issues with stale locks after
+	 * crashes.
+	 */
+	strbuf_addf(&buf_path, ".%08u.temp", getpid());
+
+	params->tempfile = create_tempfile(buf_path.buf);
+	if (!params->tempfile) {
+		strbuf_addstr(&status->error_message,
+			      "could not create tempfile for loose object");
+		status->ec = GH__ERROR_CODE__COULD_NOT_CREATE_TEMPFILE;
+		goto cleanup;
+	}
+
+	fdopen_tempfile(params->tempfile, "w");
+
+cleanup:
+	strbuf_release(&buf_path);
+}
+
+/*
+ * Extract the filename portion of the given pathname.
+ *
+ * TODO Wish I could find a strbuf_filename() function for this.
+ */
+static void extract_filename(struct strbuf *filename,
+			     const struct strbuf *pathname)
+{
+	size_t len = pathname->len;
+
+	strbuf_setlen(filename, 0);
+
+	while (len > 0 && !is_dir_sep(pathname->buf[len - 1]))
+		len--;
+
+	strbuf_addstr(filename, &pathname->buf[len]);
+}
+
+/*
+ * Convert the tempfile into a permanent .pack packfile in the ODB.
+ * Create the corresponding .idx file.
+ *
+ * Return the filename (not pathname) of the resulting packfile.
+ */
+static void install_packfile(struct gh__response_status *status,
+			     struct tempfile **pp_tempfile,
+			     struct strbuf *packfile_filename)
+{
+	struct child_process ip = CHILD_PROCESS_INIT;
+	struct strbuf pack_name_tmp = STRBUF_INIT;
+	struct strbuf pack_name_dst = STRBUF_INIT;
+	struct strbuf idx_name_tmp = STRBUF_INIT;
+	struct strbuf idx_name_dst = STRBUF_INIT;
+	size_t len_base;
+
+	gh__response_status__zero(status);
+
+	strbuf_setlen(packfile_filename, 0);
+
+	/*
+	 * start with "<base>.temp" (that is owned by tempfile class).
+	 * rename to "<base>.pack.temp" to break ownership.
+	 *
+	 * create "<base>.idx.temp" on provisional packfile.
+	 *
+	 * officially install both "<base>.{pack,idx}.temp" as
+	 * "<base>.{pack,idx}".
+	 */
+
+	strbuf_addstr(&pack_name_tmp, get_tempfile_path(*pp_tempfile));
+	if (!strip_suffix(pack_name_tmp.buf, ".temp", &len_base)) {
+		/*
+		 * This is more of a BUG(), but I want the error
+		 * code propagated.
+		 */
+		strbuf_addf(&status->error_message,
+			    "packfile tempfile does not end in '.temp': '%s'",
+			    pack_name_tmp.buf);
+		status->ec = GH__ERROR_CODE__COULD_NOT_INSTALL_PACKFILE;
+		goto cleanup;
+	}
+
+	strbuf_setlen(&pack_name_tmp, (int)len_base);
+	strbuf_addbuf(&pack_name_dst, &pack_name_tmp);
+	strbuf_addbuf(&idx_name_tmp, &pack_name_tmp);
+	strbuf_addbuf(&idx_name_dst, &pack_name_tmp);
+
+	strbuf_addstr(&pack_name_tmp, ".pack.temp");
+	strbuf_addstr(&pack_name_dst, ".pack");
+	strbuf_addstr(&idx_name_tmp, ".idx.temp");
+	strbuf_addstr(&idx_name_dst, ".idx");
+
+	// TODO if either pack_name_dst or idx_name_dst already
+	// TODO exists in the ODB, create alternate names so that
+	// TODO we don't step on them.
+
+	if (rename_tempfile(pp_tempfile, pack_name_tmp.buf) == -1) {
+		strbuf_addf(&status->error_message,
+			    "could not rename packfile to '%s'",
+			    pack_name_tmp.buf);
+		status->ec = GH__ERROR_CODE__COULD_NOT_INSTALL_PACKFILE;
+		goto cleanup;
+	}
+
+	argv_array_push(&ip.args, "index-pack");
+	if (gh__cmd_opts.show_progress)
+		argv_array_push(&ip.args, "-v");
+	argv_array_pushl(&ip.args, "-o", idx_name_tmp.buf, NULL);
+	argv_array_push(&ip.args, pack_name_tmp.buf);
+	ip.git_cmd = 1;
+	ip.no_stdin = 1;
+	ip.no_stdout = 1;
+
+	// TODO consider capturing stdout from index-pack because
+	// TODO it will contain the SHA of the packfile and we can
+	// TODO (should?) add it to the .pack and .idx pathnames
+	// TODO when we install them.
+	// TODO
+	// TODO See pipe_command() rather than run_command().
+	// TODO
+	// TODO Or should we SHA it ourselves (or read the last 20 bytes)?
+
+	/*
+	 * Note that I DO NOT have a region around the index-pack process.
+	 * The region in gh__run_one_slot() currently only covers the
+	 * download time. This index-pack is a separate step not covered
+	 * in the above region. Later, if/when we have CURL directly stream
+	 * to index-pack, that region will be the combined download+index
+	 * time. So, I'm not going to introduce it here.
+ */
+	if (run_command(&ip)) {
+		unlink(pack_name_tmp.buf);
+		unlink(idx_name_tmp.buf);
+		strbuf_addf(&status->error_message,
+			    "index-pack failed on '%s'", pack_name_tmp.buf);
+		status->ec = GH__ERROR_CODE__COULD_NOT_INSTALL_PACKFILE;
+		goto cleanup;
+	}
+
+	if (finalize_object_file(pack_name_tmp.buf, pack_name_dst.buf) ||
+	    finalize_object_file(idx_name_tmp.buf, idx_name_dst.buf)) {
+		unlink(pack_name_tmp.buf);
+		unlink(pack_name_dst.buf);
+		unlink(idx_name_tmp.buf);
+		unlink(idx_name_dst.buf);
+		strbuf_addf(&status->error_message,
+			    "could not install packfile '%s'",
+			    pack_name_dst.buf);
+		status->ec = GH__ERROR_CODE__COULD_NOT_INSTALL_PACKFILE;
+		goto cleanup;
+	}
+
+	extract_filename(packfile_filename, &pack_name_dst);
+
+cleanup:
+	child_process_clear(&ip);
+	strbuf_release(&pack_name_tmp);
+	strbuf_release(&pack_name_dst);
+	strbuf_release(&idx_name_tmp);
+	strbuf_release(&idx_name_dst);
+}
+
+/*
+ * Convert the tempfile into a permanent loose object in the ODB.
+ */
+static void install_loose(struct gh__request_params *params,
+			  struct gh__response_status *status)
+{
+	struct strbuf tmp_path = STRBUF_INIT;
+
+	gh__response_status__zero(status);
+
+	/*
+	 * close tempfile to steal ownership away from tempfile class.
+	 */
+	strbuf_addstr(&tmp_path, get_tempfile_path(params->tempfile));
+	close_tempfile_gently(params->tempfile);
+
+	/*
+	 * Try to install the tempfile as the actual loose object.
+	 *
+	 * If the loose object already exists, finalize_object_file()
+	 * will NOT overwrite/replace it.  It will silently eat the
+	 * EEXIST error and unlink the tempfile as if it was
+	 * successful.  We just let it lie to us.
+	 *
+	 * Since our job is to back-fill missing objects needed by a
+	 * foreground git process -- git should have called
+	 * oid_object_info_extended() and loose_object_info() BEFORE
+	 * asking us to download the missing object.  So if we get a
+	 * collision we have to assume something else is happening in
+	 * parallel and we lost the race.  And that's OK.
+	 */
+	if (finalize_object_file(tmp_path.buf, params->loose_path.buf)) {
+		unlink(tmp_path.buf);
+		strbuf_addf(&status->error_message,
+			    "could not install loose object '%s'",
+			    params->loose_path.buf);
+		status->ec = GH__ERROR_CODE__COULD_NOT_INSTALL_LOOSE;
+	}
+
+	strbuf_release(&tmp_path);
+}
+
+/*
+ * Our wrapper to initialize the HTTP layer.
+ *
+ * We always use the real origin server, not the cache-server, when
+ * initializing the http/curl layer.
+ */
+static void gh_http_init(void)
+{
+	if (gh__global.http_is_initialized)
+		return;
+
+	http_init(gh__global.remote, gh__global.main_url, 0);
+	gh__global.http_is_initialized = 1;
+}
+
+static void gh_http_cleanup(void)
+{
+	if (!gh__global.http_is_initialized)
+		return;
+
+	http_cleanup();
+	gh__global.http_is_initialized = 0;
+}
+
+/*
+ * Do a single HTTP request without auth-retry or fallback.
+ */
+static void do_req(const char *url_base,
+		   const char *url_component,
+		   const struct credential *creds,
+		   struct gh__request_params *params,
+		   struct gh__response_status *status)
+{
+	struct active_request_slot *slot;
+	struct slot_results results;
+	struct strbuf rest_url = STRBUF_INIT;
+
+	gh__response_status__zero(status);
+
+	if (params->b_write_to_file) {
+		// TODO ftruncate tempfile ??
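+		// TODO (On a retry, the tempfile may still contain
+		// TODO bytes from the previous attempt; truncating it
+		// TODO here -- or using a fresh tempfile per attempt --
+		// TODO would keep CURL from appending to stale data.)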
+	} else {
+		strbuf_setlen(params->buffer, 0);
+	}
+
+	end_url_with_slash(&rest_url, url_base);
+	strbuf_addstr(&rest_url, url_component);
+
+	slot = get_active_slot();
+	slot->results = &results;
+
+	curl_easy_setopt(slot->curl, CURLOPT_NOBODY, 0); /* not a HEAD request */
+	curl_easy_setopt(slot->curl, CURLOPT_URL, rest_url.buf);
+	curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, params->headers);
+
+	if (params->b_is_post) {
+		curl_easy_setopt(slot->curl, CURLOPT_POST, 1);
+		curl_easy_setopt(slot->curl, CURLOPT_ENCODING, NULL);
+		curl_easy_setopt(slot->curl, CURLOPT_POSTFIELDS,
+				 params->post_payload->buf);
+		curl_easy_setopt(slot->curl, CURLOPT_POSTFIELDSIZE,
+				 params->post_payload->len);
+	} else {
+		curl_easy_setopt(slot->curl, CURLOPT_POST, 0);
+	}
+
+	if (params->b_write_to_file) {
+		curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite);
+		curl_easy_setopt(slot->curl, CURLOPT_WRITEDATA,
+				 (void*)params->tempfile->fp);
+	} else {
+		curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION,
+				 fwrite_buffer);
+		curl_easy_setopt(slot->curl, CURLOPT_FILE, params->buffer);
+	}
+
+	if (creds && creds->username) {
+		/*
+		 * Force CURL to respect the username/password we provide by
+		 * turning off the AUTH-ANY negotiation stuff.
+		 *
+		 * That is, CURLAUTH_ANY causes CURL to NOT send the creds
+		 * on an initial request in order to force a 401 and let it
+		 * negotiate the best auth scheme and then retry.
+		 *
+		 * This is problematic when talking to the cache-servers
+		 * because they send a 400 (with a "A valid Basic Auth..."
+		 * message body) rather than a 401.  This means that the
+		 * automatic retry will never happen.  And even if we
+		 * do force a retry, CURL still won't send the creds.
+		 *
+		 * So we turn it off and force it to use our creds.
+		 */
+		curl_easy_setopt(slot->curl, CURLOPT_HTTPAUTH, CURLAUTH_BASIC);
+		curl_easy_setopt(slot->curl, CURLOPT_USERNAME, creds->username);
+		curl_easy_setopt(slot->curl, CURLOPT_PASSWORD, creds->password);
+	} else {
+		/*
+		 * Turn on the AUTH-ANY negotiation.  This only works
+		 * with the main Git server (because the cache-server
+		 * doesn't handle 401s).
+		 *
+		 * TODO Think about if we really need to handle this case
+		 * TODO and if so, add an .is_main to params.
+ */ + curl_easy_setopt(slot->curl, CURLOPT_HTTPAUTH, CURLAUTH_ANY); + } + + if (params->progress_base_phase2_msg.len || + params->progress_base_phase3_msg.len) { + curl_easy_setopt(slot->curl, CURLOPT_XFERINFOFUNCTION, + gh__curl_progress_cb); + curl_easy_setopt(slot->curl, CURLOPT_XFERINFODATA, params); + curl_easy_setopt(slot->curl, CURLOPT_NOPROGRESS, 0); + } else { + curl_easy_setopt(slot->curl, CURLOPT_NOPROGRESS, 1); + } + + gh__run_one_slot(slot, params, status); +} + +static void do_req__to_main(const char *url_component, + struct gh__request_params *params, + struct gh__response_status *status) +{ +// lookup_main_creds(); + + do_req(gh__global.main_url, url_component, &gh__global.main_creds, + params, status); + + if (status->response_code == 401) { + refresh_main_creds(); + + do_req(gh__global.main_url, url_component, &gh__global.main_creds, + params, status); + } + + if (status->response_code == 200) + approve_main_creds(); +} + +static void do_req__to_cache_server(const char *url_component, + struct gh__request_params *params, + struct gh__response_status *status) +{ + synthesize_cache_server_creds(); + + do_req(gh__global.cache_server_url, url_component, &gh__global.cache_creds, + params, status); + fixup_cache_server_400_to_401(status); + + if (status->response_code == 401) { + refresh_cache_server_creds(); + + do_req(gh__global.cache_server_url, url_component, + &gh__global.cache_creds, params, status); + fixup_cache_server_400_to_401(status); + } + + if (status->response_code == 200) + approve_cache_server_creds(); +} + +static void do_req__with_fallback(const char *url_component, + struct gh__request_params *params, + struct gh__response_status *status) +{ + if (gh__global.cache_server_url && !params->b_no_cache_server) { + do_req__to_cache_server(url_component, params, status); + + if (status->response_code == 200) + return; + + if (!gh__cmd_opts.try_fallback) + return; + + /* + * The cache-server shares creds with the main Git server, + * so if our creds failed against the cache-server, they + * will also fail against the main Git server. We just let + * this fail. + * + * Falling-back would likely just cause the 3rd (or maybe + * 4th) cred prompt. + */ + if (status->response_code == 401) + return; + } + + do_req__to_main(url_component, params, status); +} + +/* + * Call "gvfs/config" REST API. + * + * Return server's response buffer. This is probably a raw JSON string. + */ +static void do__gvfs_config(struct gh__response_status *status, + struct strbuf *config_data) +{ + struct gh__request_params params = GH__REQUEST_PARAMS_INIT; + + strbuf_addstr(¶ms.label, "GET/config"); + + params.b_is_post = 0; + params.b_write_to_file = 0; + params.b_no_cache_server = 1; /* they don't handle gvfs/config API */ + params.buffer = config_data; + + params.object_count = 1; /* a bit of a lie */ + + /* + * "X-TFS-FedAuthRedirect: Suppress" disables the 302 + 203 redirect + * sequence to a login page and forces the main Git server to send a + * normal 401. + */ + params.headers = http_copy_default_headers(); + params.headers = curl_slist_append(params.headers, + "X-TFS-FedAuthRedirect: Suppress"); + params.headers = curl_slist_append(params.headers, + "Pragma: no-cache"); + + if (gh__cmd_opts.show_progress) { + /* + * gvfs/config has a very small reqest payload, so I don't + * see any need to report progress on the upload side of + * the GET. So just report progress on the download side. 
+		 */
+		strbuf_addstr(&params.progress_base_phase3_msg,
+			      "Receiving gvfs/config");
+	}
+
+	do_req__with_fallback("gvfs/config", &params, status);
+
+	gh__request_params__release(&params);
+}
+
+/*
+ * Call "gvfs/objects/" REST API to fetch a loose object
+ * and write it to the ODB.
+ */
+static void do__loose__gvfs_object(struct gh__response_status *status,
+				   const struct object_id *oid)
+{
+	struct gh__request_params params = GH__REQUEST_PARAMS_INIT;
+	struct strbuf component_url = STRBUF_INIT;
+
+	gh__response_status__zero(status);
+
+	strbuf_addf(&component_url, "gvfs/objects/%s", oid_to_hex(oid));
+
+	strbuf_addstr(&params.label, "GET/objects");
+
+	params.b_is_post = 0;
+	params.b_write_to_file = 1;
+	params.b_no_cache_server = 0;
+
+	params.object_count = 1;
+
+	params.headers = http_copy_default_headers();
+	params.headers = curl_slist_append(params.headers,
+					   "X-TFS-FedAuthRedirect: Suppress");
+	params.headers = curl_slist_append(params.headers,
+					   "Pragma: no-cache");
+
+	create_tempfile_for_loose(&params, status, oid);
+	if (!params.tempfile)
+		goto cleanup;
+
+	if (gh__cmd_opts.show_progress) {
+		/*
+		 * Likewise, a gvfs/objects/{oid} has a very small request
+		 * payload, so I don't see any need to report progress on
+		 * the upload side of the GET.  So just report progress
+		 * on the download side.
+		 */
+		strbuf_addstr(&params.progress_base_phase3_msg,
+			      "Receiving 1 loose object");
+	}
+
+	do_req__with_fallback(component_url.buf, &params, status);
+
+	if (status->ec == GH__ERROR_CODE__OK)
+		install_loose(&params, status);
+
+cleanup:
+	gh__request_params__release(&params);
+	strbuf_release(&component_url);
+}
+
+/*
+ * Call "gvfs/objects" POST REST API to fetch a packfile containing
+ * the objects in the requested OIDSET.  Returns the filename (not
+ * pathname) of the new packfile.
+ */
+static void do__packfile__gvfs_objects(struct gh__response_status *status,
+				       struct oidset_iter *iter,
+				       unsigned long nr_wanted_in_block,
+				       struct strbuf *output_filename,
+				       unsigned long *nr_taken)
+{
+	struct json_writer jw_req = JSON_WRITER_INIT;
+	struct gh__request_params params = GH__REQUEST_PARAMS_INIT;
+
+	gh__response_status__zero(status);
+
+	params.object_count = build_json_payload__gvfs_objects(
+		&jw_req, iter, nr_wanted_in_block);
+	*nr_taken = params.object_count;
+
+	strbuf_addstr(&params.label, "POST/objects");
+
+	params.b_is_post = 1;
+	params.b_write_to_file = 1;
+	params.b_no_cache_server = 0;
+
+	params.post_payload = &jw_req.json;
+
+	params.headers = http_copy_default_headers();
+	params.headers = curl_slist_append(params.headers,
+					   "X-TFS-FedAuthRedirect: Suppress");
+	params.headers = curl_slist_append(params.headers,
+					   "Pragma: no-cache");
+	params.headers = curl_slist_append(params.headers,
+					   "Content-Type: application/json");
+	/*
+	 * We really always want a packfile.  But if the payload only
+	 * requests 1 OID, the server will/may send us a single loose
+	 * object instead.  (Apparently the server ignores us when we
+	 * only send application/x-git-packfile and does it anyway.)
+	 *
+	 * So to make it clear to my future self, go ahead and add
+	 * an accept header for loose objects and own it.
+ */ + params.headers = curl_slist_append(params.headers, + "Accept: application/x-git-packfile"); + params.headers = curl_slist_append(params.headers, + "Accept: application/x-git-loose-object"); + + params.tempfile = create_tempfile_for_packfile(); + if (!params.tempfile) { + strbuf_addstr(&status->error_message, + "could not create tempfile for packfile"); + status->ec = GH__ERROR_CODE__COULD_NOT_CREATE_TEMPFILE; + goto cleanup; + } + + if (gh__cmd_opts.show_progress) { + strbuf_addf(¶ms.progress_base_phase2_msg, + "Requesting packfile with %ld objects", + params.object_count); + strbuf_addf(¶ms.progress_base_phase3_msg, + "Receiving packfile with %ld objects", + params.object_count); + } + + do_req__with_fallback("gvfs/objects", ¶ms, status); + + if (status->ec == GH__ERROR_CODE__OK) { + if (!strcmp(status->content_type.buf, + "application/x-git-packfile")) { + + // TODO Consider having a worker thread to manage + // TODO running index-pack and then install the + // TODO resulting .idx and .pack files. This would + // TODO let us interleave those steps with our thread + // TODO fetching the next block of objects from the + // TODO server. (Need to think about how progress + // TODO messages from our thread and index-pack + // TODO would mesh.) + // TODO + // TODO But then again, if we hack index-pack to write + // TODO to our alternate and stream the data thru it, + // TODO it won't matter. + + install_packfile(status, ¶ms.tempfile, + output_filename); + goto cleanup; + } + + if (!strcmp(status->content_type.buf, + "application/x-git-loose-object")) + { + /* + * This should not happen (when we request + * more than one object). The server can send + * us a loose object (even when we use the + * POST form) if there is only one object in + * the payload (and despite the set of accept + * headers we send), so I'm going to leave + * this here. + */ + strbuf_addstr(&status->error_message, + "received loose object when packfile expected"); + status->ec = GH__ERROR_CODE__UNEXPECTED_CONTENT_TYPE; + goto cleanup; + } + + strbuf_addf(&status->error_message, + "received unknown content-type '%s'", + status->content_type.buf); + status->ec = GH__ERROR_CODE__UNEXPECTED_CONTENT_TYPE; + goto cleanup; + } + +cleanup: + gh__request_params__release(¶ms); + jw_release(&jw_req); +} + +/* + * Bulk or individually fetch a list of objects in one or more http requests. + * Create one or more packfiles and/or loose objects. + * + * We accumulate results for each request in `result_list` until we get a + * hard error and have to stop. + */ +static void do_fetch_oidset(struct gh__response_status *status, + struct oidset *oids, + unsigned long nr_total, + struct string_list *result_list) +{ + struct oidset_iter iter; + struct strbuf output_filename = STRBUF_INIT; + struct strbuf msg = STRBUF_INIT; + struct strbuf err404 = STRBUF_INIT; + const struct object_id *oid; + unsigned long k; + unsigned long nr_taken; + int had_404 = 0; + + gh__response_status__zero(status); + if (!nr_total) + return; + + oidset_iter_init(oids, &iter); + + for (k = 0; k < nr_total; k += nr_taken) { + if (nr_total - k == 1 || gh__cmd_opts.block_size == 1) { + oid = oidset_iter_next(&iter); + nr_taken = 1; + + do__loose__gvfs_object(status, oid); + + /* + * If we get a 404 for an individual object, ignore + * it and get the rest. We'll fixup the 'ec' later. 
+ */ + if (status->ec == GH__ERROR_CODE__HTTP_404) { + if (!err404.len) + strbuf_addf(&err404, "%s: loose object %s", + status->error_message.buf, + oid_to_hex(oid)); + /* + * Mark the fetch as "incomplete", but don't + * stop trying to get other chunks. + */ + had_404 = 1; + continue; + } + + if (status->ec != GH__ERROR_CODE__OK) { + /* Stop at the first hard error. */ + strbuf_addf(&status->error_message, ": loose %s", + oid_to_hex(oid)); + goto cleanup; + } + + strbuf_setlen(&msg, 0); + strbuf_addf(&msg, "loose %s", oid_to_hex(oid)); + string_list_append(result_list, msg.buf); + + } else { + strbuf_setlen(&output_filename, 0); + + do__packfile__gvfs_objects(status, &iter, + gh__cmd_opts.block_size, + &output_filename, + &nr_taken); + + /* + * Because the oidset iterator has random + * order, it does no good to say the k-th or + * n-th chunk was incomplete; the client + * cannot use that index for anything. + * + * We get a 404 when at least one object in + * the chunk was not found. + * + * TODO Consider various retry strategies (such as + * TODO loose or bisect) on the members within this + * TODO chunk to reduce the impact of the miss. + * + * For now, ignore the 404 and go on to the + * next chunk and then fixup the 'ec' later. + */ + if (status->ec == GH__ERROR_CODE__HTTP_404) { + if (!err404.len) + strbuf_addf(&err404, + "%s: packfile object", + status->error_message.buf); + /* + * Mark the fetch as "incomplete", but don't + * stop trying to get other chunks. + */ + had_404 = 1; + continue; + } + + if (status->ec != GH__ERROR_CODE__OK) { + /* Stop at the first hard error. */ + strbuf_addstr(&status->error_message, + ": in packfile"); + goto cleanup; + } + + strbuf_setlen(&msg, 0); + strbuf_addf(&msg, "packfile %s", output_filename.buf); + string_list_append(result_list, msg.buf); + } + } + +cleanup: + strbuf_release(&msg); + strbuf_release(&err404); + strbuf_release(&output_filename); + + if (had_404 && status->ec == GH__ERROR_CODE__OK) { + strbuf_setlen(&status->error_message, 0); + strbuf_addstr(&status->error_message, "404 Not Found"); + status->ec = GH__ERROR_CODE__HTTP_404; + } +} + +/* + * Finish with initialization. This happens after the main option + * parsing, dispatch to sub-command, and sub-command option parsing + * and before actually doing anything. + * + * Optionally configure the cache-server if the sub-command will + * use it. + */ +static void finish_init(int setup_cache_server) +{ + select_odb(); + + lookup_main_url(); + gh_http_init(); + + if (setup_cache_server) + select_cache_server(); +} + +/* + * Request gvfs/config from main Git server. (Config data is not + * available from a GVFS cache-server.) + * + * Print the received server configuration (as the raw JSON string). + */ +static enum gh__error_code do_sub_cmd__config(int argc, const char **argv) +{ + struct gh__response_status status = GH__RESPONSE_STATUS_INIT; + struct strbuf config_data = STRBUF_INIT; + enum gh__error_code ec = GH__ERROR_CODE__OK; + + trace2_cmd_mode("config"); + + finish_init(0); + + do__gvfs_config(&status, &config_data); + ec = status.ec; + + if (ec == GH__ERROR_CODE__OK) + printf("%s\n", config_data.buf); + else + error("config: %s", status.error_message.buf); + + gh__response_status__release(&status); + strbuf_release(&config_data); + + return ec; +} + +/* + * Read a list of objects from stdin and fetch them in a single request (or + * multiple block-size requests). 
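+ *
+ * A usage sketch (names and values are illustrative):
+ *
+ *     git rev-list --objects HEAD |
+ *         git gvfs-helper --remote=origin get --block-size=4000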
+ */ +static enum gh__error_code do_sub_cmd__get(int argc, const char **argv) +{ + static struct option get_options[] = { + OPT_MAGNITUDE('b', "block-size", &gh__cmd_opts.block_size, + N_("number of objects to request at a time")), + OPT_INTEGER('d', "depth", &gh__cmd_opts.depth, + N_("Commit depth")), + OPT_END(), + }; + + struct gh__response_status status = GH__RESPONSE_STATUS_INIT; + struct oidset oids = OIDSET_INIT; + struct string_list result_list = STRING_LIST_INIT_DUP; + enum gh__error_code ec = GH__ERROR_CODE__OK; + unsigned long nr_total; + int k; + + trace2_cmd_mode("get"); + + if (argc > 1 && !strcmp(argv[1], "-h")) + usage_with_options(get_usage, get_options); + + argc = parse_options(argc, argv, NULL, get_options, get_usage, 0); + if (gh__cmd_opts.depth < 1) + gh__cmd_opts.depth = 1; + + finish_init(1); + + nr_total = read_stdin_from_rev_list(&oids); + + trace2_region_enter("gvfs-helper", "get", NULL); + trace2_data_intmax("gvfs-helper", NULL, "get/nr_objects", nr_total); + do_fetch_oidset(&status, &oids, nr_total, &result_list); + trace2_region_leave("gvfs-helper", "get", NULL); + + ec = status.ec; + + for (k = 0; k < result_list.nr; k++) + printf("%s\n", result_list.items[k].string); + + if (ec != GH__ERROR_CODE__OK) + error("get: %s", status.error_message.buf); + + gh__response_status__release(&status); + oidset_clear(&oids); + string_list_clear(&result_list, 0); + + return ec; +} + +/* + * Handle the 'get' command when in "server mode". Only call error() + * for hard errors where we cannot communicate correctly with the foreground + * client process. Pass any actual data errors (such as 404's or 401's from + * the fetch back to the client process. + */ +static enum gh__error_code do_server_subprocess_get(void) +{ + struct gh__response_status status = GH__RESPONSE_STATUS_INIT; + struct oidset oids = OIDSET_INIT; + struct object_id oid; + struct string_list result_list = STRING_LIST_INIT_DUP; + enum gh__error_code ec = GH__ERROR_CODE__OK; + char *line; + int len; + int err; + int k; + unsigned long nr_total = 0; + + /* + * Inside the "get" command, we expect a list of OIDs + * and a flush. + */ + while (1) { + len = packet_read_line_gently(0, NULL, &line); + if (len < 0 || !line) + break; + + if (get_oid_hex(line, &oid)) { + error("server: invalid oid syntax '%s'", line); + ec = GH__ERROR_CODE__SUBPROCESS_SYNTAX; + goto cleanup; + } + + if (!oidset_insert(&oids, &oid)) + nr_total++; + } + + if (!nr_total) { + if (packet_write_fmt_gently(1, "ok\n")) { + error("server: cannot write 'get' result to client"); + ec = GH__ERROR_CODE__SUBPROCESS_SYNTAX; + } else + ec = GH__ERROR_CODE__OK; + goto cleanup; + } + + trace2_region_enter("gvfs-helper", "server/get", NULL); + trace2_data_intmax("gvfs-helper", NULL, "server/get/nr_objects", nr_total); + do_fetch_oidset(&status, &oids, nr_total, &result_list); + trace2_region_leave("gvfs-helper", "server/get", NULL); + + /* + * Write pathname of the ODB where we wrote all of the objects + * we fetched. 
+ */ + if (packet_write_fmt_gently(1, "odb %s\n", + gh__global.buf_odb_path.buf)) { + error("server: cannot write 'odb' to client"); + ec = GH__ERROR_CODE__SUBPROCESS_SYNTAX; + goto cleanup; + } + + for (k = 0; k < result_list.nr; k++) + if (packet_write_fmt_gently(1, "%s\n", + result_list.items[k].string)) + { + error("server: cannot write result to client: '%s'", + result_list.items[k].string); + ec = GH__ERROR_CODE__SUBPROCESS_SYNTAX; + goto cleanup; + } + + err = 0; + if (ec == GH__ERROR_CODE__OK) + err = packet_write_fmt_gently(1, "ok\n"); + else if (ec == GH__ERROR_CODE__HTTP_404) + err = packet_write_fmt_gently(1, "partial\n"); + else + err = packet_write_fmt_gently(1, "error %s\n", + status.error_message.buf); + if (err) { + error("server: cannot write result to client"); + ec = GH__ERROR_CODE__SUBPROCESS_SYNTAX; + goto cleanup; + } + + if (packet_flush_gently(1)) { + error("server: cannot flush result to client"); + ec = GH__ERROR_CODE__SUBPROCESS_SYNTAX; + goto cleanup; + } + +cleanup: + oidset_clear(&oids); + string_list_clear(&result_list, 0); + + return ec; +} + +typedef enum gh__error_code (fn_subprocess_cmd)(void); + +struct subprocess_capability { + const char *name; + int client_has; + fn_subprocess_cmd *pfn; +}; + +static struct subprocess_capability caps[] = { + { "get", 0, do_server_subprocess_get }, + { NULL, 0, NULL }, +}; + +/* + * Handle the subprocess protocol handshake as described in: + * [] Documentation/technical/protocol-common.txt + * [] Documentation/technical/long-running-process-protocol.txt + */ +static int do_protocol_handshake(void) +{ +#define OUR_SUBPROCESS_VERSION "1" + + char *line; + int len; + int k; + int b_support_our_version = 0; + + len = packet_read_line_gently(0, NULL, &line); + if (len < 0 || !line || strcmp(line, "gvfs-helper-client")) { + error("server: subprocess welcome handshake failed: %s", line); + return -1; + } + + while (1) { + const char *v; + len = packet_read_line_gently(0, NULL, &line); + if (len < 0 || !line) + break; + if (!skip_prefix(line, "version=", &v)) { + error("server: subprocess version handshake failed: %s", + line); + return -1; + } + b_support_our_version |= (!strcmp(v, OUR_SUBPROCESS_VERSION)); + } + if (!b_support_our_version) { + error("server: client does not support our version: %s", + OUR_SUBPROCESS_VERSION); + return -1; + } + + if (packet_write_fmt_gently(1, "gvfs-helper-server\n") || + packet_write_fmt_gently(1, "version=%s\n", + OUR_SUBPROCESS_VERSION) || + packet_flush_gently(1)) { + error("server: cannot write version handshake"); + return -1; + } + + while (1) { + const char *v; + int k; + + len = packet_read_line_gently(0, NULL, &line); + if (len < 0 || !line) + break; + if (!skip_prefix(line, "capability=", &v)) { + error("server: subprocess capability handshake failed: %s", + line); + return -1; + } + for (k = 0; caps[k].name; k++) + if (!strcmp(v, caps[k].name)) + caps[k].client_has = 1; + } + + for (k = 0; caps[k].name; k++) + if (caps[k].client_has) + if (packet_write_fmt_gently(1, "capability=%s\n", + caps[k].name)) { + error("server: cannot write capabilities handshake: %s", + caps[k].name); + return -1; + } + if (packet_flush_gently(1)) { + error("server: cannot write capabilities handshake"); + return -1; + } + + return 0; +} + +/* + * Interactively listen to stdin for a series of commands and execute them. 
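+ *
+ * A sketch of one 'get' round-trip (pkt-line framing and the
+ * version/capability handshake elided; payloads illustrative):
+ *
+ *     client> get
+ *     client> <oid>
+ *     client> <flush>
+ *     server> odb <odb-pathname>
+ *     server> loose <oid>          (one line per fetched item)
+ *     server> ok | partial | error <message>
+ *     server> <flush>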
+ */ +static enum gh__error_code do_sub_cmd__server(int argc, const char **argv) +{ + static struct option server_options[] = { + OPT_MAGNITUDE('b', "block-size", &gh__cmd_opts.block_size, + N_("number of objects to request at a time")), + OPT_INTEGER('d', "depth", &gh__cmd_opts.depth, + N_("Commit depth")), + OPT_END(), + }; + + enum gh__error_code ec = GH__ERROR_CODE__OK; + char *line; + int len; + int k; + + trace2_cmd_mode("server"); + + if (argc > 1 && !strcmp(argv[1], "-h")) + usage_with_options(server_usage, server_options); + + argc = parse_options(argc, argv, NULL, server_options, server_usage, 0); + if (gh__cmd_opts.depth < 1) + gh__cmd_opts.depth = 1; + + finish_init(1); + + if (do_protocol_handshake()) { + ec = GH__ERROR_CODE__SUBPROCESS_SYNTAX; + goto cleanup; + } + +top_of_loop: + while (1) { + len = packet_read_line_gently(0, NULL, &line); + if (len < 0 || !line) { + /* use extra FLUSH as a QUIT */ + ec = GH__ERROR_CODE__OK; + goto cleanup; + } + + for (k = 0; caps[k].name; k++) { + if (caps[k].client_has && !strcmp(line, caps[k].name)) { + ec = (caps[k].pfn)(); + if (ec != GH__ERROR_CODE__OK) + goto cleanup; + goto top_of_loop; + } + } + + error("server: unknown command '%s'", line); + ec = GH__ERROR_CODE__SUBPROCESS_SYNTAX; + goto cleanup; + } + +cleanup: + return ec; +} + +static enum gh__error_code do_sub_cmd(int argc, const char **argv) +{ + if (!strcmp(argv[0], "get")) + return do_sub_cmd__get(argc, argv); + + if (!strcmp(argv[0], "config")) + return do_sub_cmd__config(argc, argv); + + if (!strcmp(argv[0], "server")) + return do_sub_cmd__server(argc, argv); + + // TODO have "test" mode that could be used to drive + // TODO unit testing. + + return GH__ERROR_CODE__USAGE; +} + +/* + * Communicate with the primary Git server or a GVFS cache-server using the + * GVFS Protocol. + * + * https://github.com/microsoft/VFSForGit/blob/master/Protocol.md + */ +int cmd_main(int argc, const char **argv) +{ + static struct option main_options[] = { + OPT_STRING('r', "remote", &gh__cmd_opts.remote_name, + N_("remote"), + N_("Remote name")), + OPT_BOOL('f', "fallback", &gh__cmd_opts.try_fallback, + N_("Fallback to Git server if cache-server fails")), + OPT_CALLBACK(0, "cache-server", NULL, + N_("cache-server"), + N_("cache-server=disable|trust|verify|error"), + option_parse_cache_server_mode), + OPT_CALLBACK(0, "shared-cache", NULL, + N_("pathname"), + N_("Pathname to shared objects directory"), + option_parse_shared_cache_directory), + OPT_BOOL('p', "progress", &gh__cmd_opts.show_progress, + N_("Show progress")), + OPT_END(), + }; + + enum gh__error_code ec = GH__ERROR_CODE__OK; + + if (argc > 1 && !strcmp(argv[1], "-h")) + usage_with_options(main_usage, main_options); + + trace2_cmd_name("gvfs-helper"); + + setup_git_directory_gently(NULL); + + git_config(git_default_config, NULL); + + /* Set any non-zero initial values in gh__cmd_opts. 
*/ + gh__cmd_opts.depth = 1; + gh__cmd_opts.block_size = GH__DEFAULT_BLOCK_SIZE; + gh__cmd_opts.show_progress = !!isatty(2); + + argc = parse_options(argc, argv, NULL, main_options, main_usage, + PARSE_OPT_STOP_AT_NON_OPTION); + if (argc == 0) + usage_with_options(main_usage, main_options); + + ec = do_sub_cmd(argc, argv); + + gh_http_cleanup(); + + if (ec == GH__ERROR_CODE__USAGE) + usage_with_options(main_usage, main_options); + + return ec; +} diff --git a/promisor-remote.c b/promisor-remote.c index baaea12fd69b3b..2913221ddad8b4 100644 --- a/promisor-remote.c +++ b/promisor-remote.c @@ -3,6 +3,7 @@ #include "promisor-remote.h" #include "config.h" #include "transport.h" +#include "gvfs-helper-client.h" static char *repository_format_partial_clone; static const char *core_partial_clone_filter_default; @@ -37,6 +38,7 @@ static int fetch_objects(const char *remote_name, struct ref *ref = NULL; int i; + for (i = 0; i < oid_nr; i++) { struct ref *new_ref = alloc_ref(oid_to_hex(&oids[i])); oidcpy(&new_ref->old_oid, &oids[i]); @@ -196,7 +198,7 @@ struct promisor_remote *promisor_remote_find(const char *remote_name) int has_promisor_remote(void) { - return !!promisor_remote_find(NULL); + return core_use_gvfs_helper || !!promisor_remote_find(NULL); } static int remove_fetched_oids(struct repository *repo, @@ -243,6 +245,13 @@ int promisor_remote_get_direct(struct repository *repo, if (oid_nr == 0) return 0; + if (core_use_gvfs_helper) { + enum gh_client__created ghc = GHC__CREATED__NOTHING; + + trace2_data_intmax("bug", the_repository, "fetch_objects/gvfs-helper", oid_nr); + gh_client__queue_oid_array(oids, oid_nr); + return gh_client__drain_queue(&ghc); + } promisor_remote_init(); diff --git a/sha1-file.c b/sha1-file.c index 91e58f144eeb61..2af2afa44491a7 100644 --- a/sha1-file.c +++ b/sha1-file.c @@ -35,6 +35,7 @@ #include "sigchain.h" #include "sub-process.h" #include "pkt-line.h" +#include "gvfs-helper-client.h" /* The maximum size for an object header. */ #define MAX_HEADER_LEN 32 @@ -1601,7 +1602,7 @@ static int do_oid_object_info_extended(struct repository *r, const struct object_id *real = oid; int already_retried = 0; int tried_hook = 0; - + int tried_gvfs_helper = 0; if (flags & OBJECT_INFO_LOOKUP_REPLACE) real = lookup_replace_object(r, oid); @@ -1642,13 +1643,41 @@ static int do_oid_object_info_extended(struct repository *r, if (!loose_object_info(r, real, oi, flags)) return 0; + if (core_use_gvfs_helper && !tried_gvfs_helper) { + enum gh_client__created ghc; + + if (flags & OBJECT_INFO_SKIP_FETCH_OBJECT) + return -1; + + gh_client__get_immediate(real, &ghc); + tried_gvfs_helper = 1; + + /* + * Retry the lookup IIF `gvfs-helper` created one + * or more new packfiles or loose objects. + */ + if (ghc != GHC__CREATED__NOTHING) + continue; + + /* + * If `gvfs-helper` fails, we just want to return -1. + * But allow the other providers to have a shot at it. + * (At least until we have a chance to consolidate + * them.) + */ + } + /* Not a loose object; someone else may have just packed it. */ if (!(flags & OBJECT_INFO_QUICK)) { reprepare_packed_git(r); if (find_pack_entry(r, real, &e)) break; if (core_virtualize_objects && !tried_hook) { + // TODO Assert or at least trace2 if gvfs-helper + // TODO was tried and failed and then read-object-hook + // TODO is successful at getting this object. tried_hook = 1; + // TODO BUG? Should 'oid' be 'real' ? 
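+			// TODO ('real' holds the lookup-replace resolved
+			// TODO OID when OBJECT_INFO_LOOKUP_REPLACE is set,
+			// TODO so passing 'oid' here may fetch the original
+			// TODO object rather than its replacement.)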
 			if (!read_object_process(oid))
 				goto retry;
 		}

diff --git a/t/helper/.gitignore b/t/helper/.gitignore
index 48c7bb0bbb1dc8..1a736631e8f94a 100644
--- a/t/helper/.gitignore
+++ b/t/helper/.gitignore
@@ -1,4 +1,5 @@
 /test-tool
 /test-fake-ssh
+/test-gvfs-protocol
 /test-line-buffer
 /test-svn-fe

From 1cf9d052fd46c1bdc51070e8d03dd5eb85e5ac9c Mon Sep 17 00:00:00 2001
From: Derrick Stolee
Date: Fri, 4 Oct 2019 15:30:34 -0400
Subject: [PATCH 075/104] gvfs-helper: fix race condition when creating loose
 object dirs

When two gvfs-helper processes are the first to create a loose object
directory, the processes (A and B in the timeline below) could have the
following race:

1. A sees that the directory does not exist.
2. B sees that the directory does not exist.
3. A creates the directory with success.
4. B tries to create the directory and fails.

Instead of having B fail here, just check for the directory's existence
again before reporting an error. That solves the race and allows tests
to pass.

Signed-off-by: Derrick Stolee
---
 gvfs-helper.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/gvfs-helper.c b/gvfs-helper.c
index 60ae7a8a62fa89..d2cc087d74284c 100644
--- a/gvfs-helper.c
+++ b/gvfs-helper.c
@@ -1049,7 +1049,9 @@ static void create_tempfile_for_loose(
 	strbuf_complete(&buf_path, '/');
 	strbuf_add(&buf_path, hex, 2);
 
-	if (!file_exists(buf_path.buf) && mkdir(buf_path.buf, 0777) == -1) {
+	if (!file_exists(buf_path.buf) &&
+	    mkdir(buf_path.buf, 0777) == -1 &&
+	    !file_exists(buf_path.buf)) {
 		strbuf_addf(&status->error_message,
 			    "cannot create directory for loose object '%s'",
 			    buf_path.buf);

From 0cee3acba95070420ebee4b74efc8c549b5afed3 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler
Date: Mon, 7 Oct 2019 13:55:42 -0400
Subject: [PATCH 076/104] sha1-file: create shared-cache directory if it
 doesn't exist

The config variable `gvfs.sharedCache` contains the pathname to an
alternate that will be used by `gvfs-helper` to store
dynamically-fetched missing objects. If this directory does not exist
on disk, `prepare_alt_odb()` omits this directory from the in-memory
list of alternates. This causes `git` commands (and `gvfs-helper` in
particular) to fall back to `.git/objects` for storage of these
objects. This disables the shared-cache and leads to poorer
performance.

Teach `alt_odb_usable()` and `prepare_alt_odb()` to match up the
directory named in `gvfs.sharedCache` with an entry in
`.git/objects/info/alternates` and force-create the `` root directory
(and the associated `/pack` directory) if necessary.

If the value of `gvfs.sharedCache` refers to a directory that is NOT
listed as an alternate, create an in-memory alternate entry in the
odb-list. (This is similar to how GIT_ALTERNATE_OBJECT_DIRECTORIES
works.)

This work happens the first time that `prepare_alt_odb()` is called.

Furthermore, teach the `--shared-cache=` command line option in
`gvfs-helper` (which runs after the first call to `prepare_alt_odb()`)
to override the inherited shared-cache (and again, create the ODB
directory if necessary).
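For illustration, with a (hypothetical) configuration such as:

    [gvfs]
        sharedCache = /shared/objcache

the first call to `prepare_alt_odb()` will force-create
`/shared/objcache` and `/shared/objcache/pack` if needed and keep the
directory on the in-memory alternate list, even when it is not named
in `.git/objects/info/alternates`.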
Signed-off-by: Jeff Hostetler --- cache.h | 2 +- config.c | 12 ++--- environment.c | 2 +- gvfs-helper-client.c | 19 ++++++++ gvfs-helper.c | 107 +++++++++++++++++++++++++++++++++---------- sha1-file.c | 75 ++++++++++++++++++++++++++++++ 6 files changed, 184 insertions(+), 33 deletions(-) diff --git a/cache.h b/cache.h index ebad31e272ccec..4004d27b7dd606 100644 --- a/cache.h +++ b/cache.h @@ -963,7 +963,7 @@ extern int protect_ntfs; extern const char *core_fsmonitor; extern int core_use_gvfs_helper; extern const char *gvfs_cache_server_url; -extern const char *gvfs_shared_cache_pathname; +extern struct strbuf gvfs_shared_cache_pathname; extern int core_apply_sparse_checkout; extern int core_sparse_checkout_cone; diff --git a/config.c b/config.c index 7b2e5d87b36ffd..fc69f7a6a999fc 100644 --- a/config.c +++ b/config.c @@ -1523,19 +1523,17 @@ static int git_default_gvfs_config(const char *var, const char *value) } if (!strcmp(var, "gvfs.sharedcache") && value && *value) { - struct strbuf buf = STRBUF_INIT; - strbuf_addstr(&buf, value); - if (strbuf_normalize_path(&buf) < 0) { + strbuf_setlen(&gvfs_shared_cache_pathname, 0); + strbuf_addstr(&gvfs_shared_cache_pathname, value); + if (strbuf_normalize_path(&gvfs_shared_cache_pathname) < 0) { /* * Pretend it wasn't set. This will cause us to * fallback to ".git/objects" effectively. */ - strbuf_release(&buf); + strbuf_release(&gvfs_shared_cache_pathname); return 0; } - strbuf_trim_trailing_dir_sep(&buf); - - gvfs_shared_cache_pathname = strbuf_detach(&buf, NULL); + strbuf_trim_trailing_dir_sep(&gvfs_shared_cache_pathname); return 0; } diff --git a/environment.c b/environment.c index 398455fb9318b7..ced15492b7e0c1 100644 --- a/environment.c +++ b/environment.c @@ -89,7 +89,7 @@ int protect_ntfs = PROTECT_NTFS_DEFAULT; const char *core_fsmonitor; int core_use_gvfs_helper; const char *gvfs_cache_server_url; -const char *gvfs_shared_cache_pathname; +struct strbuf gvfs_shared_cache_pathname = STRBUF_INIT; /* * The character that begins a commented line in user-editable file diff --git a/gvfs-helper-client.c b/gvfs-helper-client.c index 653b57dfd01563..41e05f4ce4b6c2 100644 --- a/gvfs-helper-client.c +++ b/gvfs-helper-client.c @@ -227,13 +227,32 @@ static int gh_client__get__receive_response( return err; } +/* + * Select the preferred ODB for fetching missing objects. + * This should be the alternate with the same directory + * name as set in `gvfs.sharedCache`. + * + * Fallback to .git/objects if necessary. + */ static void gh_client__choose_odb(void) { + struct object_directory *odb; + if (gh_client__chosen_odb) return; prepare_alt_odb(the_repository); gh_client__chosen_odb = the_repository->objects->odb; + + if (!gvfs_shared_cache_pathname.len) + return; + + for (odb = the_repository->objects->odb->next; odb; odb = odb->next) { + if (!strcmp(odb->path, gvfs_shared_cache_pathname.buf)) { + gh_client__chosen_odb = odb; + return; + } + } } static int gh_client__get(enum gh_client__created *p_ghc) diff --git a/gvfs-helper.c b/gvfs-helper.c index d2cc087d74284c..d2d7551dfbbf4a 100644 --- a/gvfs-helper.c +++ b/gvfs-helper.c @@ -81,10 +81,11 @@ // // Fetch 1 or more objects. If a cache-server is configured, // try it first. Optionally fallback to the main Git server. +// // Create 1 or more loose objects and/or packfiles in the -// requested shared-cache directory (given on the command -// line and which is reported at the beginning of the -// response). +// shared-cache ODB. 
(The pathname of the selected ODB is
+// reported at the beginning of the response; this should
+// match the pathname given on the command line).
 //
 // git> get
 // git> 
@@ -632,26 +633,88 @@ static int option_parse_cache_server_mode(const struct option *opt,
 }
 
 /*
- * Let command line args override "gvfs.sharedcache" config setting.
+ * Let command line args override "gvfs.sharedcache" config setting
+ * and override the value set by git_default_config().
+ *
+ * The command line is parsed *AFTER* the config is loaded, so
+ * prepare_alt_odb() has already been called and any default or inherited
+ * shared-cache has already been set.
- *
- * It would be nice to move this to parse-options.c as an
- * OPTION_PATHNAME handler.  And maybe have flags for exists()
- * and is_directory().
+ * We have a chance to override it here.
  */
 static int option_parse_shared_cache_directory(const struct option *opt,
 					       const char *arg, int unset)
 {
+	struct strbuf buf_arg = STRBUF_INIT;
+
 	if (unset) /* should not happen */
 		return error(_("missing value for switch '%s'"),
 			     opt->long_name);
 
-	if (!is_directory(arg))
-		return error(_("value for switch '%s' is not a directory: '%s'"),
-			     opt->long_name, arg);
+	strbuf_addstr(&buf_arg, arg);
+	if (strbuf_normalize_path(&buf_arg) < 0) {
+		/*
+		 * Pretend command line wasn't given.  Use whatever
+		 * settings we already have from the config.
+		 */
+		strbuf_release(&buf_arg);
+		return 0;
+	}
+	strbuf_trim_trailing_dir_sep(&buf_arg);
+
+	if (!strbuf_cmp(&buf_arg, &gvfs_shared_cache_pathname)) {
+		/*
+		 * The command line argument matches what we got from
+		 * the config, so we're already set up correctly.  (And
+		 * we have already verified that the directory exists
+		 * on disk.)
+		 */
+		strbuf_release(&buf_arg);
+		return 0;
+	}
+
+	else if (!gvfs_shared_cache_pathname.len) {
+		/*
+		 * A shared-cache was requested and we did not inherit one.
+		 * Try it, but let alt_odb_usable() secretly disable it if
+		 * it cannot create the directory on disk.
+		 */
+		strbuf_addbuf(&gvfs_shared_cache_pathname, &buf_arg);
 
-	gvfs_shared_cache_pathname = arg;
+		add_to_alternates_memory(buf_arg.buf);
 
-	return 0;
+		strbuf_release(&buf_arg);
+		return 0;
+	}
+
+	else {
+		/*
+		 * The requested shared-cache is different from the one
+		 * we inherited.  Replace the inherited value with this
+		 * one, but smartly fall back if necessary.
+		 */
+		struct strbuf buf_prev = STRBUF_INIT;
+
+		strbuf_addbuf(&buf_prev, &gvfs_shared_cache_pathname);
+
+		strbuf_setlen(&gvfs_shared_cache_pathname, 0);
+		strbuf_addbuf(&gvfs_shared_cache_pathname, &buf_arg);
+
+		add_to_alternates_memory(buf_arg.buf);
+
+		/*
+		 * alt_odb_usable() releases gvfs_shared_cache_pathname
+		 * if it cannot create the directory on disk, so fall back
+		 * to the previous choice when it fails.
+		 */
+		if (!gvfs_shared_cache_pathname.len)
+			strbuf_addbuf(&gvfs_shared_cache_pathname,
+				      &buf_prev);
+
+		strbuf_release(&buf_arg);
+		strbuf_release(&buf_prev);
+		return 0;
+	}
 }
 
 /*
@@ -949,24 +1012,20 @@ static void approve_cache_server_creds(void)
 }
 
 /*
- * Select the ODB directory where we will write objects that we
- * download.  If was given on the command line or define in the
- * config, use the local ODB (in ".git/objects").
+ * Get the pathname to the ODB where we write objects that we download.
*/ static void select_odb(void) { - const char *odb_path = NULL; + prepare_alt_odb(the_repository); strbuf_init(&gh__global.buf_odb_path, 0); - if (gvfs_shared_cache_pathname && *gvfs_shared_cache_pathname) - odb_path = gvfs_shared_cache_pathname; - else { - prepare_alt_odb(the_repository); - odb_path = the_repository->objects->odb->path; - } - - strbuf_addstr(&gh__global.buf_odb_path, odb_path); + if (gvfs_shared_cache_pathname.len) + strbuf_addbuf(&gh__global.buf_odb_path, + &gvfs_shared_cache_pathname); + else + strbuf_addstr(&gh__global.buf_odb_path, + the_repository->objects->odb->path); } /* diff --git a/sha1-file.c b/sha1-file.c index 2af2afa44491a7..fcd66821805cc6 100644 --- a/sha1-file.c +++ b/sha1-file.c @@ -460,6 +460,8 @@ const char *loose_object_path(struct repository *r, struct strbuf *buf, return odb_loose_path(r->objects->odb, buf, oid); } +static int gvfs_matched_shared_cache_to_alternate; + /* * Return non-zero iff the path is usable as an alternate object database. */ @@ -469,6 +471,52 @@ static int alt_odb_usable(struct raw_object_store *o, { struct object_directory *odb; + if (!strbuf_cmp(path, &gvfs_shared_cache_pathname)) { + /* + * `gvfs.sharedCache` is the preferred alternate that we + * will use with `gvfs-helper.exe` to dynamically fetch + * missing objects. It is set during git_default_config(). + * + * Make sure the directory exists on disk before we let the + * stock code discredit it. + */ + struct strbuf buf_pack_foo = STRBUF_INIT; + enum scld_error scld; + + /* + * Force create the "" and "/pack" directories, if + * not present on disk. Append an extra bogus directory to + * get safe_create_leading_directories() to see "/pack" + * as a leading directory of something deeper (which it + * won't create). + */ + strbuf_addf(&buf_pack_foo, "%s/pack/foo", path->buf); + + scld = safe_create_leading_directories(buf_pack_foo.buf); + if (scld != SCLD_OK && scld != SCLD_EXISTS) { + error_errno(_("could not create shared-cache ODB '%s'"), + gvfs_shared_cache_pathname.buf); + + strbuf_release(&buf_pack_foo); + + /* + * Pretend no shared-cache was requested and + * effectively fallback to ".git/objects" for + * fetching missing objects. + */ + strbuf_release(&gvfs_shared_cache_pathname); + return 0; + } + + /* + * We know that there is an alternate (either from + * .git/objects/info/alternates or from a memory-only + * entry) associated with the shared-cache directory. + */ + gvfs_matched_shared_cache_to_alternate++; + strbuf_release(&buf_pack_foo); + } + /* Detect cases where alternate disappeared */ if (!is_directory(path->buf)) { error(_("object directory %s does not exist; " @@ -879,6 +927,33 @@ void prepare_alt_odb(struct repository *r) link_alt_odb_entries(r, r->objects->alternate_db, PATH_SEP, NULL, 0); read_info_alternates(r, r->objects->odb->path, 0); + + if (gvfs_shared_cache_pathname.len && + !gvfs_matched_shared_cache_to_alternate) { + /* + * There is no entry in .git/objects/info/alternates for + * the requested shared-cache directory. Therefore, the + * odb-list does not contain this directory. + * + * Force this directory into the odb-list as an in-memory + * alternate. Implicitly create the directory on disk, if + * necessary. + * + * See GIT_ALTERNATE_OBJECT_DIRECTORIES for another example + * of this kind of usage. + * + * Note: This has the net-effect of allowing Git to treat + * `gvfs.sharedCache` as an unofficial alternate. 
This + * usage should be discouraged for compatbility reasons + * with other tools in the overall Git ecosystem (that + * won't know about this trick). It would be much better + * for us to update .git/objects/info/alternates instead. + * The code here is considered a backstop. + */ + link_alt_odb_entries(r, gvfs_shared_cache_pathname.buf, + '\n', NULL, 0); + } + r->objects->loaded_alternates = 1; } From 6e47e3d58711e8d10dd704690bbb5b4f55847c31 Mon Sep 17 00:00:00 2001 From: Jeff Hostetler Date: Tue, 8 Oct 2019 14:01:26 -0400 Subject: [PATCH 077/104] gvfs-helper: better handling of network errors Add trace2 message for CURL and HTTP errors. Fix typo reporting network error code back to gvfs-helper-client. Signed-off-by: Jeff Hostetler --- gvfs-helper.c | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/gvfs-helper.c b/gvfs-helper.c index d2d7551dfbbf4a..be7b154e9fdfb3 100644 --- a/gvfs-helper.c +++ b/gvfs-helper.c @@ -405,10 +405,16 @@ static void gh__response_status__set_from_slot( strbuf_addf(&status->error_message, "%s (curl)", curl_easy_strerror(status->curl_code)); status->ec = GH__ERROR_CODE__CURL_ERROR; + + trace2_data_string("gvfs-helper", NULL, + "error/curl", status->error_message.buf); } else { strbuf_addf(&status->error_message, "HTTP %ld Unexpected", status->response_code); status->ec = GH__ERROR_CODE__HTTP_UNEXPECTED_CODE; + + trace2_data_string("gvfs-helper", NULL, + "error/http", status->error_message.buf); } if (status->ec != GH__ERROR_CODE__OK) @@ -1968,7 +1974,7 @@ static enum gh__error_code do_sub_cmd__get(int argc, const char **argv) } /* - * Handle the 'get' command when in "server mode". Only call error() + * Handle the 'get' command when in "server mode". Only call error() and set ec * for hard errors where we cannot communicate correctly with the foreground * client process. Pass any actual data errors (such as 404's or 401's from * the fetch back to the client process. @@ -2040,10 +2046,15 @@ static enum gh__error_code do_server_subprocess_get(void) goto cleanup; } + /* + * We only use status.ec to tell the client whether the request + * was complete, incomplete, or had IO errors. We DO NOT return + * this value to our caller. + */ err = 0; - if (ec == GH__ERROR_CODE__OK) + if (status.ec == GH__ERROR_CODE__OK) err = packet_write_fmt_gently(1, "ok\n"); - else if (ec == GH__ERROR_CODE__HTTP_404) + else if (status.ec == GH__ERROR_CODE__HTTP_404) err = packet_write_fmt_gently(1, "partial\n"); else err = packet_write_fmt_gently(1, "error %s\n", @@ -2270,6 +2281,7 @@ int cmd_main(int argc, const char **argv) usage_with_options(main_usage, main_options); trace2_cmd_name("gvfs-helper"); + packet_trace_identity("gvfs-helper"); setup_git_directory_gently(NULL); From a52413b15f9643eddc8951f958640d610b3cc61e Mon Sep 17 00:00:00 2001 From: Jeff Hostetler Date: Tue, 8 Oct 2019 14:30:25 -0400 Subject: [PATCH 078/104] gvfs-helper-client: properly update loose cache with fetched OID Fix parsing of the "loose " response from `gvfs-helper` and use the actually parsed OID when updating the loose oid cache. Previously, an uninitialized "struct oid" was used to update the cache. This did not cause any corruption, but could cause extra fetches for objects visited multiple times. 
Signed-off-by: Jeff Hostetler --- gvfs-helper-client.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/gvfs-helper-client.c b/gvfs-helper-client.c index 41e05f4ce4b6c2..ae2c5d1951b5ec 100644 --- a/gvfs-helper-client.c +++ b/gvfs-helper-client.c @@ -113,6 +113,9 @@ static void gh_client__update_loose_cache(const char *line) if (!skip_prefix(line, "loose ", &v1_oid)) BUG("update_loose_cache: invalid line '%s'", line); + if (get_oid_hex(v1_oid, &oid)) + BUG("update_loose_cache: invalid line '%s'", line); + odb_loose_cache_add_new_oid(gh_client__chosen_odb, &oid); } From 0eb1006a8ef40b297af85f889efcbc85d5234fa1 Mon Sep 17 00:00:00 2001 From: Jeff Hostetler Date: Thu, 10 Oct 2019 10:58:07 -0400 Subject: [PATCH 079/104] gvfs-helper: V2 robust retry and throttling Add robust-retry mechanism to automatically retry a request after network errors. This includes retry after: [] transient network problems reported by CURL. [] http 429 throttling (with associated Retry-After) [] http 503 server unavailable (with associated Retry-After) Add voluntary throttling using Azure X-RateLimit-* hints to avoid being soft-throttled (tarpitted) or hard-throttled (429) on later requests. Add global (outside of a single request) azure-throttle data to track the rate limit hints from the cache-server and main Git server independently. Add exponential retry backoff. This is used for transient network problems when we don't have a Retry-After hint. Move the call to index-pack earlier in the response/error handling sequence so that if we receive a 200 but yet the packfile is truncated/corrupted, we can use the regular retry logic to get it again. Refactor the way we create tempfiles for packfiles to use /pack/tempPacks/ rather than working directly in the /pack/ directory. Move the code to create a new tempfile to the start of a single request attempt (initial and retry attempts), rather than at the overall start of a request. This gives us a fresh tempfile for each network request attempt. This simplifies the retry mechanism and isolates us from the file ownership issues hidden within the tempfile class. And avoids the need to truncate previous incomplete results. This was necessary because index-pack was pulled into the retry loop. Minor: Add support for logging X-VSS-E2EID to telemetry on network errors. Minor: rename variable: params.b_no_cache_server --> params.b_permit_cache_server_if_defined. This variable is used to indicate whether we should try to use the cache-server when it is defined. Got rid of double-negative logic. Minor: rename variable: params.label --> params.tr2_label Clarify that this variable is only used with trace2 logging. Minor: Move the code to automatically map cache-server 400 responses to normal 401 response earlier in the response/error handling sequence to simplify later retry logic. Minor: Decorate trace2 messages with "(cs)" or "(main)" to identify the server in log messages. Add params->server_type to simplify this. Signed-off-by: Jeff Hostetler --- gvfs-helper.c | 1429 ++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 1126 insertions(+), 303 deletions(-) diff --git a/gvfs-helper.c b/gvfs-helper.c index be7b154e9fdfb3..33a623edf856da 100644 --- a/gvfs-helper.c +++ b/gvfs-helper.c @@ -61,6 +61,11 @@ // // --depth= // defaults to "1" // +// --max-retries= // defaults to "6" +// +// Number of retries after transient network errors. +// Set to zero to disable such retries. +// // server // // Interactive/sub-process mode. 
Listen for a series of commands @@ -77,6 +82,11 @@ // // --depth= // defaults to "1" // +// --max-retries= // defaults to "6" +// +// Number of retries after transient network errors. +// Set to zero to disable such retries. +// // Interactive verb: get // // Fetch 1 or more objects. If a cache-server is configured, @@ -166,8 +176,26 @@ static const char *const server_usage[] = { NULL }; +/* + * "commitDepth" field in gvfs protocol + */ +#define GH__DEFAULT_COMMIT_DEPTH 1 + +/* + * Chunk/block size in number of objects we request in each packfile + */ #define GH__DEFAULT_BLOCK_SIZE 4000 +/* + * Retry attempts (after the initial request) for transient errors and 429s. + */ +#define GH__DEFAULT_MAX_RETRIES 6 + +/* + * Maximum delay in seconds for transient (network) error retries. + */ +#define GH__DEFAULT_MAX_TRANSIENT_BACKOFF_SEC 300 + /* * Our exit-codes. */ @@ -175,16 +203,18 @@ enum gh__error_code { GH__ERROR_CODE__USAGE = -1, /* will be mapped to usage() */ GH__ERROR_CODE__OK = 0, GH__ERROR_CODE__ERROR = 1, /* unspecified */ -// GH__ERROR_CODE__CACHE_SERVER_NOT_FOUND = 2, - GH__ERROR_CODE__CURL_ERROR = 3, - GH__ERROR_CODE__HTTP_401 = 4, - GH__ERROR_CODE__HTTP_404 = 5, - GH__ERROR_CODE__HTTP_UNEXPECTED_CODE = 6, - GH__ERROR_CODE__UNEXPECTED_CONTENT_TYPE = 7, + GH__ERROR_CODE__CURL_ERROR = 2, + GH__ERROR_CODE__HTTP_401 = 3, + GH__ERROR_CODE__HTTP_404 = 4, + GH__ERROR_CODE__HTTP_429 = 5, + GH__ERROR_CODE__HTTP_503 = 6, + GH__ERROR_CODE__HTTP_OTHER = 7, + GH__ERROR_CODE__UNEXPECTED_CONTENT_TYPE = 8, GH__ERROR_CODE__COULD_NOT_CREATE_TEMPFILE = 8, - GH__ERROR_CODE__COULD_NOT_INSTALL_LOOSE = 9, - GH__ERROR_CODE__COULD_NOT_INSTALL_PACKFILE = 10, - GH__ERROR_CODE__SUBPROCESS_SYNTAX = 11, + GH__ERROR_CODE__COULD_NOT_INSTALL_LOOSE = 10, + GH__ERROR_CODE__COULD_NOT_INSTALL_PACKFILE = 11, + GH__ERROR_CODE__SUBPROCESS_SYNTAX = 12, + GH__ERROR_CODE__INDEX_PACK_FAILED = 13, }; enum gh__cache_server_mode { @@ -210,6 +240,8 @@ static struct gh__cmd_opts { int depth; int block_size; + int max_retries; + int max_transient_backoff_sec; enum gh__cache_server_mode cache_server_mode; } gh__cmd_opts; @@ -234,6 +266,47 @@ static struct gh__global { } gh__global; +enum gh__server_type { + GH__SERVER_TYPE__MAIN = 0, + GH__SERVER_TYPE__CACHE = 1, + + GH__SERVER_TYPE__NR, +}; + +static const char *gh__server_type_label[GH__SERVER_TYPE__NR] = { + "(main)", + "(cs)" +}; + +struct gh__azure_throttle +{ + unsigned long tstu_limit; + unsigned long tstu_remaining; + + unsigned long reset_sec; + unsigned long retry_after_sec; +}; + +static void gh__azure_throttle__zero(struct gh__azure_throttle *azure) +{ + azure->tstu_limit = 0; + azure->tstu_remaining = 0; + azure->reset_sec = 0; + azure->retry_after_sec = 0; +} + +#define GH__AZURE_THROTTLE_INIT { \ + .tstu_limit = 0, \ + .tstu_remaining = 0, \ + .reset_sec = 0, \ + .retry_after_sec = 0, \ + } + +static struct gh__azure_throttle gh__global_throttle[GH__SERVER_TYPE__NR] = { + GH__AZURE_THROTTLE_INIT, + GH__AZURE_THROTTLE_INIT, +}; + /* * Stolen from http.c */ @@ -262,7 +335,12 @@ enum gh__progress_state { struct gh__request_params { int b_is_post; /* POST=1 or GET=0 */ int b_write_to_file; /* write to file=1 or strbuf=0 */ - int b_no_cache_server; /* force main server only */ + int b_permit_cache_server_if_defined; + + enum gh__server_type server_type; + + int k_attempt; /* robust retry attempt */ + int k_transient_delay_sec; /* delay before transient error retries */ unsigned long object_count; /* number of objects being fetched */ @@ -271,9 +349,16 @@ struct 
gh__request_params { struct curl_slist *headers; /* additional http headers to send */ struct tempfile *tempfile; /* for response content when file */ struct strbuf *buffer; /* for response content when strbuf */ - struct strbuf label; /* for trace2 regions */ + struct strbuf tr2_label; /* for trace2 regions */ struct strbuf loose_path; + struct object_id loose_oid; + + struct strbuf temp_path_pack; + struct strbuf temp_path_idx; + struct strbuf final_path_pack; + struct strbuf final_path_idx; + struct strbuf final_packfile_filename; /* * Note that I am putting all of the progress-related instance data @@ -295,24 +380,36 @@ struct gh__request_params { */ struct strbuf progress_msg; struct progress *progress; + + struct strbuf e2eid; }; #define GH__REQUEST_PARAMS_INIT { \ .b_is_post = 0, \ .b_write_to_file = 0, \ - .b_no_cache_server = 0, \ + .b_permit_cache_server_if_defined = 1, \ + .server_type = GH__SERVER_TYPE__MAIN, \ + .k_attempt = 0, \ + .k_transient_delay_sec = 0, \ .object_count = 0, \ .post_payload = NULL, \ .headers = NULL, \ .tempfile = NULL, \ .buffer = NULL, \ - .label = STRBUF_INIT, \ + .tr2_label = STRBUF_INIT, \ .loose_path = STRBUF_INIT, \ + .loose_oid = {{0}}, \ + .temp_path_pack = STRBUF_INIT, \ + .temp_path_idx = STRBUF_INIT, \ + .final_path_pack = STRBUF_INIT, \ + .final_path_idx = STRBUF_INIT, \ + .final_packfile_filename = STRBUF_INIT, \ .progress_state = GH__PROGRESS_STATE__START, \ .progress_base_phase2_msg = STRBUF_INIT, \ .progress_base_phase3_msg = STRBUF_INIT, \ .progress_msg = STRBUF_INIT, \ .progress = NULL, \ + .e2eid = STRBUF_INIT, \ } static void gh__request_params__release(struct gh__request_params *params) @@ -329,8 +426,13 @@ static void gh__request_params__release(struct gh__request_params *params) params->buffer = NULL; /* we do not own this */ - strbuf_release(¶ms->label); + strbuf_release(¶ms->tr2_label); strbuf_release(¶ms->loose_path); + strbuf_release(¶ms->temp_path_pack); + strbuf_release(¶ms->temp_path_idx); + strbuf_release(¶ms->final_path_pack); + strbuf_release(¶ms->final_path_idx); + strbuf_release(¶ms->final_packfile_filename); strbuf_release(¶ms->progress_base_phase2_msg); strbuf_release(¶ms->progress_base_phase3_msg); @@ -338,8 +440,55 @@ static void gh__request_params__release(struct gh__request_params *params) stop_progress(¶ms->progress); params->progress = NULL; + + strbuf_release(¶ms->e2eid); } +/* + * How we handle retries for various unexpected network errors. + */ +enum gh__retry_mode { + /* + * The operation was successful, so no retry is needed. + * Use this for HTTP 200, for example. + */ + GH__RETRY_MODE__SUCCESS = 0, + + /* + * Retry using the normal 401 Auth mechanism. + */ + GH__RETRY_MODE__HTTP_401, + + /* + * Fail because at least one of the requested OIDs does not exist. + */ + GH__RETRY_MODE__FAIL_404, + + /* + * A transient network error, such as dropped connection + * or network IO error. Our belief is that a retry MAY + * succeed. (See Gremlins and Cosmic Rays....) + */ + GH__RETRY_MODE__TRANSIENT, + + /* + * Request was blocked completely because of a 429. + */ + GH__RETRY_MODE__HTTP_429, + + /* + * Request failed because the server was (temporarily?) offline. + */ + GH__RETRY_MODE__HTTP_503, + + /* + * The operation had a hard failure and we have no + * expectation that a second attempt will give a different + * answer, such as a bad hostname or a mal-formed URL. 
+ */ + GH__RETRY_MODE__HARD_FAIL, +}; + /* * Bucket to describe the results of an HTTP requests (may be * overwritten during retries so that it describes the final attempt). @@ -350,7 +499,9 @@ struct gh__response_status { long response_code; /* http response code */ CURLcode curl_code; enum gh__error_code ec; + enum gh__retry_mode retry; intmax_t bytes_received; + struct gh__azure_throttle *azure; }; #define GH__RESPONSE_STATUS_INIT { \ @@ -359,7 +510,9 @@ struct gh__response_status { .response_code = 0, \ .curl_code = CURLE_OK, \ .ec = GH__ERROR_CODE__OK, \ + .retry = GH__RETRY_MODE__SUCCESS, \ .bytes_received = 0, \ + .azure = NULL, \ } static void gh__response_status__zero(struct gh__response_status *s) @@ -369,7 +522,323 @@ static void gh__response_status__zero(struct gh__response_status *s) s->response_code = 0; s->curl_code = CURLE_OK; s->ec = GH__ERROR_CODE__OK; + s->retry = GH__RETRY_MODE__SUCCESS; s->bytes_received = 0; + s->azure = NULL; +} + +static void install_packfile(struct gh__request_params *params, + struct gh__response_status *status); +static void install_loose(struct gh__request_params *params, + struct gh__response_status *status); + +/* + * Log the E2EID for the current request. + * + * Since every HTTP request to the cache-server and to the main Git server + * will send back a unique E2EID (probably a GUID), we don't want to overload + * telemetry with each ID -- rather, only the ones for which there was a + * problem and that may be helpful in a post mortem. + */ +static void log_e2eid(struct gh__request_params *params, + struct gh__response_status *status) +{ + if (!params->e2eid.len) + return; + + switch (status->retry) { + default: + case GH__RETRY_MODE__SUCCESS: + case GH__RETRY_MODE__HTTP_401: + case GH__RETRY_MODE__FAIL_404: + return; + + case GH__RETRY_MODE__HARD_FAIL: + case GH__RETRY_MODE__TRANSIENT: + case GH__RETRY_MODE__HTTP_429: + case GH__RETRY_MODE__HTTP_503: + break; + } + + if (trace2_is_enabled()) { + struct strbuf key = STRBUF_INIT; + + strbuf_addstr(&key, "e2eid"); + strbuf_addstr(&key, gh__server_type_label[params->server_type]); + + trace2_data_string("gvfs-helper", NULL, key.buf, + params->e2eid.buf); + + strbuf_release(&key); + } +} + +/* + * Normalize a few error codes before we try to decide + * how to dispatch on them. + */ +static void gh__response_status__normalize_odd_codes( + struct gh__request_params *params, + struct gh__response_status *status) +{ + if (params->server_type == GH__SERVER_TYPE__CACHE && + status->response_code == 400) { + /* + * The cache-server sends a somewhat bogus 400 instead of + * the normal 401 when AUTH is required. Fixup the status + * to hide that. + * + * TODO Technically, the cache-server could send a 400 + * TODO for many reasons, not just for their bogus + * TODO pseudo-401, but we're going to assume it is a + * TODO 401 for now. We should confirm the expected + * TODO error message in the response-body. + */ + status->response_code = 401; + } + + if (status->response_code == 203) { + /* + * A proxy server transformed a 200 from the origin server + * into a 203. We don't care about the subtle distinction. + */ + status->response_code = 200; + } +} + +/* + * Map HTTP response codes into a retry strategy. 
+ * See https://en.wikipedia.org/wiki/List_of_HTTP_status_codes
+ *
+ * https://docs.microsoft.com/en-us/azure/devops/integrate/concepts/rate-limits?view=azure-devops
+ */
+static void compute_retry_mode_from_http_response(
+	struct gh__response_status *status)
+{
+	switch (status->response_code) {
+
+	case 200:
+		status->retry = GH__RETRY_MODE__SUCCESS;
+		status->ec = GH__ERROR_CODE__OK;
+		return;
+
+	case 301: /* all the various flavors of HTTP Redirect */
+	case 302:
+	case 303:
+	case 304:
+	case 305:
+	case 306:
+	case 307:
+	case 308:
+		/*
+		 * TODO Consider a redirected-retry (with or without
+		 * TODO a Retry-After header).
+		 */
+		goto hard_fail;
+
+	case 401:
+		strbuf_addstr(&status->error_message,
+			      "(http:401) Not Authorized");
+		status->retry = GH__RETRY_MODE__HTTP_401;
+		status->ec = GH__ERROR_CODE__HTTP_401;
+		return;
+
+	case 404:
+		/*
+		 * TODO if params->object_count > 1, consider
+		 * TODO splitting the request into 2 halves
+		 * TODO and retrying each half in series.
+		 */
+		strbuf_addstr(&status->error_message,
+			      "(http:404) Not Found");
+		status->retry = GH__RETRY_MODE__FAIL_404;
+		status->ec = GH__ERROR_CODE__HTTP_404;
+		return;
+
+	case 429:
+		/*
+		 * This is a hard block because we've been bad.
+		 */
+		strbuf_addstr(&status->error_message,
+			      "(http:429) Too Many Requests [throttled]");
+		status->retry = GH__RETRY_MODE__HTTP_429;
+		status->ec = GH__ERROR_CODE__HTTP_429;
+
+		trace2_data_string("gvfs-helper", NULL, "error/http",
+				   status->error_message.buf);
+		return;
+
+	case 503:
+		/*
+		 * We assume that this comes with a "Retry-After" header
+		 * like 429s.
+		 */
+		strbuf_addstr(&status->error_message,
+			      "(http:503) Server Unavailable [throttled]");
+		status->retry = GH__RETRY_MODE__HTTP_503;
+		status->ec = GH__ERROR_CODE__HTTP_503;
+
+		trace2_data_string("gvfs-helper", NULL, "error/http",
+				   status->error_message.buf);
+		return;
+
+	default:
+		goto hard_fail;
+	}
+
+hard_fail:
+	strbuf_addf(&status->error_message, "(http:%d) Other [hard_fail]",
+		    (int)status->response_code);
+	status->retry = GH__RETRY_MODE__HARD_FAIL;
+	status->ec = GH__ERROR_CODE__HTTP_OTHER;
+
+	trace2_data_string("gvfs-helper", NULL, "error/http",
+			   status->error_message.buf);
+	return;
+}
+
+/*
+ * Map CURLE error codes to a retry strategy.
+ * See <curl/curl.h> and
+ * https://curl.haxx.se/libcurl/c/libcurl-errors.html
+ *
+ * This could be a static table rather than a switch, but
+ * that is harder to debug and we may want to selectively
+ * log errors.
+ *
+ * I've commented out all of the hard-fail cases for now
+ * and let the default handle them.  This is to indicate
+ * that I considered them and found them to be not actionable.
+ * Also, the spelling of some of the CURLE_ symbols seems
+ * to change between curl releases on different platforms,
+ * so I'm not going to fight that.
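+ *
+ * (Added summary, not in the original comment: the handful of codes
+ * mapped to "transient" below -- e.g. CURLE_COULDNT_CONNECT and
+ * CURLE_OPERATION_TIMEDOUT -- get the retry/backoff treatment, while
+ * every unlisted code falls through to the hard-fail default.)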
+ */ +static void compute_retry_mode_from_curl_error( + struct gh__response_status *status) +{ + switch (status->curl_code) { + case CURLE_OK: + status->retry = GH__RETRY_MODE__SUCCESS; + status->ec = GH__ERROR_CODE__OK; + return; + + //se CURLE_UNSUPPORTED_PROTOCOL: goto hard_fail; + //se CURLE_FAILED_INIT: goto hard_fail; + //se CURLE_URL_MALFORMAT: goto hard_fail; + //se CURLE_NOT_BUILT_IN: goto hard_fail; + //se CURLE_COULDNT_RESOLVE_PROXY: goto hard_fail; + //se CURLE_COULDNT_RESOLVE_HOST: goto hard_fail; + case CURLE_COULDNT_CONNECT: goto transient; + //se CURLE_WEIRD_SERVER_REPLY: goto hard_fail; + //se CURLE_REMOTE_ACCESS_DENIED: goto hard_fail; + //se CURLE_FTP_ACCEPT_FAILED: goto hard_fail; + //se CURLE_FTP_WEIRD_PASS_REPLY: goto hard_fail; + //se CURLE_FTP_ACCEPT_TIMEOUT: goto hard_fail; + //se CURLE_FTP_WEIRD_PASV_REPLY: goto hard_fail; + //se CURLE_FTP_WEIRD_227_FORMAT: goto hard_fail; + //se CURLE_FTP_CANT_GET_HOST: goto hard_fail; + case CURLE_HTTP2: goto transient; + //se CURLE_FTP_COULDNT_SET_TYPE: goto hard_fail; + case CURLE_PARTIAL_FILE: goto transient; + //se CURLE_FTP_COULDNT_RETR_FILE: goto hard_fail; + //se CURLE_OBSOLETE20: goto hard_fail; + //se CURLE_QUOTE_ERROR: goto hard_fail; + //se CURLE_HTTP_RETURNED_ERROR: goto hard_fail; + case CURLE_WRITE_ERROR: goto transient; + //se CURLE_OBSOLETE24: goto hard_fail; + case CURLE_UPLOAD_FAILED: goto transient; + //se CURLE_READ_ERROR: goto hard_fail; + //se CURLE_OUT_OF_MEMORY: goto hard_fail; + case CURLE_OPERATION_TIMEDOUT: goto transient; + //se CURLE_OBSOLETE29: goto hard_fail; + //se CURLE_FTP_PORT_FAILED: goto hard_fail; + //se CURLE_FTP_COULDNT_USE_REST: goto hard_fail; + //se CURLE_OBSOLETE32: goto hard_fail; + //se CURLE_RANGE_ERROR: goto hard_fail; + case CURLE_HTTP_POST_ERROR: goto transient; + //se CURLE_SSL_CONNECT_ERROR: goto hard_fail; + //se CURLE_BAD_DOWNLOAD_RESUME: goto hard_fail; + //se CURLE_FILE_COULDNT_READ_FILE: goto hard_fail; + //se CURLE_LDAP_CANNOT_BIND: goto hard_fail; + //se CURLE_LDAP_SEARCH_FAILED: goto hard_fail; + //se CURLE_OBSOLETE40: goto hard_fail; + //se CURLE_FUNCTION_NOT_FOUND: goto hard_fail; + //se CURLE_ABORTED_BY_CALLBACK: goto hard_fail; + //se CURLE_BAD_FUNCTION_ARGUMENT: goto hard_fail; + //se CURLE_OBSOLETE44: goto hard_fail; + //se CURLE_INTERFACE_FAILED: goto hard_fail; + //se CURLE_OBSOLETE46: goto hard_fail; + //se CURLE_TOO_MANY_REDIRECTS: goto hard_fail; + //se CURLE_UNKNOWN_OPTION: goto hard_fail; + //se CURLE_TELNET_OPTION_SYNTAX: goto hard_fail; + //se CURLE_OBSOLETE50: goto hard_fail; + //se CURLE_PEER_FAILED_VERIFICATION: goto hard_fail; + //se CURLE_GOT_NOTHING: goto hard_fail; + //se CURLE_SSL_ENGINE_NOTFOUND: goto hard_fail; + //se CURLE_SSL_ENGINE_SETFAILED: goto hard_fail; + case CURLE_SEND_ERROR: goto transient; + case CURLE_RECV_ERROR: goto transient; + //se CURLE_OBSOLETE57: goto hard_fail; + //se CURLE_SSL_CERTPROBLEM: goto hard_fail; + //se CURLE_SSL_CIPHER: goto hard_fail; + //se CURLE_SSL_CACERT: goto hard_fail; + //se CURLE_BAD_CONTENT_ENCODING: goto hard_fail; + //se CURLE_LDAP_INVALID_URL: goto hard_fail; + //se CURLE_FILESIZE_EXCEEDED: goto hard_fail; + //se CURLE_USE_SSL_FAILED: goto hard_fail; + //se CURLE_SEND_FAIL_REWIND: goto hard_fail; + //se CURLE_SSL_ENGINE_INITFAILED: goto hard_fail; + //se CURLE_LOGIN_DENIED: goto hard_fail; + //se CURLE_TFTP_NOTFOUND: goto hard_fail; + //se CURLE_TFTP_PERM: goto hard_fail; + //se CURLE_REMOTE_DISK_FULL: goto hard_fail; + //se CURLE_TFTP_ILLEGAL: goto hard_fail; + //se CURLE_TFTP_UNKNOWNID: goto 
hard_fail;
+	//se CURLE_REMOTE_FILE_EXISTS:	goto hard_fail;
+	//se CURLE_TFTP_NOSUCHUSER:	goto hard_fail;
+	//se CURLE_CONV_FAILED:	goto hard_fail;
+	//se CURLE_CONV_REQD:	goto hard_fail;
+	//se CURLE_SSL_CACERT_BADFILE:	goto hard_fail;
+	//se CURLE_REMOTE_FILE_NOT_FOUND:	goto hard_fail;
+	//se CURLE_SSH:	goto hard_fail;
+	//se CURLE_SSL_SHUTDOWN_FAILED:	goto hard_fail;
+	case CURLE_AGAIN:	goto transient;
+	//se CURLE_SSL_CRL_BADFILE:	goto hard_fail;
+	//se CURLE_SSL_ISSUER_ERROR:	goto hard_fail;
+	//se CURLE_FTP_PRET_FAILED:	goto hard_fail;
+	//se CURLE_RTSP_CSEQ_ERROR:	goto hard_fail;
+	//se CURLE_RTSP_SESSION_ERROR:	goto hard_fail;
+	//se CURLE_FTP_BAD_FILE_LIST:	goto hard_fail;
+	//se CURLE_CHUNK_FAILED:	goto hard_fail;
+	//se CURLE_NO_CONNECTION_AVAILABLE:	goto hard_fail;
+	//se CURLE_SSL_PINNEDPUBKEYNOTMATCH:	goto hard_fail;
+	//se CURLE_SSL_INVALIDCERTSTATUS:	goto hard_fail;
+#ifdef CURLE_HTTP2_STREAM
+	case CURLE_HTTP2_STREAM:	goto transient;
+#endif
+	default:	goto hard_fail;
+	}
+
+hard_fail:
+	strbuf_addf(&status->error_message, "(curl:%d) %s [hard_fail]",
+		    status->curl_code,
+		    curl_easy_strerror(status->curl_code));
+	status->retry = GH__RETRY_MODE__HARD_FAIL;
+	status->ec = GH__ERROR_CODE__CURL_ERROR;
+
+	trace2_data_string("gvfs-helper", NULL, "error/curl",
+			   status->error_message.buf);
+	return;
+
+transient:
+	strbuf_addf(&status->error_message, "(curl:%d) %s [transient]",
+		    status->curl_code,
+		    curl_easy_strerror(status->curl_code));
+	status->retry = GH__RETRY_MODE__TRANSIENT;
+	status->ec = GH__ERROR_CODE__CURL_ERROR;
+
+	trace2_data_string("gvfs-helper", NULL, "error/curl",
+			   status->error_message.buf);
+	return;
 }
 
 /*
@@ -390,32 +859,18 @@ static void gh__response_status__set_from_slot(
 
 	strbuf_setlen(&status->error_message, 0);
 
-	if (status->response_code == 200)
-		status->ec = GH__ERROR_CODE__OK;
-
-	else if (status->response_code == 401) {
-		strbuf_addstr(&status->error_message, "401 Not Authorized");
-		status->ec = GH__ERROR_CODE__HTTP_401;
-
-	} else if (status->response_code == 404) {
-		strbuf_addstr(&status->error_message, "404 Not Found");
-		status->ec = GH__ERROR_CODE__HTTP_404;
-
-	} else if (status->curl_code != CURLE_OK) {
-		strbuf_addf(&status->error_message, "%s (curl)",
-			    curl_easy_strerror(status->curl_code));
-		status->ec = GH__ERROR_CODE__CURL_ERROR;
-
-		trace2_data_string("gvfs-helper", NULL,
-				   "error/curl", status->error_message.buf);
-	} else {
-		strbuf_addf(&status->error_message, "HTTP %ld Unexpected",
-			    status->response_code);
-		status->ec = GH__ERROR_CODE__HTTP_UNEXPECTED_CODE;
+	gh__response_status__normalize_odd_codes(params, status);
 
-		trace2_data_string("gvfs-helper", NULL,
-				   "error/http", status->error_message.buf);
-	}
+	/*
+	 * Use normalized response/status codes from curl/http to decide
+	 * how to set the error-code we propagate *AND* to decide if
+	 * we should retry because of transient network problems.
+	 */
+	if (status->curl_code == CURLE_OK ||
+	    status->curl_code == CURLE_HTTP_RETURNED_ERROR)
+		compute_retry_mode_from_http_response(status);
+	else
+		compute_retry_mode_from_curl_error(status);
 
 	if (status->ec != GH__ERROR_CODE__OK)
 		status->bytes_received = 0;
@@ -433,26 +888,6 @@ static void gh__response_status__release(struct gh__response_status *status)
 	strbuf_release(&status->content_type);
 }
 
-/*
- * The cache-server sends a somewhat bogus 400 instead of
- * the normal 401 when AUTH is required.  Fixup the status
- * to hide that.
- */ -static void fixup_cache_server_400_to_401(struct gh__response_status *status) -{ - if (status->response_code != 400) - return; - - /* - * TODO Technically, the cache-server could send a 400 - * TODO for many reasons, not just for their bogus - * TODO pseudo-401, but we're going to assume it is a - * TODO 401 for now. We should confirm the expected - * TODO error message in the response-body. - */ - status->response_code = 401; -} - static int gh__curl_progress_cb(void *clientp, curl_off_t dltotal, curl_off_t dlnow, curl_off_t ultotal, curl_off_t ulnow) @@ -543,8 +978,13 @@ static int gh__curl_progress_cb(void *clientp, enter_phase_2: strbuf_setlen(¶ms->progress_msg, 0); if (params->progress_base_phase2_msg.len) { - strbuf_addf(¶ms->progress_msg, "%s (bytes sent)", - params->progress_base_phase2_msg.buf); + if (params->k_attempt > 0) + strbuf_addf(¶ms->progress_msg, "%s [retry %d/%d] (bytes sent)", + params->progress_base_phase2_msg.buf, + params->k_attempt, gh__cmd_opts.max_retries); + else + strbuf_addf(¶ms->progress_msg, "%s (bytes sent)", + params->progress_base_phase2_msg.buf); params->progress = start_progress(params->progress_msg.buf, ultotal); display_progress(params->progress, ulnow); } @@ -554,8 +994,13 @@ static int gh__curl_progress_cb(void *clientp, enter_phase_3: strbuf_setlen(¶ms->progress_msg, 0); if (params->progress_base_phase3_msg.len) { - strbuf_addf(¶ms->progress_msg, "%s (bytes received)", - params->progress_base_phase3_msg.buf); + if (params->k_attempt > 0) + strbuf_addf(¶ms->progress_msg, "%s [retry %d/%d] (bytes received)", + params->progress_base_phase3_msg.buf, + params->k_attempt, gh__cmd_opts.max_retries); + else + strbuf_addf(¶ms->progress_msg, "%s (bytes received)", + params->progress_base_phase3_msg.buf); params->progress = start_progress(params->progress_msg.buf, dltotal); display_progress(params->progress, dlnow); } @@ -572,12 +1017,19 @@ static void gh__run_one_slot(struct active_request_slot *slot, struct gh__request_params *params, struct gh__response_status *status) { - trace2_region_enter("gvfs-helper", params->label.buf, NULL); + struct strbuf key = STRBUF_INIT; + + strbuf_addbuf(&key, ¶ms->tr2_label); + strbuf_addstr(&key, gh__server_type_label[params->server_type]); + + params->progress_state = GH__PROGRESS_STATE__START; + strbuf_setlen(¶ms->e2eid, 0); + + trace2_region_enter("gvfs-helper", key.buf, NULL); if (!start_active_slot(slot)) { status->curl_code = CURLE_FAILED_INIT; /* a bit of a lie */ - strbuf_addstr(&status->error_message, - "failed to start HTTP request"); + compute_retry_mode_from_curl_error(status); } else { run_active_slot(slot); if (params->b_write_to_file) @@ -585,27 +1037,38 @@ static void gh__run_one_slot(struct active_request_slot *slot, gh__response_status__set_from_slot(params, status, slot); - if (status->ec == GH__ERROR_CODE__OK) { - int old_len = params->label.len; + log_e2eid(params, status); - strbuf_addstr(¶ms->label, "/nr_objects"); - trace2_data_intmax("gvfs-helper", NULL, - params->label.buf, - params->object_count); - strbuf_setlen(¶ms->label, old_len); + if (status->ec == GH__ERROR_CODE__OK) { + int old_len = key.len; - strbuf_addstr(¶ms->label, "/nr_bytes"); + /* + * We only log the number of bytes received. + * We do not log the number of objects requested + * because the server may give us more than that + * (such as when we request a commit). 
+	 */
+	strbuf_addstr(&key, "/nr_bytes");
 			trace2_data_intmax("gvfs-helper", NULL,
-					   params->label.buf,
+					   key.buf,
 					   status->bytes_received);
-			strbuf_setlen(&params->label, old_len);
+			strbuf_setlen(&key, old_len);
 		}
 	}
 
 	if (params->progress)
 		stop_progress(&params->progress);
 
-	trace2_region_leave("gvfs-helper", params->label.buf, NULL);
+	if (status->ec == GH__ERROR_CODE__OK && params->b_write_to_file) {
+		if (params->b_is_post)
+			install_packfile(params, status);
+		else
+			install_loose(params, status);
+	}
+
+	trace2_region_leave("gvfs-helper", key.buf, NULL);
+
+	strbuf_release(&key);
 }
 
 static int option_parse_cache_server_mode(const struct option *opt,
@@ -1053,41 +1516,115 @@ static void select_odb(void)
  *
  * TODO Consider using lockfile for this rather than naked tempfile.
 */
-static struct tempfile *create_tempfile_for_packfile(void)
+static void create_tempfile_for_packfile(
+	struct gh__request_params *params,
+	struct gh__response_status *status)
 {
 	static unsigned int nth = 0;
 	static struct timeval tv = {0};
 	static struct tm tm = {0};
 	static time_t secs = 0;
-	static char tbuf[32] = {0};
+	static char date[32] = {0};
 
-	struct tempfile *tempfile = NULL;
-	struct strbuf buf_path = STRBUF_INIT;
+	struct strbuf basename = STRBUF_INIT;
+	struct strbuf buf = STRBUF_INIT;
+	int len_p;
+	enum scld_error scld;
+
+	gh__response_status__zero(status);
 
 	if (!nth) {
+		/*
+		 * Create a <date> string to use in the name of all packfiles
+		 * created by this process.
+		 */
 		gettimeofday(&tv, NULL);
 		secs = tv.tv_sec;
 		gmtime_r(&secs, &tm);
 
-		xsnprintf(tbuf, sizeof(tbuf), "%4d%02d%02d-%02d%02d%02d-%06ld",
+		xsnprintf(date, sizeof(date), "%4d%02d%02d-%02d%02d%02d-%06ld",
 			  tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
 			  tm.tm_hour, tm.tm_min, tm.tm_sec,
 			  (long)tv.tv_usec);
 	}
 
-	// TODO should this be in the "<odb>/pack/tempPacks/"
-	// TODO directory instead? YES
+	/*
+	 * Create a <basename> for this packfile using a series number <n>,
+	 * so that all of the chunks we download will group together.
+	 */
+	strbuf_addf(&basename, "vfs-%s-%04d", date, nth++);
 
-	strbuf_addbuf(&buf_path, &gh__global.buf_odb_path);
-	strbuf_complete(&buf_path, '/');
-	strbuf_addf(&buf_path, "pack/vfs-%s-%04d.temp", tbuf, nth++);
+	/*
+	 * We will stream the data into a managed tempfile() in:
+	 *
+	 *     "<odb>/pack/tempPacks/vfs-<date>-<n>.temp"
+	 */
+	strbuf_setlen(&buf, 0);
+	strbuf_addbuf(&buf, &gh__global.buf_odb_path);
+	strbuf_complete(&buf, '/');
+	strbuf_addstr(&buf, "pack/");
+	len_p = buf.len;
+	strbuf_addstr(&buf, "tempPacks/");
+	strbuf_addbuf(&buf, &basename);
+	strbuf_addstr(&buf, ".temp");
+
+	scld = safe_create_leading_directories(buf.buf);
+	if (scld != SCLD_OK && scld != SCLD_EXISTS) {
+		strbuf_addf(&status->error_message,
+			    "could not create directory for packfile: '%s'",
+			    buf.buf);
+		status->ec = GH__ERROR_CODE__COULD_NOT_CREATE_TEMPFILE;
+		goto cleanup;
+	}
+
+	params->tempfile = create_tempfile(buf.buf);
+	if (!params->tempfile) {
+		strbuf_addf(&status->error_message,
+			    "could not create tempfile for packfile: '%s'",
+			    buf.buf);
+		status->ec = GH__ERROR_CODE__COULD_NOT_CREATE_TEMPFILE;
+		goto cleanup;
+	}
 
-	tempfile = create_tempfile(buf_path.buf);
-	fdopen_tempfile(tempfile, "w");
+	fdopen_tempfile(params->tempfile, "w");
 
-	strbuf_release(&buf_path);
+	/*
+	 * After the download is complete, we will need to steal the file
+	 * from the tempfile() class (so that it doesn't magically delete
+	 * it when we close the file handle) and then index it.
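+	 *
+	 * (Illustrative note, not in the original comment: with the
+	 * naming scheme above, the 4th packfile requested by a process
+	 * whose first request happened at 2019-11-11 12:34:56.000123 UTC
+	 * would stream into
+	 * ".../pack/tempPacks/vfs-20191111-123456-000123-0003.temp".)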
+	 *
+	 * We do this into the tempPacks directory to avoid contaminating
+	 * the real pack directory until we know there is no corruption.
+	 *
+	 *     "<odb>/pack/tempPacks/vfs-<date>-<n>.temp.pack"
+	 *     "<odb>/pack/tempPacks/vfs-<date>-<n>.temp.idx"
+	 */
+	strbuf_setlen(&params->temp_path_pack, 0);
+	strbuf_addf(&params->temp_path_pack, "%s.pack", buf.buf);
+
+	strbuf_setlen(&params->temp_path_idx, 0);
+	strbuf_addf(&params->temp_path_idx, "%s.idx", buf.buf);
+
+	/*
+	 * Later, if all goes well, we will install them as:
+	 *
+	 *     "<odb>/pack/vfs-<date>-<n>.pack"
+	 *     "<odb>/pack/vfs-<date>-<n>.idx"
+	 */
+	strbuf_setlen(&buf, len_p);
+	strbuf_setlen(&params->final_path_pack, 0);
+	strbuf_addf(&params->final_path_pack, "%s%s.pack",
+		    buf.buf, basename.buf);
+	strbuf_setlen(&params->final_path_idx, 0);
+	strbuf_addf(&params->final_path_idx, "%s%s.idx",
+		    buf.buf, basename.buf);
+	strbuf_setlen(&params->final_packfile_filename, 0);
+	strbuf_addf(&params->final_packfile_filename, "%s.pack",
+		    basename.buf);
 
-	return tempfile;
+cleanup:
+	strbuf_release(&buf);
+	strbuf_release(&basename);
 }
 
 /*
@@ -1100,15 +1637,15 @@
 */
 static void create_tempfile_for_loose(
 	struct gh__request_params *params,
-	struct gh__response_status *status,
-	const struct object_id *oid)
+	struct gh__response_status *status)
 {
+	static int nth = 0;
 	struct strbuf buf_path = STRBUF_INIT;
 	const char *hex;
 
 	gh__response_status__zero(status);
 
-	hex = oid_to_hex(oid);
+	hex = oid_to_hex(&params->loose_oid);
 
 	strbuf_addbuf(&buf_path, &gh__global.buf_odb_path);
 	strbuf_complete(&buf_path, '/');
@@ -1136,7 +1673,7 @@ static void create_tempfile_for_loose(
 	 * using lockfiles to avoid issues with stale locks after
 	 * crashes.
 	 */
-	strbuf_addf(&buf_path, ".%08u.temp", getpid());
+	strbuf_addf(&buf_path, ".%08u.%.06u.temp", getpid(), nth++);
 
 	params->tempfile = create_tempfile(buf_path.buf);
 	if (!params->tempfile) {
@@ -1153,85 +1690,34 @@
 }
 
 /*
- * Extract the filename portion of the given pathname.
- *
- * TODO Wish I could find a strbuf_filename() function for this.
+ * Convert the tempfile into a temporary .pack, index it into a temporary
+ * .idx file, and then install the pair into the ODB.
 */
-static void extract_filename(struct strbuf *filename,
-			     const struct strbuf *pathname)
-{
-	size_t len = pathname->len;
-
-	strbuf_setlen(filename, 0);
-
-	while (len > 0 && !is_dir_sep(pathname->buf[len - 1]))
-		len--;
-
-	strbuf_addstr(filename, &pathname->buf[len]);
-}
-
-/*
- * Convert the tempfile into a permanent .pack packfile in the ODB.
- * Create the corresponding .idx file.
- *
- * Return the filename (not pathname) of the resulting packfile.
- */
-static void install_packfile(struct gh__response_status *status,
-			     struct tempfile **pp_tempfile,
-			     struct strbuf *packfile_filename)
+static void install_packfile(struct gh__request_params *params,
			     struct gh__response_status *status)
 {
 	struct child_process ip = CHILD_PROCESS_INIT;
-	struct strbuf pack_name_tmp = STRBUF_INIT;
-	struct strbuf pack_name_dst = STRBUF_INIT;
-	struct strbuf idx_name_tmp = STRBUF_INIT;
-	struct strbuf idx_name_dst = STRBUF_INIT;
-	size_t len_base;
-
-	gh__response_status__zero(status);
-
-	strbuf_setlen(packfile_filename, 0);
 
 	/*
-	 * start with "<base>.temp" (that is owned by tempfile class).
-	 * rename to "<base>.pack.temp" to break ownership.
-	 *
-	 * create "<base>.idx.temp" on provisional packfile.
-	 *
-	 * officially install both "<base>.{pack,idx}.temp" as
-	 * "<base>.{pack,idx}".
+	 * When we request more than 1 object, the server should always
+	 * send us a packfile.
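+	 *
+	 * (Added note: if the response carries any other content-type,
+	 * the check just below rejects it with
+	 * GH__ERROR_CODE__UNEXPECTED_CONTENT_TYPE rather than trying
+	 * to index it.)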
*/ - - strbuf_addstr(&pack_name_tmp, get_tempfile_path(*pp_tempfile)); - if (!strip_suffix(pack_name_tmp.buf, ".temp", &len_base)) { - /* - * This is more of a BUG(), but I want the error - * code propagated. - */ + if (strcmp(status->content_type.buf, + "application/x-git-packfile")) { strbuf_addf(&status->error_message, - "packfile tempfile does not end in '.temp': '%s'", - pack_name_tmp.buf); - status->ec = GH__ERROR_CODE__COULD_NOT_INSTALL_PACKFILE; + "received unknown content-type '%s'", + status->content_type.buf); + status->ec = GH__ERROR_CODE__UNEXPECTED_CONTENT_TYPE; goto cleanup; } - strbuf_setlen(&pack_name_tmp, (int)len_base); - strbuf_addbuf(&pack_name_dst, &pack_name_tmp); - strbuf_addbuf(&idx_name_tmp, &pack_name_tmp); - strbuf_addbuf(&idx_name_dst, &pack_name_tmp); - - strbuf_addstr(&pack_name_tmp, ".pack.temp"); - strbuf_addstr(&pack_name_dst, ".pack"); - strbuf_addstr(&idx_name_tmp, ".idx.temp"); - strbuf_addstr(&idx_name_dst, ".idx"); - - // TODO if either pack_name_dst or idx_name_dst already - // TODO exists in the ODB, create alternate names so that - // TODO we don't step on them. + gh__response_status__zero(status); - if (rename_tempfile(pp_tempfile, pack_name_tmp.buf) == -1) { + if (rename_tempfile(¶ms->tempfile, + params->temp_path_pack.buf) == -1) { strbuf_addf(&status->error_message, "could not rename packfile to '%s'", - pack_name_tmp.buf); + params->temp_path_pack.buf); status->ec = GH__ERROR_CODE__COULD_NOT_INSTALL_PACKFILE; goto cleanup; } @@ -1239,59 +1725,54 @@ static void install_packfile(struct gh__response_status *status, argv_array_push(&ip.args, "index-pack"); if (gh__cmd_opts.show_progress) argv_array_push(&ip.args, "-v"); - argv_array_pushl(&ip.args, "-o", idx_name_tmp.buf, NULL); - argv_array_push(&ip.args, pack_name_tmp.buf); + argv_array_pushl(&ip.args, "-o", params->temp_path_idx.buf, NULL); + argv_array_push(&ip.args, params->temp_path_pack.buf); ip.git_cmd = 1; ip.no_stdin = 1; ip.no_stdout = 1; - // TODO consider capturing stdout from index-pack because - // TODO it will contain the SHA of the packfile and we can - // TODO (should?) add it to the .pack and .idx pathnames - // TODO when we install them. - // TODO - // TODO See pipe_command() rather than run_command(). - // TODO - // TODO Or should be SHA-it ourselves (or read the last 20 bytes)? - /* - * Note that I DO NOT have a region around the index-pack process. - * The region in gh__run_one_slot() currently only covers the - * download time. This index-pack is a separate step not covered - * in the above region. Later, if/when we have CURL directly stream - * to index-pack, that region will be the combined download+index - * time. So, I'm not going to introduce it here. + * Note that I DO NOT have a trace2 region around the + * index-pack process by itself. Currently, we are inside the + * trace2 region for running the request and that's fine. + * Later, if/when we stream the download directly to + * index-pack, it will be inside under the same region anyway. + * So, I'm not going to introduce it here. */ if (run_command(&ip)) { - unlink(pack_name_tmp.buf); - unlink(idx_name_tmp.buf); + unlink(params->temp_path_pack.buf); + unlink(params->temp_path_idx.buf); strbuf_addf(&status->error_message, - "index-pack failed on '%s'", pack_name_tmp.buf); - status->ec = GH__ERROR_CODE__COULD_NOT_INSTALL_PACKFILE; + "index-pack failed on '%s'", + params->temp_path_pack.buf); + /* + * Lets assume that index-pack failed because the + * downloaded file is corrupt (truncated). 
+		 *
+		 * Retry it as if the network had dropped.
+		 */
+		status->retry = GH__RETRY_MODE__TRANSIENT;
+		status->ec = GH__ERROR_CODE__INDEX_PACK_FAILED;
 		goto cleanup;
 	}
 
-	if (finalize_object_file(pack_name_tmp.buf, pack_name_dst.buf) ||
-	    finalize_object_file(idx_name_tmp.buf, idx_name_dst.buf)) {
-		unlink(pack_name_tmp.buf);
-		unlink(pack_name_dst.buf);
-		unlink(idx_name_tmp.buf);
-		unlink(idx_name_dst.buf);
+	if (finalize_object_file(params->temp_path_pack.buf,
+				 params->final_path_pack.buf) ||
+	    finalize_object_file(params->temp_path_idx.buf,
+				 params->final_path_idx.buf)) {
+		unlink(params->temp_path_pack.buf);
+		unlink(params->temp_path_idx.buf);
+		unlink(params->final_path_pack.buf);
+		unlink(params->final_path_idx.buf);
 		strbuf_addf(&status->error_message,
 			    "could not install packfile '%s'",
-			    pack_name_dst.buf);
+			    params->final_path_pack.buf);
 		status->ec = GH__ERROR_CODE__COULD_NOT_INSTALL_PACKFILE;
 		goto cleanup;
 	}
 
-	extract_filename(packfile_filename, &pack_name_dst);
-
 cleanup:
 	child_process_clear(&ip);
-	strbuf_release(&pack_name_tmp);
-	strbuf_release(&pack_name_dst);
-	strbuf_release(&idx_name_tmp);
-	strbuf_release(&idx_name_dst);
 }
 
 /*
@@ -1361,7 +1842,250 @@ static void gh_http_cleanup(void)
 }
 
 /*
- * Do a single HTTP request without auth-retry or fallback.
+ * buffer has "<key>: <value>[\r]\n"
+ */
+static void parse_resp_hdr_1(const char *buffer, size_t size, size_t nitems,
+			     struct strbuf *key, struct strbuf *value)
+{
+	const char *end = buffer + (size * nitems);
+	const char *p;
+
+	p = strchr(buffer, ':');
+
+	strbuf_setlen(key, 0);
+	strbuf_add(key, buffer, (p - buffer));
+
+	p++; /* skip ':' */
+	p++; /* skip ' ' */
+
+	strbuf_setlen(value, 0);
+	strbuf_add(value, p, (end - p));
+	strbuf_trim_trailing_newline(value);
+}
+
+static size_t parse_resp_hdr(char *buffer, size_t size, size_t nitems,
+			     void *void_params)
+{
+	struct gh__request_params *params = void_params;
+	struct gh__azure_throttle *azure = &gh__global_throttle[params->server_type];
+
+	if (starts_with(buffer, "X-RateLimit-")) {
+		struct strbuf key = STRBUF_INIT;
+		struct strbuf val = STRBUF_INIT;
+
+		parse_resp_hdr_1(buffer, size, nitems, &key, &val);
+
+		/*
+		 * The following X- headers are specific to AzureDevOps.
+		 * Other servers have similar sets of values, but I haven't
+		 * compared them in depth.
+		 *
+		 * TODO Remove this.
+		 */
+		trace2_printf("Throttle: %s %s", key.buf, val.buf);
+
+		if (!strcmp(key.buf, "X-RateLimit-Resource")) {
+			/*
+			 * The name of the resource that is complaining.
+			 * Just log it because we can't do anything with it.
+			 */
+			strbuf_setlen(&key, 0);
+			strbuf_addstr(&key, "ratelimit/resource");
+			strbuf_addstr(&key, gh__server_type_label[params->server_type]);
+
+			trace2_data_string("gvfs-helper", NULL, key.buf, val.buf);
+		}
+
+		else if (!strcmp(key.buf, "X-RateLimit-Delay")) {
+			/*
+			 * The amount of delay added to our response.
+			 * Just log it because we can't do anything with it.
+			 */
+			unsigned long tarpit_delay_ms;
+
+			strbuf_setlen(&key, 0);
+			strbuf_addstr(&key, "ratelimit/delay_ms");
+			strbuf_addstr(&key, gh__server_type_label[params->server_type]);
+
+			git_parse_ulong(val.buf, &tarpit_delay_ms);
+
+			trace2_data_intmax("gvfs-helper", NULL, key.buf, tarpit_delay_ms);
+		}
+
+		else if (!strcmp(key.buf, "X-RateLimit-Limit")) {
+			/*
+			 * The resource limit/quota before we get a 429.
+			 */
+			git_parse_ulong(val.buf, &azure->tstu_limit);
+		}
+
+		else if (!strcmp(key.buf, "X-RateLimit-Remaining")) {
+			/*
+			 * The amount of our quota remaining.
When zero, we
+			 * should get 429s on further requests until the reset
+			 * time.
+			 */
+			git_parse_ulong(val.buf, &azure->tstu_remaining);
+		}
+
+		else if (!strcmp(key.buf, "X-RateLimit-Reset")) {
+			/*
+			 * The server gave us a time-in-seconds-since-the-epoch
+			 * for when our quota will be reset (if we stop all
+			 * activity right now).
+			 *
+			 * Checkpoint the local system clock so we can do some
+			 * sanity checks on any clock skew.  Also, since we get
+			 * the headers before we get the content, we can adjust
+			 * our delay to compensate for the full download time.
+			 */
+			unsigned long now = time(NULL);
+			unsigned long reset_time;
+
+			git_parse_ulong(val.buf, &reset_time);
+			if (reset_time > now)
+				azure->reset_sec = reset_time - now;
+		}
+
+		strbuf_release(&key);
+		strbuf_release(&val);
+	}
+
+	else if (starts_with(buffer, "Retry-After")) {
+		struct strbuf key = STRBUF_INIT;
+		struct strbuf val = STRBUF_INIT;
+
+		parse_resp_hdr_1(buffer, size, nitems, &key, &val);
+
+		/*
+		 * We get this header with a 429 and 503 and possibly a 30x.
+		 *
+		 * Curl does have CURLINFO_RETRY_AFTER that nicely parses and
+		 * normalizes the value (and supports HTTP/1.1 usage), but it
+		 * is not present yet in the version shipped with the Mac, so
+		 * we do it directly here.
+		 */
+		git_parse_ulong(val.buf, &azure->retry_after_sec);
+
+		strbuf_release(&key);
+		strbuf_release(&val);
+	}
+
+	else if (starts_with(buffer, "X-VSS-E2EID")) {
+		struct strbuf key = STRBUF_INIT;
+
+		/*
+		 * Capture the E2EID as it goes by, but don't log it until we
+		 * know the request result.
+		 */
+		parse_resp_hdr_1(buffer, size, nitems, &key, &params->e2eid);
+
+		strbuf_release(&key);
+	}
+
+	return nitems * size;
+}
+
+/*
+ * Wait "duration" seconds and drive the progress mechanism.
+ *
+ * We spin slightly faster than we need to, to keep the progress bar
+ * drawn (especially if the user presses return while waiting) and to
+ * compensate for delay factors built into the progress class (which
+ * might wait for 2 seconds before drawing the first message).
+ */
+static void do_throttle_spin(struct gh__request_params *params,
+			     const char *tr2_label,
+			     const char *progress_msg,
+			     int duration)
+{
+	struct strbuf region = STRBUF_INIT;
+	struct progress *progress = NULL;
+	unsigned long begin = time(NULL);
+	unsigned long now = begin;
+	unsigned long end = begin + duration;
+
+	strbuf_addstr(&region, tr2_label);
+	strbuf_addstr(&region, gh__server_type_label[params->server_type]);
+	trace2_region_enter("gvfs-helper", region.buf, NULL);
+
+	progress = start_progress(progress_msg, duration);
+	while (now < end) {
+		display_progress(progress, (now - begin));
+
+		sleep_millisec(100);
+
+		now = time(NULL);
+	}
+	display_progress(progress, duration);
+	stop_progress(&progress);
+
+	trace2_region_leave("gvfs-helper", region.buf, NULL);
+	strbuf_release(&region);
+}
+
+/*
+ * Delay the outbound request if necessary in response to previous throttle
+ * blockages or hints.  Throttle data is somewhat orthogonal to the status
+ * results from any previous request and/or the request params of the next
+ * request.
+ *
+ * Note that the throttle info is also cross-process information: e.g.,
+ * 2 concurrent fetches in 2 different terminal windows to the same server
+ * will share the same server quota.  These could be coordinated too,
+ * so that a blockage received in one process would prevent the other
+ * process from starting another request (or would extend its delay
+ * interval).  We're NOT going to do that level of integration.
+ * We will let both processes independently attempt the next request. + * This may cause us to miss the end-of-quota boundary if the server + * extends it because of the second request. + * + * TODO Should we have a max-wait option and then return a hard-error + * TODO of some type? + */ +static void do_throttle_wait(struct gh__request_params *params, + struct gh__response_status *status) +{ + struct gh__azure_throttle *azure = + &gh__global_throttle[params->server_type]; + + if (azure->retry_after_sec) { + /* + * We were given a hard delay (such as after a 429). + * Spin until the requested time. + */ + do_throttle_spin(params, "throttle/hard", + "Waiting on hard throttle (sec)", + azure->retry_after_sec); + return; + } + + if (azure->reset_sec > 0) { + /* + * We were given a hint that we are overloading + * the server. Voluntarily backoff (before we + * get tarpitted or blocked). + */ + do_throttle_spin(params, "throttle/soft", + "Waiting on soft throttle (sec)", + azure->reset_sec); + return; + } + + if (params->k_transient_delay_sec) { + /* + * Insert an arbitrary delay before retrying after a + * transient (network) failure. + */ + do_throttle_spin(params, "throttle/transient", + "Waiting to retry after network error (sec)", + params->k_transient_delay_sec); + return; + } +} + +/* + * Do a single HTTP request WITHOUT robust-retry, auth-retry or fallback. */ static void do_req(const char *url_base, const char *url_component, @@ -1376,14 +2100,27 @@ static void do_req(const char *url_base, gh__response_status__zero(status); if (params->b_write_to_file) { - // TODO ftruncate tempfile ?? + /* Delete dirty tempfile from a previous attempt. */ + if (params->tempfile) + delete_tempfile(¶ms->tempfile); + + if (params->b_is_post) + create_tempfile_for_packfile(params, status); + else + create_tempfile_for_loose(params, status); + if (!params->tempfile || status->ec != GH__ERROR_CODE__OK) + return; } else { + /* Guard against caller using dirty buffer */ strbuf_setlen(params->buffer, 0); } end_url_with_slash(&rest_url, url_base); strbuf_addstr(&rest_url, url_component); + do_throttle_wait(params, status); + gh__azure_throttle__zero(&gh__global_throttle[params->server_type]); + slot = get_active_slot(); slot->results = &results; @@ -1412,6 +2149,9 @@ static void do_req(const char *url_base, curl_easy_setopt(slot->curl, CURLOPT_FILE, params->buffer); } + curl_easy_setopt(slot->curl, CURLOPT_HEADERFUNCTION, parse_resp_hdr); + curl_easy_setopt(slot->curl, CURLOPT_HEADERDATA, params); + if (creds && creds->username) { /* * Force CURL to respect the username/password we provide by @@ -1438,8 +2178,8 @@ static void do_req(const char *url_base, * with the main Git server (because the cache-server * doesn't handle 401s). * - * TODO Think about if we really need to handle this case - * TODO and if so, add an .is_main to params. + * TODO Think about if we really need to handle this case. + * TODO Guard with "if (params->sever_type == __MAIN)" */ curl_easy_setopt(slot->curl, CURLOPT_HTTPAUTH, CURLAUTH_ANY); } @@ -1457,20 +2197,123 @@ static void do_req(const char *url_base, gh__run_one_slot(slot, params, status); } +/* + * Compute the delay for the nth attempt. + * + * No delay for the first attempt. Then use a normal exponential backoff + * starting from 8. + */ +static int compute_transient_delay(int attempt) +{ + int v; + + if (attempt < 1) + return 0; + + /* + * Let 8K be our hard limit (for integer overflow protection). + * That's over 2 hours. This is 8<<10. 
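+ *
+ * (Worked example, not in the original comment: with the default cap
+ * of GH__DEFAULT_MAX_TRANSIENT_BACKOFF_SEC (300 seconds), attempts
+ * 1..6 are delayed 8, 16, 32, 64, 128, and 256 seconds respectively,
+ * and attempt 7 onward is capped at 300 seconds.)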
+ */ + if (attempt > 10) + attempt = 10; + + v = 8 << (attempt - 1); + + if (v > gh__cmd_opts.max_transient_backoff_sec) + v = gh__cmd_opts.max_transient_backoff_sec; + + return v; +} + +/* + * Robustly make an HTTP request. Retry if necessary to hide common + * transient network errors and/or 429 blockages. + * + * For a transient (network) failure (where we do not have a throttle + * delay factor), we should insert a small delay to let the network + * recover. The outage might be because the VPN dropped, or the + * machine went to sleep or something and we want to give the network + * time to come back up. Insert AI here :-) + */ +static void do_req__with_robust_retry(const char *url_base, + const char *url_component, + const struct credential *creds, + struct gh__request_params *params, + struct gh__response_status *status) +{ + for (params->k_attempt = 0; + params->k_attempt < gh__cmd_opts.max_retries + 1; + params->k_attempt++) { + + do_req(url_base, url_component, creds, params, status); + + switch (status->retry) { + default: + case GH__RETRY_MODE__SUCCESS: + case GH__RETRY_MODE__HTTP_401: /* caller does auth-retry */ + case GH__RETRY_MODE__HARD_FAIL: + case GH__RETRY_MODE__FAIL_404: + return; + + case GH__RETRY_MODE__HTTP_429: + case GH__RETRY_MODE__HTTP_503: + /* + * We should have gotten a "Retry-After" header with + * these and that gives us the wait time. If not, + * fallthru and use the backoff delay. + */ + if (gh__global_throttle[params->server_type].retry_after_sec) + continue; + /*fallthru*/ + + case GH__RETRY_MODE__TRANSIENT: + params->k_transient_delay_sec = + compute_transient_delay(params->k_attempt); + continue; + } + } +} + static void do_req__to_main(const char *url_component, struct gh__request_params *params, struct gh__response_status *status) { -// lookup_main_creds(); + params->server_type = GH__SERVER_TYPE__MAIN; - do_req(gh__global.main_url, url_component, &gh__global.main_creds, - params, status); + /* + * When talking to the main Git server, we do allow non-auth'd + * initial requests (mainly for testing, since production servers + * will usually always want creds), so we DO NOT force load the + * user's creds here and let the normal 401 mechanism handle things. + * This has a slight perf penalty -- if we're bulk fetching 4000 + * objects from a production server, we're going to do a 100K+ byte + * POST just to get a 401 and then repeat the 100K+ byte POST + * with the creds. + * + * TODO Consider making a test or runtime flag to alter this. + * TODO If set/not-set, add a call here to pre-populate the creds. + * TODO + * TODO lookup_main_creds(); + * TODO + * + * TODO Testing with GIT_TRACE_CURL shows that curl will *SOMETIME* + * TODO send an "Expect: 100-continue" and silently handle/eat the + * TODO "HTTP/1.1 100 Continue" before the POST-body is sent, + * TODO so maybe this isn't an issue. (Other than the extra + * TODO roundtrip for the first set of headers.) More testing here + * TODO should be performed. 
+ */ + + do_req__with_robust_retry(gh__global.main_url, url_component, + &gh__global.main_creds, + params, status); if (status->response_code == 401) { refresh_main_creds(); - do_req(gh__global.main_url, url_component, &gh__global.main_creds, - params, status); + do_req__with_robust_retry(gh__global.main_url, url_component, + &gh__global.main_creds, + params, status); } if (status->response_code == 200) @@ -1481,29 +2324,36 @@ static void do_req__to_cache_server(const char *url_component, struct gh__request_params *params, struct gh__response_status *status) { + params->server_type = GH__SERVER_TYPE__CACHE; + synthesize_cache_server_creds(); - do_req(gh__global.cache_server_url, url_component, &gh__global.cache_creds, - params, status); - fixup_cache_server_400_to_401(status); + do_req__with_robust_retry(gh__global.cache_server_url, url_component, + &gh__global.cache_creds, + params, status); if (status->response_code == 401) { refresh_cache_server_creds(); - do_req(gh__global.cache_server_url, url_component, - &gh__global.cache_creds, params, status); - fixup_cache_server_400_to_401(status); + do_req__with_robust_retry(gh__global.cache_server_url, + url_component, + &gh__global.cache_creds, + params, status); } if (status->response_code == 200) approve_cache_server_creds(); } +/* + * Try the cache-server (if configured) then fall-back to the main Git server. + */ static void do_req__with_fallback(const char *url_component, struct gh__request_params *params, struct gh__response_status *status) { - if (gh__global.cache_server_url && !params->b_no_cache_server) { + if (gh__global.cache_server_url && + params->b_permit_cache_server_if_defined) { do_req__to_cache_server(url_component, params, status); if (status->response_code == 200) @@ -1538,11 +2388,12 @@ static void do__gvfs_config(struct gh__response_status *status, { struct gh__request_params params = GH__REQUEST_PARAMS_INIT; - strbuf_addstr(¶ms.label, "GET/config"); + strbuf_addstr(¶ms.tr2_label, "GET/config"); params.b_is_post = 0; params.b_write_to_file = 0; - params.b_no_cache_server = 1; /* they don't handle gvfs/config API */ + /* cache-servers do not handle gvfs/config REST calls */ + params.b_permit_cache_server_if_defined = 0; params.buffer = config_data; params.object_count = 1; /* a bit of a lie */ @@ -1587,11 +2438,11 @@ static void do__loose__gvfs_object(struct gh__response_status *status, strbuf_addf(&component_url, "gvfs/objects/%s", oid_to_hex(oid)); - strbuf_addstr(¶ms.label, "GET/objects"); + strbuf_addstr(¶ms.tr2_label, "GET/objects"); params.b_is_post = 0; params.b_write_to_file = 1; - params.b_no_cache_server = 0; + params.b_permit_cache_server_if_defined = 1; params.object_count = 1; @@ -1601,9 +2452,7 @@ static void do__loose__gvfs_object(struct gh__response_status *status, params.headers = curl_slist_append(params.headers, "Pragma: no-cache"); - create_tempfile_for_loose(¶ms, status, oid); - if (!params.tempfile) - goto cleanup; + oidcpy(¶ms.loose_oid, oid); if (gh__cmd_opts.show_progress) { /* @@ -1618,10 +2467,6 @@ static void do__loose__gvfs_object(struct gh__response_status *status, do_req__with_fallback(component_url.buf, ¶ms, status); - if (status->ec == GH__ERROR_CODE__OK) - install_loose(¶ms, status); - -cleanup: gh__request_params__release(¶ms); strbuf_release(&component_url); } @@ -1634,23 +2479,26 @@ static void do__loose__gvfs_object(struct gh__response_status *status, static void do__packfile__gvfs_objects(struct gh__response_status *status, struct oidset_iter *iter, unsigned long nr_wanted_in_block, 
+ int j_pack_num, int j_pack_den, struct strbuf *output_filename, - unsigned long *nr_taken) + unsigned long *nr_oid_taken) { struct json_writer jw_req = JSON_WRITER_INIT; struct gh__request_params params = GH__REQUEST_PARAMS_INIT; + strbuf_setlen(output_filename, 0); + gh__response_status__zero(status); params.object_count = build_json_payload__gvfs_objects( &jw_req, iter, nr_wanted_in_block); - *nr_taken = params.object_count; + *nr_oid_taken = params.object_count; - strbuf_addstr(¶ms.label, "POST/objects"); + strbuf_addstr(¶ms.tr2_label, "POST/objects"); params.b_is_post = 1; params.b_write_to_file = 1; - params.b_no_cache_server = 0; + params.b_permit_cache_server_if_defined = 1; params.post_payload = &jw_req.json; @@ -1675,73 +2523,21 @@ static void do__packfile__gvfs_objects(struct gh__response_status *status, params.headers = curl_slist_append(params.headers, "Accept: application/x-git-loose-object"); - params.tempfile = create_tempfile_for_packfile(); - if (!params.tempfile) { - strbuf_addstr(&status->error_message, - "could not create tempfile for packfile"); - status->ec = GH__ERROR_CODE__COULD_NOT_CREATE_TEMPFILE; - goto cleanup; - } - if (gh__cmd_opts.show_progress) { strbuf_addf(¶ms.progress_base_phase2_msg, - "Requesting packfile with %ld objects", + "Requesting packfile %d/%d with %ld objects", + j_pack_num, j_pack_den, params.object_count); strbuf_addf(¶ms.progress_base_phase3_msg, - "Receiving packfile with %ld objects", + "Receiving packfile %d/%d with %ld objects", + j_pack_num, j_pack_den, params.object_count); } do_req__with_fallback("gvfs/objects", ¶ms, status); + if (status->ec == GH__ERROR_CODE__OK) + strbuf_addbuf(output_filename, ¶ms.final_packfile_filename); - if (status->ec == GH__ERROR_CODE__OK) { - if (!strcmp(status->content_type.buf, - "application/x-git-packfile")) { - - // TODO Consider having a worker thread to manage - // TODO running index-pack and then install the - // TODO resulting .idx and .pack files. This would - // TODO let us interleave those steps with our thread - // TODO fetching the next block of objects from the - // TODO server. (Need to think about how progress - // TODO messages from our thread and index-pack - // TODO would mesh.) - // TODO - // TODO But then again, if we hack index-pack to write - // TODO to our alternate and stream the data thru it, - // TODO it won't matter. - - install_packfile(status, ¶ms.tempfile, - output_filename); - goto cleanup; - } - - if (!strcmp(status->content_type.buf, - "application/x-git-loose-object")) - { - /* - * This should not happen (when we request - * more than one object). The server can send - * us a loose object (even when we use the - * POST form) if there is only one object in - * the payload (and despite the set of accept - * headers we send), so I'm going to leave - * this here. 
- */ - strbuf_addstr(&status->error_message, - "received loose object when packfile expected"); - status->ec = GH__ERROR_CODE__UNEXPECTED_CONTENT_TYPE; - goto cleanup; - } - - strbuf_addf(&status->error_message, - "received unknown content-type '%s'", - status->content_type.buf); - status->ec = GH__ERROR_CODE__UNEXPECTED_CONTENT_TYPE; - goto cleanup; - } - -cleanup: gh__request_params__release(¶ms); jw_release(&jw_req); } @@ -1755,7 +2551,7 @@ static void do__packfile__gvfs_objects(struct gh__response_status *status, */ static void do_fetch_oidset(struct gh__response_status *status, struct oidset *oids, - unsigned long nr_total, + unsigned long nr_oid_total, struct string_list *result_list) { struct oidset_iter iter; @@ -1764,19 +2560,25 @@ static void do_fetch_oidset(struct gh__response_status *status, struct strbuf err404 = STRBUF_INIT; const struct object_id *oid; unsigned long k; - unsigned long nr_taken; + unsigned long nr_oid_taken; int had_404 = 0; + int j_pack_den = 0; + int j_pack_num = 0; gh__response_status__zero(status); - if (!nr_total) + if (!nr_oid_total) return; + if (nr_oid_total > 1) + j_pack_den = ((nr_oid_total + gh__cmd_opts.block_size - 1) + / gh__cmd_opts.block_size); + oidset_iter_init(oids, &iter); - for (k = 0; k < nr_total; k += nr_taken) { - if (nr_total - k == 1 || gh__cmd_opts.block_size == 1) { + for (k = 0; k < nr_oid_total; k += nr_oid_taken) { + if (nr_oid_total - k == 1 || gh__cmd_opts.block_size == 1) { oid = oidset_iter_next(&iter); - nr_taken = 1; + nr_oid_taken = 1; do__loose__gvfs_object(status, oid); @@ -1811,10 +2613,13 @@ static void do_fetch_oidset(struct gh__response_status *status, } else { strbuf_setlen(&output_filename, 0); + j_pack_num++; + do__packfile__gvfs_objects(status, &iter, gh__cmd_opts.block_size, + j_pack_num, j_pack_den, &output_filename, - &nr_taken); + &nr_oid_taken); /* * Because the oidset iterator has random @@ -1930,6 +2735,8 @@ static enum gh__error_code do_sub_cmd__get(int argc, const char **argv) N_("number of objects to request at a time")), OPT_INTEGER('d', "depth", &gh__cmd_opts.depth, N_("Commit depth")), + OPT_INTEGER('r', "max-retries", &gh__cmd_opts.max_retries, + N_("retries for transient network errors")), OPT_END(), }; @@ -1937,7 +2744,7 @@ static enum gh__error_code do_sub_cmd__get(int argc, const char **argv) struct oidset oids = OIDSET_INIT; struct string_list result_list = STRING_LIST_INIT_DUP; enum gh__error_code ec = GH__ERROR_CODE__OK; - unsigned long nr_total; + unsigned long nr_oid_total; int k; trace2_cmd_mode("get"); @@ -1948,14 +2755,16 @@ static enum gh__error_code do_sub_cmd__get(int argc, const char **argv) argc = parse_options(argc, argv, NULL, get_options, get_usage, 0); if (gh__cmd_opts.depth < 1) gh__cmd_opts.depth = 1; + if (gh__cmd_opts.max_retries < 0) + gh__cmd_opts.max_retries = 0; finish_init(1); - nr_total = read_stdin_from_rev_list(&oids); + nr_oid_total = read_stdin_from_rev_list(&oids); trace2_region_enter("gvfs-helper", "get", NULL); - trace2_data_intmax("gvfs-helper", NULL, "get/nr_objects", nr_total); - do_fetch_oidset(&status, &oids, nr_total, &result_list); + trace2_data_intmax("gvfs-helper", NULL, "get/nr_objects", nr_oid_total); + do_fetch_oidset(&status, &oids, nr_oid_total, &result_list); trace2_region_leave("gvfs-helper", "get", NULL); ec = status.ec; @@ -1990,7 +2799,7 @@ static enum gh__error_code do_server_subprocess_get(void) int len; int err; int k; - unsigned long nr_total = 0; + unsigned long nr_oid_total = 0; /* * Inside the "get" command, we expect a list of OIDs 
@@ -2008,10 +2817,10 @@
 		}
 
 		if (!oidset_insert(&oids, &oid))
-			nr_total++;
+			nr_oid_total++;
 	}
 
-	if (!nr_total) {
+	if (!nr_oid_total) {
 		if (packet_write_fmt_gently(1, "ok\n")) {
 			error("server: cannot write 'get' result to client");
 			ec = GH__ERROR_CODE__SUBPROCESS_SYNTAX;
@@ -2021,8 +2830,8 @@
 	}
 
 	trace2_region_enter("gvfs-helper", "server/get", NULL);
-	trace2_data_intmax("gvfs-helper", NULL, "server/get/nr_objects", nr_total);
-	do_fetch_oidset(&status, &oids, nr_total, &result_list);
+	trace2_data_intmax("gvfs-helper", NULL, "server/get/nr_objects", nr_oid_total);
+	do_fetch_oidset(&status, &oids, nr_oid_total, &result_list);
 	trace2_region_leave("gvfs-helper", "server/get", NULL);
 
 	/*
@@ -2180,6 +2989,8 @@ static enum gh__error_code do_sub_cmd__server(int argc, const char **argv)
 			   N_("number of objects to request at a time")),
 		OPT_INTEGER('d', "depth", &gh__cmd_opts.depth,
 			    N_("Commit depth")),
+		OPT_INTEGER('r', "max-retries", &gh__cmd_opts.max_retries,
+			    N_("retries for transient network errors")),
 		OPT_END(),
 	};
 
@@ -2196,6 +3007,8 @@ static enum gh__error_code do_sub_cmd__server(int argc, const char **argv)
 	argc = parse_options(argc, argv, NULL, server_options, server_usage, 0);
 	if (gh__cmd_opts.depth < 1)
 		gh__cmd_opts.depth = 1;
+	if (gh__cmd_opts.max_retries < 0)
+		gh__cmd_opts.max_retries = 0;
 
 	finish_init(1);
 
@@ -2285,13 +3098,23 @@ int cmd_main(int argc, const char **argv)
 	setup_git_directory_gently(NULL);
 
-	git_config(git_default_config, NULL);
-
 	/* Set any non-zero initial values in gh__cmd_opts. */
-	gh__cmd_opts.depth = 1;
+	gh__cmd_opts.depth = GH__DEFAULT_COMMIT_DEPTH;
 	gh__cmd_opts.block_size = GH__DEFAULT_BLOCK_SIZE;
+	gh__cmd_opts.max_retries = GH__DEFAULT_MAX_RETRIES;
+	gh__cmd_opts.max_transient_backoff_sec =
+		GH__DEFAULT_MAX_TRANSIENT_BACKOFF_SEC;
+
 	gh__cmd_opts.show_progress = !!isatty(2);
 
+	// TODO use existing gvfs config settings to override our GH__DEFAULT_
+	// TODO values in gh__cmd_opts.  (And maybe add/remove our command line
+	// TODO options for them.)
+	// TODO
+	// TODO See "scalar.max-retries" (and maybe "gvfs.max-retries")
+
+	git_config(git_default_config, NULL);
+
 	argc = parse_options(argc, argv, NULL, main_options, main_usage,
 			     PARSE_OPT_STOP_AT_NON_OPTION);
 	if (argc == 0)

From 6c5d207a13d2807513f45c7edc7347b4e210d0a8 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler
Date: Mon, 11 Nov 2019 15:09:31 -0500
Subject: [PATCH 080/104] gvfs-helper: add prefetch support

Teach gvfs-helper to support the "/gvfs/prefetch" REST API.

This includes a new `gvfs-helper prefetch --since=<timestamp>` command
line option and a new `objects.prefetch` verb in `gvfs-helper server`
mode.

If the `since` argument is omitted, `gvfs-helper` will search the local
shared-cache for the most recent prefetch packfile and start from there.

The <timestamp> is usually a seconds-since-epoch value, but may also be
a "friendly" date -- such as "midnight" or "yesterday" -- using the
existing date selection mechanism.

Add `gh_client__prefetch()` API to allow `git.exe` to easily call
prefetch (using the same long-running process as immediate and queued
object fetches).

Expanded t5799 unit tests to include prefetch tests.  Test setup now
also builds some commits-and-trees packfiles for testing purposes with
well-known timestamps.

Expanded t/helper/test-gvfs-protocol.exe to support the "/gvfs/prefetch"
REST API.
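For illustration only -- this sketch is not part of the change and the
variable names are hypothetical -- a caller in git.exe would use the
new API roughly like this:

    int nr_packs = 0;

    /*
     * Passing 0 lets gvfs-helper derive the timestamp from the
     * newest prefetch packfile already in the shared-cache ODB.
     */
    if (gh_client__prefetch(0, &nr_packs))
        die("prefetch failed");
    trace2_data_intmax("example", NULL, "prefetch/nr_packs", nr_packs);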
Massive refactor of existing packfile handling in gvfs-helper.c to reuse more code between "/gvfs/objects POST" and "/gvfs/prefetch". With this we now properly name packfiles with the checksum SHA1 rather than a date string. Refactor also addresses some of the confusing tempfile setup and install_ code processing (introduced to handle the ambiguity of how POST works with commit objects). Signed-off-by: Jeff Hostetler --- gvfs-helper-client.c | 129 +++- gvfs-helper-client.h | 18 + gvfs-helper.c | 1167 +++++++++++++++++++++++++-------- t/helper/test-gvfs-protocol.c | 316 ++++++++- t/t5799-gvfs-helper.sh | 204 ++++-- 5 files changed, 1494 insertions(+), 340 deletions(-) diff --git a/gvfs-helper-client.c b/gvfs-helper-client.c index 9bb880338876bf..d5ce9970de08c5 100644 --- a/gvfs-helper-client.c +++ b/gvfs-helper-client.c @@ -24,13 +24,14 @@ static struct hashmap gh_server__subprocess_map; static struct object_directory *gh_client__chosen_odb; /* - * The "objects" capability has 2 verbs: "get" and "post". + * The "objects" capability has verbs: "get" and "post" and "prefetch". */ #define CAP_OBJECTS (1u<<1) #define CAP_OBJECTS_NAME "objects" #define CAP_OBJECTS__VERB_GET1_NAME "get" #define CAP_OBJECTS__VERB_POST_NAME "post" +#define CAP_OBJECTS__VERB_PREFETCH_NAME "prefetch" static int gh_client__start_fn(struct subprocess_entry *subprocess) { @@ -129,6 +130,44 @@ static int gh_client__send__objects_get(struct child_process *process, return 0; } +/* + * Send a request to gvfs-helper to prefetch packfiles from either the + * cache-server or the main Git server using "/gvfs/prefetch". + * + * objects.prefetch LF + * [ LF] + * + */ +static int gh_client__send__objects_prefetch(struct child_process *process, + timestamp_t seconds_since_epoch) +{ + int err; + + /* + * We assume that all of the packet_ routines call error() + * so that we don't have to. + */ + + err = packet_write_fmt_gently( + process->in, + (CAP_OBJECTS_NAME "." CAP_OBJECTS__VERB_PREFETCH_NAME "\n")); + if (err) + return err; + + if (seconds_since_epoch) { + err = packet_write_fmt_gently(process->in, "%" PRItime "\n", + seconds_since_epoch); + if (err) + return err; + } + + err = packet_flush_gently(process->in); + if (err) + return err; + + return 0; +} + /* * Verify that the pathname found in the "odb" response line matches * what we requested. @@ -198,7 +237,7 @@ static void gh_client__update_packed_git(const char *line) } /* - * Both CAP_OBJECTS verbs return the same format response: + * CAP_OBJECTS verbs return the same format response: * * * * @@ -238,6 +277,8 @@ static int gh_client__objects__receive_response( const char *v1; char *line; int len; + int nr_loose = 0; + int nr_packfile = 0; int err = 0; while (1) { @@ -256,13 +297,13 @@ static int gh_client__objects__receive_response( else if (starts_with(line, "packfile")) { gh_client__update_packed_git(line); ghc |= GHC__CREATED__PACKFILE; - *p_nr_packfile += 1; + nr_packfile++; } else if (starts_with(line, "loose")) { gh_client__update_loose_cache(line); ghc |= GHC__CREATED__LOOSE; - *p_nr_loose += 1; + nr_loose++; } else if (starts_with(line, "ok")) @@ -276,6 +317,8 @@ static int gh_client__objects__receive_response( } *p_ghc = ghc; + *p_nr_loose = nr_loose; + *p_nr_packfile = nr_packfile; return err; } @@ -332,7 +375,7 @@ static struct gh_server__process *gh_client__find_long_running_process( /* * Find an existing long-running process with the above command * line -or- create a new long-running process for this and - * subsequent 'get' requests. 
+ * subsequent requests. */ if (!gh_server__subprocess_map_initialized) { gh_server__subprocess_map_initialized = 1; @@ -369,10 +412,14 @@ static struct gh_server__process *gh_client__find_long_running_process( void gh_client__queue_oid(const struct object_id *oid) { - // TODO consider removing this trace2. it is useful for interactive - // TODO debugging, but may generate way too much noise for a data - // TODO event. - trace2_printf("gh_client__queue_oid: %s", oid_to_hex(oid)); + /* + * Keep this trace as a printf only, so that it goes to the + * perf log, but not the event log. It is useful for interactive + * debugging, but generates way too much (unuseful) noise for the + * database. + */ + if (trace2_is_enabled()) + trace2_printf("gh_client__queue_oid: %s", oid_to_hex(oid)); if (!oidset_insert(&gh_client__oidset_queued, oid)) gh_client__oidset_count++; @@ -453,10 +500,14 @@ int gh_client__get_immediate(const struct object_id *oid, int nr_packfile = 0; int err = 0; - // TODO consider removing this trace2. it is useful for interactive - // TODO debugging, but may generate way too much noise for a data - // TODO event. - trace2_printf("gh_client__get_immediate: %s", oid_to_hex(oid)); + /* + * Keep this trace as a printf only, so that it goes to the + * perf log, but not the event log. It is useful for interactive + * debugging, but generates way too much (unuseful) noise for the + * database. + */ + if (trace2_is_enabled()) + trace2_printf("gh_client__get_immediate: %s", oid_to_hex(oid)); entry = gh_client__find_long_running_process(CAP_OBJECTS); if (!entry) @@ -485,3 +536,55 @@ int gh_client__get_immediate(const struct object_id *oid, return err; } + +/* + * Ask gvfs-helper to prefetch commits-and-trees packfiles since a + * given timestamp. + * + * If seconds_since_epoch is zero, gvfs-helper will scan the ODB for + * the last received prefetch and ask for ones newer than that. + */ +int gh_client__prefetch(timestamp_t seconds_since_epoch, + int *nr_packfiles_received) +{ + struct gh_server__process *entry; + struct child_process *process; + enum gh_client__created ghc; + int nr_loose = 0; + int nr_packfile = 0; + int err = 0; + + entry = gh_client__find_long_running_process(CAP_OBJECTS); + if (!entry) + return -1; + + trace2_region_enter("gh-client", "objects/prefetch", the_repository); + trace2_data_intmax("gh-client", the_repository, "prefetch/since", + seconds_since_epoch); + + process = &entry->subprocess.process; + + sigchain_push(SIGPIPE, SIG_IGN); + + err = gh_client__send__objects_prefetch(process, seconds_since_epoch); + if (!err) + err = gh_client__objects__receive_response( + process, &ghc, &nr_loose, &nr_packfile); + + sigchain_pop(SIGPIPE); + + if (err) { + subprocess_stop(&gh_server__subprocess_map, + (struct subprocess_entry *)entry); + FREE_AND_NULL(entry); + } + + trace2_data_intmax("gh-client", the_repository, + "prefetch/packfile_count", nr_packfile); + trace2_region_leave("gh-client", "objects/prefetch", the_repository); + + if (nr_packfiles_received) + *nr_packfiles_received = nr_packfile; + + return err; +} diff --git a/gvfs-helper-client.h b/gvfs-helper-client.h index c1e38fad75f841..7692534ecda54c 100644 --- a/gvfs-helper-client.h +++ b/gvfs-helper-client.h @@ -66,4 +66,22 @@ void gh_client__queue_oid_array(const struct object_id *oids, int oid_nr); */ int gh_client__drain_queue(enum gh_client__created *p_ghc); +/* + * Ask `gvfs-helper server` to fetch any "prefetch packs" + * available on the server more recent than the requested time. 
+ *
+ * If seconds_since_epoch is zero, gvfs-helper will scan the ODB for
+ * the last received prefetch and ask for ones newer than that.
+ *
+ * A long-running background process is used to make subsequent
+ * requests (either prefetch or regular immediate/queued requests)
+ * more efficient.
+ *
+ * One or more packfiles will be created in the shared-cache ODB.
+ *
+ * Returns 0 on success, -1 on error.  Optionally also returns the
+ * number of prefetch packs received.
+ */
+int gh_client__prefetch(timestamp_t seconds_since_epoch,
+			int *nr_packfiles_received);
+
 #endif /* GVFS_HELPER_CLIENT_H */
diff --git a/gvfs-helper.c b/gvfs-helper.c
index d3aa95cc53601d..129c2f429eb08e 100644
--- a/gvfs-helper.c
+++ b/gvfs-helper.c
@@ -22,7 +22,7 @@
 //
 //            error := verify cache-server and abort if not well-known.
 //
-//            trust := do not verify cache-server.  just use it.
+//            trust := do not verify cache-server.  just use it, if set.
 //
 //            disable := disable the cache-server and always use the main
 //                       Git server.
@@ -87,6 +87,24 @@
 //            Number of retries after transient network errors.
 //            Set to zero to disable such retries.
 //
+//     prefetch
+//
+//          Use "/gvfs/prefetch" REST API to fetch 1 or more commits-and-trees
+//          prefetch packs from the server.
+//
+//          <prefetch-options>:
+//
+//               --since=<t>            // defaults to "0"
+//
+//                   Time in seconds since the epoch.  If omitted or
+//                   zero, the timestamp from the newest prefetch
+//                   packfile found in the shared-cache ODB is used.
+//                   (This is based upon the packfile name, not the
+//                   mtime.)
+//
+//                   The GVFS Protocol defines this value as a way to
+//                   request cached packfiles NEWER THAN this timestamp.
+//
 //     server
 //
 //          Interactive/sub-process mode.  Listen for a series of commands
@@ -116,20 +134,36 @@
 //
 //          Each object will be created as a loose object in the ODB.
 //
+//          Create 1 or more loose objects in the shared-cache ODB.
+//          (The pathname of the selected ODB is reported at the
+//          beginning of the response; this should match the pathname
+//          given on the command line).
+//
+//          git> objects.get
+//          git> <oid>
+//          git> <oid>
+//          git> ...
+//          git> <oid>
+//          git> 0000
+//
+//          git< odb <directory>
+//          git< loose <oid>
+//          git< loose <oid>
+//          git< ...
+//          git< loose <oid>
+//          git< ok | partial | error <message>
+//          git< 0000
+//
 //     Interactive verb: objects.post
 //
 //          Fetch 1 or more objects, in bulk, using one or more
 //          "/gvfs/objects" POST requests.
 //
-//          For both verbs, if a cache-server is configured, try it first.
-//          Optionally fallback to the main Git server.
-//
 //          Create 1 or more loose objects and/or packfiles in the
-//          shared-cache ODB.  (The pathname of the selected ODB is
-//          reported at the beginning of the response; this should
-//          match the pathname given on the command line).
+//          shared-cache ODB.  A POST is allowed to respond with
+//          either loose or packed objects.
 //
-//          git> objects.get | objects.post
+//          git> objects.post
 //          git> <oid>
 //          git> <oid>
 //          git> ...
@@ -139,11 +173,31 @@
 //          git< odb <directory>
 //          git< loose <oid> | packfile <filename>
 //          git< loose <oid> | packfile <filename>
-//          gid< ...
+//          git< ...
 //          git< loose <oid> | packfile <filename>
 //          git< ok | partial | error <message>
 //          git< 0000
-//
+//
+//     Interactive verb: objects.prefetch
+//
+//          Fetch 1 or more prefetch packs using a "/gvfs/prefetch"
+//          request.
+//
+//          git> objects.prefetch
+//          git> <timestamp>            // optional
+//          git> 0000
+//
+//          git< odb <directory>
+//          git< packfile <filename>
+//          git< packfile <filename>
+//          git< ...
+//          git< packfile <filename>
+//          git< ok | error <message>
+//          git< 0000
+//
+//          If a cache-server is configured, try it first.
+//          Optionally fall back to the main Git server.
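+//          For example (hypothetical values), a client whose newest
+//          local prefetch pack is stamped 1593200000 would, in effect,
+//          issue:
+//
+//               GET /gvfs/prefetch?lastPackTimestamp=1593200000
+//
+//          and receive only packs newer than that timestamp.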
+// // [1] Documentation/technical/protocol-common.txt // [2] Documentation/technical/long-running-process-protocol.txt // [3] See GIT_TRACE_PACKET @@ -176,11 +230,15 @@ #include "oidset.h" #include "dir.h" #include "progress.h" +#include "packfile.h" + +#define TR2_CAT "gvfs-helper" static const char * const main_usage[] = { N_("git gvfs-helper [] config []"), N_("git gvfs-helper [] get []"), N_("git gvfs-helper [] post []"), + N_("git gvfs-helper [] prefetch []"), N_("git gvfs-helper [] server []"), NULL }; @@ -195,6 +253,11 @@ static const char *const objects_post_usage[] = { NULL }; +static const char *const prefetch_usage[] = { + N_("git gvfs-helper [] prefetch []"), + NULL +}; + static const char *const server_usage[] = { N_("git gvfs-helper [] server []"), NULL @@ -239,6 +302,7 @@ enum gh__error_code { GH__ERROR_CODE__COULD_NOT_INSTALL_PACKFILE = 11, GH__ERROR_CODE__SUBPROCESS_SYNTAX = 12, GH__ERROR_CODE__INDEX_PACK_FAILED = 13, + GH__ERROR_CODE__COULD_NOT_INSTALL_PREFETCH = 14, }; enum gh__cache_server_mode { @@ -303,6 +367,8 @@ static const char *gh__server_type_label[GH__SERVER_TYPE__NR] = { }; enum gh__objects_mode { + GH__OBJECTS_MODE__NONE = 0, + /* * Bulk fetch objects. * @@ -322,6 +388,12 @@ enum gh__objects_mode { * object treatment). */ GH__OBJECTS_MODE__GET, + + /* + * Fetch one or more pre-computed "prefetch packs" containing + * commits and trees. + */ + GH__OBJECTS_MODE__PREFETCH, }; struct gh__azure_throttle @@ -396,6 +468,7 @@ struct gh__request_params { int b_write_to_file; /* write to file=1 or strbuf=0 */ int b_permit_cache_server_if_defined; + enum gh__objects_mode objects_mode; enum gh__server_type server_type; int k_attempt; /* robust retry attempt */ @@ -410,15 +483,8 @@ struct gh__request_params { struct strbuf *buffer; /* for response content when strbuf */ struct strbuf tr2_label; /* for trace2 regions */ - struct strbuf loose_path; struct object_id loose_oid; - struct strbuf temp_path_pack; - struct strbuf temp_path_idx; - struct strbuf final_path_pack; - struct strbuf final_path_idx; - struct strbuf final_packfile_filename; - /* * Note that I am putting all of the progress-related instance data * inside the request-params in the hope that we can eventually @@ -458,13 +524,7 @@ struct gh__request_params { .tempfile = NULL, \ .buffer = NULL, \ .tr2_label = STRBUF_INIT, \ - .loose_path = STRBUF_INIT, \ .loose_oid = {{0}}, \ - .temp_path_pack = STRBUF_INIT, \ - .temp_path_idx = STRBUF_INIT, \ - .final_path_pack = STRBUF_INIT, \ - .final_path_idx = STRBUF_INIT, \ - .final_packfile_filename = STRBUF_INIT, \ .progress_state = GH__PROGRESS_STATE__START, \ .progress_base_phase2_msg = STRBUF_INIT, \ .progress_base_phase3_msg = STRBUF_INIT, \ @@ -489,12 +549,6 @@ static void gh__request_params__release(struct gh__request_params *params) params->buffer = NULL; /* we do not own this */ strbuf_release(¶ms->tr2_label); - strbuf_release(¶ms->loose_path); - strbuf_release(¶ms->temp_path_pack); - strbuf_release(¶ms->temp_path_idx); - strbuf_release(¶ms->final_path_pack); - strbuf_release(¶ms->final_path_idx); - strbuf_release(¶ms->final_packfile_filename); strbuf_release(¶ms->progress_base_phase2_msg); strbuf_release(¶ms->progress_base_phase3_msg); @@ -585,9 +639,7 @@ static void gh__response_status__zero(struct gh__response_status *s) s->azure = NULL; } -static void install_packfile(struct gh__request_params *params, - struct gh__response_status *status); -static void install_loose(struct gh__request_params *params, +static void install_result(struct gh__request_params 
*params, struct gh__response_status *status); /* @@ -624,7 +676,7 @@ static void log_e2eid(struct gh__request_params *params, strbuf_addstr(&key, "e2eid"); strbuf_addstr(&key, gh__server_type_label[params->server_type]); - trace2_data_string("gvfs-helper", NULL, key.buf, + trace2_data_string(TR2_CAT, NULL, key.buf, params->e2eid.buf); strbuf_release(&key); @@ -724,7 +776,7 @@ static void compute_retry_mode_from_http_response( status->retry = GH__RETRY_MODE__HTTP_429; status->ec = GH__ERROR_CODE__HTTP_429; - trace2_data_string("gvfs-helper", NULL, "error/http", + trace2_data_string(TR2_CAT, NULL, "error/http", status->error_message.buf); return; @@ -737,7 +789,7 @@ static void compute_retry_mode_from_http_response( status->retry = GH__RETRY_MODE__HTTP_503; status->ec = GH__ERROR_CODE__HTTP_503; - trace2_data_string("gvfs-helper", NULL, "error/http", + trace2_data_string(TR2_CAT, NULL, "error/http", status->error_message.buf); return; @@ -751,7 +803,7 @@ static void compute_retry_mode_from_http_response( status->retry = GH__RETRY_MODE__HARD_FAIL; status->ec = GH__ERROR_CODE__HTTP_OTHER; - trace2_data_string("gvfs-helper", NULL, "error/http", + trace2_data_string(TR2_CAT, NULL, "error/http", status->error_message.buf); return; } @@ -885,7 +937,7 @@ static void compute_retry_mode_from_curl_error( status->retry = GH__RETRY_MODE__HARD_FAIL; status->ec = GH__ERROR_CODE__CURL_ERROR; - trace2_data_string("gvfs-helper", NULL, "error/curl", + trace2_data_string(TR2_CAT, NULL, "error/curl", status->error_message.buf); return; @@ -895,7 +947,7 @@ static void compute_retry_mode_from_curl_error( status->retry = GH__RETRY_MODE__TRANSIENT; status->ec = GH__ERROR_CODE__CURL_ERROR; - trace2_data_string("gvfs-helper", NULL, "error/curl", + trace2_data_string(TR2_CAT, NULL, "error/curl", status->error_message.buf); return; } @@ -1089,7 +1141,7 @@ static void gh__run_one_slot(struct active_request_slot *slot, params->progress_state = GH__PROGRESS_STATE__START; strbuf_setlen(¶ms->e2eid, 0); - trace2_region_enter("gvfs-helper", key.buf, NULL); + trace2_region_enter(TR2_CAT, key.buf, NULL); if (!start_active_slot(slot)) { compute_retry_mode_from_curl_error(status, @@ -1113,7 +1165,7 @@ static void gh__run_one_slot(struct active_request_slot *slot, * (such as when we request a commit). 
*/ strbuf_addstr(&key, "/nr_bytes"); - trace2_data_intmax("gvfs-helper", NULL, + trace2_data_intmax(TR2_CAT, NULL, key.buf, status->bytes_received); strbuf_setlen(&key, old_len); @@ -1123,16 +1175,10 @@ static void gh__run_one_slot(struct active_request_slot *slot, if (params->progress) stop_progress(¶ms->progress); - if (status->ec == GH__ERROR_CODE__OK && params->b_write_to_file) { - if (params->b_is_post && - !strcmp(status->content_type.buf, - "application/x-git-packfile")) - install_packfile(params, status); - else - install_loose(params, status); - } + if (status->ec == GH__ERROR_CODE__OK && params->b_write_to_file) + install_result(params, status); - trace2_region_leave("gvfs-helper", key.buf, NULL); + trace2_region_leave(TR2_CAT, key.buf, NULL); strbuf_release(&key); } @@ -1279,7 +1325,7 @@ static void lookup_main_url(void) */ gh__global.main_url = transport_anonymize_url(gh__global.remote->url[0]); - trace2_data_string("gvfs-helper", NULL, "remote/url", gh__global.main_url); + trace2_data_string(TR2_CAT, NULL, "remote/url", gh__global.main_url); } static void do__http_get__gvfs_config(struct gh__response_status *status, @@ -1306,10 +1352,23 @@ static void select_cache_server(void) gh__global.cache_server_url = NULL; if (gh__cmd_opts.cache_server_mode == GH__CACHE_SERVER_MODE__DISABLE) { - trace2_data_string("gvfs-helper", NULL, "cache/url", "disabled"); + trace2_data_string(TR2_CAT, NULL, "cache/url", "disabled"); return; } + if (!gvfs_cache_server_url || !*gvfs_cache_server_url) { + switch (gh__cmd_opts.cache_server_mode) { + default: + case GH__CACHE_SERVER_MODE__TRUST_WITHOUT_VERIFY: + case GH__CACHE_SERVER_MODE__VERIFY_DISABLE: + trace2_data_string(TR2_CAT, NULL, "cache/url", "unset"); + return; + + case GH__CACHE_SERVER_MODE__VERIFY_ERROR: + die("cache-server not set"); + } + } + /* * If the cache-server and main Git server have the same URL, we * can silently disable the cache-server (by NOT setting the field @@ -1317,14 +1376,14 @@ static void select_cache_server(void) */ if (!strcmp(gvfs_cache_server_url, gh__global.main_url)) { gh__cmd_opts.try_fallback = 0; - trace2_data_string("gvfs-helper", NULL, "cache/url", "same"); + trace2_data_string(TR2_CAT, NULL, "cache/url", "same"); return; } if (gh__cmd_opts.cache_server_mode == GH__CACHE_SERVER_MODE__TRUST_WITHOUT_VERIFY) { gh__global.cache_server_url = gvfs_cache_server_url; - trace2_data_string("gvfs-helper", NULL, "cache/url", + trace2_data_string(TR2_CAT, NULL, "cache/url", gvfs_cache_server_url); return; } @@ -1365,7 +1424,7 @@ static void select_cache_server(void) if (match) { gh__global.cache_server_url = gvfs_cache_server_url; - trace2_data_string("gvfs-helper", NULL, "cache/url", + trace2_data_string(TR2_CAT, NULL, "cache/url", gvfs_cache_server_url); } @@ -1389,7 +1448,7 @@ static void select_cache_server(void) else warning("could not verify cache-server '%s'", gvfs_cache_server_url); - trace2_data_string("gvfs-helper", NULL, "cache/url", + trace2_data_string(TR2_CAT, NULL, "cache/url", "disabled"); } @@ -1575,27 +1634,14 @@ static void select_odb(void) } /* - * Create a tempfile to stream the packfile into. - * - * We create a tempfile in the chosen ODB directory and let CURL - * automatically stream data to the file. If successful, we can - * later rename it to a proper .pack and run "git index-pack" on - * it to create the corresponding .idx file. 
- * - * TODO I would rather to just stream the packfile directly into - * TODO "git index-pack --stdin" (and save some I/O) because it - * TODO will automatically take care of the rename of both files - * TODO and any other cleanup. BUT INDEX-PACK WILL ONLY WRITE - * TODO TO THE PRIMARY ODB -- it will not write into the alternates - * TODO (this is considered bad form). So we would need to add - * TODO an option to index-pack to handle this. I don't want to - * TODO deal with this issue right now. - * - * TODO Consider using lockfile for this rather than naked tempfile. + * Create a unique tempfile or tempfile-pair inside the + * tempPacks directory. */ -static void create_tempfile_for_packfile( - struct gh__request_params *params, - struct gh__response_status *status) +static void my_create_tempfile( + struct gh__response_status *status, + int b_fdopen, + const char *suffix1, struct tempfile **t1, + const char *suffix2, struct tempfile **t2) { static unsigned int nth = 0; static struct timeval tv = {0}; @@ -1605,15 +1651,15 @@ static void create_tempfile_for_packfile( struct strbuf basename = STRBUF_INIT; struct strbuf buf = STRBUF_INIT; - int len_p; + int len_tp; enum scld_error scld; gh__response_status__zero(status); if (!nth) { /* - * Create a string to use in the name of all packfiles - * created by this process. + * Create a unique string to use in the name of all + * tempfiles created by this process. */ gettimeofday(&tv, NULL); secs = tv.tv_sec; @@ -1626,84 +1672,114 @@ static void create_tempfile_for_packfile( } /* - * Create a for this packfile using a series number , - * so that all of the chunks we download will group together. + * Create a for this instance/pair using a series + * number . */ - strbuf_addf(&basename, "vfs-%s-%04d", date, nth++); + strbuf_addf(&basename, "t-%s-%04d", date, nth++); + + if (!suffix1 || !*suffix1) + suffix1 = "temp"; /* - * We will stream the data into a managed tempfile() in: + * Create full pathname as: * - * "/pack/tempPacks/vfs--.temp" + * "/pack/tempPacks/." */ strbuf_setlen(&buf, 0); strbuf_addbuf(&buf, &gh__global.buf_odb_path); strbuf_complete(&buf, '/'); - strbuf_addstr(&buf, "pack/"); - len_p = buf.len; - strbuf_addstr(&buf, "tempPacks/"); - strbuf_addbuf(&buf, &basename); - strbuf_addstr(&buf, ".temp"); + strbuf_addstr(&buf, "pack/tempPacks/"); + len_tp = buf.len; + strbuf_addf( &buf, "%s.%s", basename.buf, suffix1); scld = safe_create_leading_directories(buf.buf); if (scld != SCLD_OK && scld != SCLD_EXISTS) { strbuf_addf(&status->error_message, - "could not create directory for packfile: '%s'", + "could not create directory for tempfile: '%s'", buf.buf); status->ec = GH__ERROR_CODE__COULD_NOT_CREATE_TEMPFILE; goto cleanup; } - params->tempfile = create_tempfile(buf.buf); - if (!params->tempfile) { + *t1 = create_tempfile(buf.buf); + if (!*t1) { strbuf_addf(&status->error_message, - "could not create tempfile for packfile: '%s'", + "could not create tempfile: '%s'", buf.buf); status->ec = GH__ERROR_CODE__COULD_NOT_CREATE_TEMPFILE; goto cleanup; } - - fdopen_tempfile(params->tempfile, "w"); - - /* - * After the download is complete, we will need to steal the file - * from the tempfile() class (so that it doesn't magically delete - * it when we close the file handle) and then index it. - * - * We do this into the tempPacks directory to avoid contaminating - * the real pack directory until we know there is no corruption. 
- * - * "/pack/tempPacks/vfs--.temp.pack" - * "/pack/tempPacks/vfs--.temp.idx" - */ - strbuf_setlen(¶ms->temp_path_pack, 0); - strbuf_addf(¶ms->temp_path_pack, "%s.pack", buf.buf); - - strbuf_setlen(¶ms->temp_path_idx, 0); - strbuf_addf(¶ms->temp_path_idx, "%s.idx", buf.buf); + if (b_fdopen) + fdopen_tempfile(*t1, "w"); /* - * Later, if all goes well, we will install them as: + * Optionally create a peer tempfile with the same basename. + * (This is useful for prefetching .pack and .idx pairs.) * - * "/pack/vfs--.pack" - * "/pack/vfs--.idx" + * "/pack/tempPacks/." */ - strbuf_setlen(&buf, len_p); - strbuf_setlen(¶ms->final_path_pack, 0); - strbuf_addf(¶ms->final_path_pack, "%s%s.pack", - buf.buf, basename.buf); - strbuf_setlen(¶ms->final_path_idx, 0); - strbuf_addf(¶ms->final_path_idx, "%s%s.idx", - buf.buf, basename.buf); - strbuf_setlen(¶ms->final_packfile_filename, 0); - strbuf_addf(¶ms->final_packfile_filename, "%s.pack", - basename.buf); + if (suffix2 && *suffix2 && t2) { + strbuf_setlen(&buf, len_tp); + strbuf_addf( &buf, "%s.%s", basename.buf, suffix2); + + *t2 = create_tempfile(buf.buf); + if (!*t2) { + strbuf_addf(&status->error_message, + "could not create tempfile: '%s'", + buf.buf); + status->ec = GH__ERROR_CODE__COULD_NOT_CREATE_TEMPFILE; + goto cleanup; + } + if (b_fdopen) + fdopen_tempfile(*t2, "w"); + } cleanup: strbuf_release(&buf); strbuf_release(&basename); } +/* + * Create pathnames to the final location of the .pack and .idx + * files in the ODB. These are of the form: + * + * "/pack/-[-]." + * + * For example, for prefetch packs, will be the epoch + * timestamp and will be the packfile hash. + */ +static void create_final_packfile_pathnames( + const char *term_1, const char *term_2, const char *term_3, + struct strbuf *pack_path, struct strbuf *idx_path, + struct strbuf *pack_filename) +{ + struct strbuf base = STRBUF_INIT; + struct strbuf path = STRBUF_INIT; + + if (term_3 && *term_3) + strbuf_addf(&base, "%s-%s-%s", term_1, term_2, term_3); + else + strbuf_addf(&base, "%s-%s", term_1, term_2); + + strbuf_setlen(pack_filename, 0); + strbuf_addf( pack_filename, "%s.pack", base.buf); + + strbuf_addbuf(&path, &gh__global.buf_odb_path); + strbuf_complete(&path, '/'); + strbuf_addstr(&path, "pack/"); + + strbuf_setlen(pack_path, 0); + strbuf_addbuf(pack_path, &path); + strbuf_addf( pack_path, "%s.pack", base.buf); + + strbuf_setlen(idx_path, 0); + strbuf_addbuf(idx_path, &path); + strbuf_addf( idx_path, "%s.idx", base.buf); + + strbuf_release(&base); + strbuf_release(&path); +} + /* * Create a pathname to the loose object in the shared-cache ODB * with the given OID. Try to "mkdir -p" to ensure the parent @@ -1731,54 +1807,89 @@ static int create_loose_pathname_in_odb(struct strbuf *buf_path, return 0; } -/* - * Create a tempfile to stream a loose object into. - * - * We create a tempfile in the chosen ODB directory and let CURL - * automatically stream data to the file. - * - * We put it directly in the "/xx/" directory. 
- */ -static void create_tempfile_for_loose( - struct gh__request_params *params, - struct gh__response_status *status) +static void my_run_index_pack(struct gh__request_params *params, + struct gh__response_status *status, + const struct strbuf *temp_path_pack, + const struct strbuf *temp_path_idx, + struct strbuf *packfile_checksum) { - static int nth = 0; - struct strbuf buf_path = STRBUF_INIT; + struct child_process ip = CHILD_PROCESS_INIT; + struct strbuf ip_stdout = STRBUF_INIT; - gh__response_status__zero(status); + argv_array_push(&ip.args, "git"); + argv_array_push(&ip.args, "index-pack"); + if (gh__cmd_opts.show_progress) + argv_array_push(&ip.args, "-v"); + argv_array_pushl(&ip.args, "-o", temp_path_idx->buf, NULL); + argv_array_push(&ip.args, temp_path_pack->buf); + ip.no_stdin = 1; + ip.out = -1; + ip.err = -1; - if (create_loose_pathname_in_odb(&buf_path, ¶ms->loose_oid)) { + if (pipe_command(&ip, NULL, 0, &ip_stdout, 0, NULL, 0)) { + unlink(temp_path_pack->buf); + unlink(temp_path_idx->buf); strbuf_addf(&status->error_message, - "cannot create directory for loose object '%s'", - buf_path.buf); - status->ec = GH__ERROR_CODE__COULD_NOT_CREATE_TEMPFILE; + "index-pack failed on '%s'", + temp_path_pack->buf); + /* + * Lets assume that index-pack failed because the + * downloaded file is corrupt (truncated). + * + * Retry it as if the network had dropped. + */ + status->retry = GH__RETRY_MODE__TRANSIENT; + status->ec = GH__ERROR_CODE__INDEX_PACK_FAILED; goto cleanup; } - /* Remember the full path of the final destination. */ - strbuf_setlen(¶ms->loose_path, 0); - strbuf_addbuf(¶ms->loose_path, &buf_path); + if (packfile_checksum) { + /* + * stdout from index-pack should have the packfile hash. + * Extract it and use it in the final packfile name. + * + * TODO What kind of validation should we do on the + * TODO string and is there ever any other output besides + * TODO just the checksum ? + */ + strbuf_trim_trailing_newline(&ip_stdout); + + strbuf_addbuf(packfile_checksum, &ip_stdout); + } - /* - * Build a unique tempfile pathname based upon it. We avoid - * using lockfiles to avoid issues with stale locks after - * crashes. 
- */ - strbuf_addf(&buf_path, ".%08u.%.06u.temp", getpid(), nth++); +cleanup: + strbuf_release(&ip_stdout); + child_process_clear(&ip); +} - params->tempfile = create_tempfile(buf_path.buf); - if (!params->tempfile) { - strbuf_addstr(&status->error_message, - "could not create tempfile for loose object"); - status->ec = GH__ERROR_CODE__COULD_NOT_CREATE_TEMPFILE; - goto cleanup; +static void my_finalize_packfile(struct gh__request_params *params, + struct gh__response_status *status, + const struct strbuf *temp_path_pack, + const struct strbuf *temp_path_idx, + struct strbuf *final_path_pack, + struct strbuf *final_path_idx, + struct strbuf *final_filename) +{ + if (finalize_object_file(temp_path_pack->buf, final_path_pack->buf) || + finalize_object_file(temp_path_idx->buf, final_path_idx->buf)) { + unlink(temp_path_pack->buf); + unlink(temp_path_idx->buf); + unlink(final_path_pack->buf); + unlink(final_path_idx->buf); + strbuf_addf(&status->error_message, + "could not install packfile '%s'", + final_path_pack->buf); + status->ec = GH__ERROR_CODE__COULD_NOT_INSTALL_PACKFILE; + return; } - fdopen_tempfile(params->tempfile, "w"); + if (params->result_list) { + struct strbuf result_msg = STRBUF_INIT; -cleanup: - strbuf_release(&buf_path); + strbuf_addf(&result_msg, "packfile %s", final_filename->buf); + string_list_append(params->result_list, result_msg.buf); + strbuf_release(&result_msg); + } } /* @@ -1788,93 +1899,335 @@ static void create_tempfile_for_loose( static void install_packfile(struct gh__request_params *params, struct gh__response_status *status) { - struct child_process ip = CHILD_PROCESS_INIT; + struct strbuf temp_path_pack = STRBUF_INIT; + struct strbuf temp_path_idx = STRBUF_INIT; + struct strbuf packfile_checksum = STRBUF_INIT; + struct strbuf final_path_pack = STRBUF_INIT; + struct strbuf final_path_idx = STRBUF_INIT; + struct strbuf final_filename = STRBUF_INIT; + + gh__response_status__zero(status); /* - * When we request more than 1 object, the server should always - * send us a packfile. + * After the download is complete, we will need to steal the file + * from the tempfile() class (so that it doesn't magically delete + * it when we close the file handle) and then index it. 
*/ - if (strcmp(status->content_type.buf, - "application/x-git-packfile")) { - strbuf_addf(&status->error_message, - "install_packfile: received unknown content-type '%s'", - status->content_type.buf); - status->ec = GH__ERROR_CODE__UNEXPECTED_CONTENT_TYPE; - goto cleanup; - } - - gh__response_status__zero(status); + strbuf_addf(&temp_path_pack, "%s.pack", + get_tempfile_path(params->tempfile)); + strbuf_addf(&temp_path_idx, "%s.idx", + get_tempfile_path(params->tempfile)); if (rename_tempfile(¶ms->tempfile, - params->temp_path_pack.buf) == -1) { + temp_path_pack.buf) == -1) { strbuf_addf(&status->error_message, "could not rename packfile to '%s'", - params->temp_path_pack.buf); + temp_path_pack.buf); status->ec = GH__ERROR_CODE__COULD_NOT_INSTALL_PACKFILE; goto cleanup; } - argv_array_push(&ip.args, "index-pack"); - if (gh__cmd_opts.show_progress) - argv_array_push(&ip.args, "-v"); - argv_array_pushl(&ip.args, "-o", params->temp_path_idx.buf, NULL); - argv_array_push(&ip.args, params->temp_path_pack.buf); - ip.git_cmd = 1; - ip.no_stdin = 1; - ip.no_stdout = 1; + my_run_index_pack(params, status, &temp_path_pack, &temp_path_idx, + &packfile_checksum); + if (status->ec != GH__ERROR_CODE__OK) + goto cleanup; + + create_final_packfile_pathnames("vfs", packfile_checksum.buf, NULL, + &final_path_pack, &final_path_idx, + &final_filename); + my_finalize_packfile(params, status, + &temp_path_pack, &temp_path_idx, + &final_path_pack, &final_path_idx, + &final_filename); + +cleanup: + strbuf_release(&temp_path_pack); + strbuf_release(&temp_path_idx); + strbuf_release(&packfile_checksum); + strbuf_release(&final_path_pack); + strbuf_release(&final_path_idx); + strbuf_release(&final_filename); +} + +/* + * bswap.h only defines big endian functions. + * The GVFS Protocol defines fields in little endian. + */ +static inline uint64_t my_get_le64(uint64_t le_val) +{ +#if GIT_BYTE_ORDER == GIT_LITTLE_ENDIAN + return le_val; +#else + return default_bswap64(le_val); +#endif +} + +#define MY_MIN(x,y) (((x) < (y)) ? (x) : (y)) +#define MY_MAX(x,y) (((x) > (y)) ? (x) : (y)) + +/* + * Copy the `nr_bytes_total` from `fd_in` to `fd_out`. + * + * This could be used to extract a single packfile from + * a multipart file, for example. + */ +static int my_copy_fd_len(int fd_in, int fd_out, ssize_t nr_bytes_total) +{ + char buffer[8192]; + + while (nr_bytes_total > 0) { + ssize_t len_to_read = MY_MIN(nr_bytes_total, sizeof(buffer)); + ssize_t nr_read = xread(fd_in, buffer, len_to_read); + + if (!nr_read) + break; + if (nr_read < 0) + return -1; + + if (write_in_full(fd_out, buffer, nr_read) < 0) + return -1; + + nr_bytes_total -= nr_read; + } + + return 0; +} + +/* + * Copy the `nr_bytes_total` from `fd_in` to `fd_out` AND save the + * final `tail_len` bytes in the given buffer. + * + * This could be used to extract a single packfile from + * a multipart file and read the final SHA into the buffer. + */ +static int my_copy_fd_len_tail(int fd_in, int fd_out, ssize_t nr_bytes_total, + unsigned char *buf_tail, ssize_t tail_len) +{ + memset(buf_tail, 0, tail_len); + + if (nr_bytes_total < tail_len) + return my_copy_fd_len(fd_in, fd_out, nr_bytes_total); + + if (my_copy_fd_len(fd_in, fd_out, (nr_bytes_total - tail_len)) < 0) + return -1; + + if (xread(fd_in, (char *)buf_tail, tail_len) != tail_len) + return -1; + + if (write_in_full(fd_out, buf_tail, tail_len) < 0) + return -1; + + return 0; +} + +/* + * See the protocol document for the per-packfile header. 
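+ *
+ * A sketch of the on-the-wire layout (all fields little-endian; the
+ * timestamp value is hypothetical):
+ *
+ *     [offset  0] timestamp   e.g. 1593200000 (seconds since epoch)
+ *     [offset  8] pack_len    number of .pack bytes that follow
+ *     [offset 16] idx_len     number of .idx bytes that follow the
+ *                             .pack data, or all-ones when the server
+ *                             sends no .idx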
+ */ +struct ph { + uint64_t timestamp; + uint64_t pack_len; + uint64_t idx_len; +}; + +/* + * Extract the next packfile from the multipack. + */ +static void extract_packfile_from_multipack( + struct gh__request_params *params, + struct gh__response_status *status, + int fd_multipack, + unsigned short k) +{ + struct ph ph; + struct tempfile *tempfile_pack = NULL; + struct tempfile *tempfile_idx = NULL; + int result = -1; + int b_no_idx_in_multipack; + struct object_id packfile_checksum; + char hex_checksum[GIT_MAX_HEXSZ + 1]; + struct strbuf buf_timestamp = STRBUF_INIT; + struct strbuf temp_path_pack = STRBUF_INIT; + struct strbuf temp_path_idx = STRBUF_INIT; + struct strbuf final_path_pack = STRBUF_INIT; + struct strbuf final_path_idx = STRBUF_INIT; + struct strbuf final_filename = STRBUF_INIT; + + if (xread(fd_multipack, &ph, sizeof(ph)) != sizeof(ph)) { + strbuf_addf(&status->error_message, + "could not read header for packfile[%d] in multipack", + k); + status->ec = GH__ERROR_CODE__COULD_NOT_INSTALL_PREFETCH; + goto done; + } + + ph.timestamp = my_get_le64(ph.timestamp); + ph.pack_len = my_get_le64(ph.pack_len); + ph.idx_len = my_get_le64(ph.idx_len); + + if (!ph.pack_len) { + strbuf_addf(&status->error_message, + "packfile[%d]: zero length packfile?", k); + status->ec = GH__ERROR_CODE__COULD_NOT_INSTALL_PREFETCH; + goto done; + } + + b_no_idx_in_multipack = (ph.idx_len == maximum_unsigned_value_of_type(uint64_t) || + ph.idx_len == 0); + + if (b_no_idx_in_multipack) { + my_create_tempfile(status, 0, "pack", &tempfile_pack, NULL, NULL); + if (!tempfile_pack) + goto done; + } else { + /* create a pair of tempfiles with the same basename */ + my_create_tempfile(status, 0, "pack", &tempfile_pack, "idx", &tempfile_idx); + if (!tempfile_pack || !tempfile_idx) + goto done; + } /* - * Note that I DO NOT have a trace2 region around the - * index-pack process by itself. Currently, we are inside the - * trace2 region for running the request and that's fine. - * Later, if/when we stream the download directly to - * index-pack, it will be inside under the same region anyway. - * So, I'm not going to introduce it here. + * Copy the current packfile from the open stream and capture + * the checksum. + * + * TODO This assumes that the checksum is SHA1. Fix this if/when + * TODO Git converts to SHA256. */ - if (run_command(&ip)) { - unlink(params->temp_path_pack.buf); - unlink(params->temp_path_idx.buf); + result = my_copy_fd_len_tail(fd_multipack, + get_tempfile_fd(tempfile_pack), + ph.pack_len, + packfile_checksum.hash, + GIT_SHA1_RAWSZ); + if (result < 0){ strbuf_addf(&status->error_message, - "index-pack failed on '%s'", - params->temp_path_pack.buf); + "could not extract packfile[%d] from multipack", + k); + goto done; + } + strbuf_addstr(&temp_path_pack, get_tempfile_path(tempfile_pack)); + close_tempfile_gently(tempfile_pack); + + oid_to_hex_r(hex_checksum, &packfile_checksum); + + if (b_no_idx_in_multipack) { /* - * Lets assume that index-pack failed because the - * downloaded file is corrupt (truncated). - * - * Retry it as if the network had dropped. + * The server did not send the corresponding .idx, so + * we have to compute it ourselves. 
*/ - status->retry = GH__RETRY_MODE__TRANSIENT; - status->ec = GH__ERROR_CODE__INDEX_PACK_FAILED; + strbuf_addbuf(&temp_path_idx, &temp_path_pack); + strbuf_strip_suffix(&temp_path_idx, ".pack"); + strbuf_addstr(&temp_path_idx, ".idx"); + + my_run_index_pack(params, status, + &temp_path_pack, &temp_path_idx, + NULL); + if (status->ec != GH__ERROR_CODE__OK) + goto done; + + } else { + /* + * Server send the .idx immediately after the .pack in the + * data stream. I'm tempted to verify it, but that defeats + * the purpose of having it cached... + */ + if (my_copy_fd_len(fd_multipack, get_tempfile_fd(tempfile_idx), + ph.idx_len) < 0) { + strbuf_addf(&status->error_message, + "could not extract index[%d] in multipack", + k); + status->ec = GH__ERROR_CODE__COULD_NOT_INSTALL_PREFETCH; + goto done; + } + + strbuf_addstr(&temp_path_idx, get_tempfile_path(tempfile_idx)); + close_tempfile_gently(tempfile_idx); + } + + strbuf_addf(&buf_timestamp, "%u", (unsigned int)ph.timestamp); + create_final_packfile_pathnames("prefetch", buf_timestamp.buf, hex_checksum, + &final_path_pack, &final_path_idx, + &final_filename); + + my_finalize_packfile(params, status, + &temp_path_pack, &temp_path_idx, + &final_path_pack, &final_path_idx, + &final_filename); + +done: + delete_tempfile(&tempfile_pack); + delete_tempfile(&tempfile_idx); + strbuf_release(&temp_path_pack); + strbuf_release(&temp_path_idx); + strbuf_release(&final_path_pack); + strbuf_release(&final_path_idx); + strbuf_release(&final_filename); +} + +/* + * Cut apart the received multipart response into individual packfiles + * and install each one. + */ +static void install_prefetch(struct gh__request_params *params, + struct gh__response_status *status) +{ + static unsigned char v1_h[6] = { 'G', 'P', 'R', 'E', ' ', 0x01 }; + + struct mh { + unsigned char h[6]; + unsigned char np[2]; + }; + + struct mh mh; + unsigned short np; + unsigned short k; + int fd = -1; + + struct strbuf temp_path_mp = STRBUF_INIT; + + /* + * Steal the multi-part file from the tempfile class. 
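+	 *
+	 * The multipart body parsed below has this shape (a sketch
+	 * based on the version-1 signature and count checked below):
+	 *
+	 *     "GPRE \x01"                  6-byte signature + version
+	 *     np                           2-byte LE packfile count
+	 *     np * ( ph + pack [+ idx] )   per-packfile sections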
+ */ + strbuf_addf(&temp_path_mp, "%s.mp", get_tempfile_path(params->tempfile)); + if (rename_tempfile(¶ms->tempfile, temp_path_mp.buf) == -1) { + strbuf_addf(&status->error_message, + "could not rename prefetch tempfile to '%s'", + temp_path_mp.buf); + status->ec = GH__ERROR_CODE__COULD_NOT_INSTALL_PREFETCH; goto cleanup; } - if (finalize_object_file(params->temp_path_pack.buf, - params->final_path_pack.buf) || - finalize_object_file(params->temp_path_idx.buf, - params->final_path_idx.buf)) { - unlink(params->temp_path_pack.buf); - unlink(params->temp_path_idx.buf); - unlink(params->final_path_pack.buf); - unlink(params->final_path_idx.buf); + fd = git_open_cloexec(temp_path_mp.buf, O_RDONLY); + if (fd == -1) { strbuf_addf(&status->error_message, - "could not install packfile '%s'", - params->final_path_pack.buf); - status->ec = GH__ERROR_CODE__COULD_NOT_INSTALL_PACKFILE; + "could not reopen prefetch tempfile '%s'", + temp_path_mp.buf); + status->ec = GH__ERROR_CODE__COULD_NOT_INSTALL_PREFETCH; goto cleanup; } + if ((xread(fd, &mh, sizeof(mh)) != sizeof(mh)) || + (memcmp(mh.h, &v1_h, sizeof(mh.h)))) { + strbuf_addstr(&status->error_message, + "invalid prefetch multipart header"); + goto cleanup; + } - if (params->result_list) { - struct strbuf result_msg = STRBUF_INIT; + np = (unsigned short)mh.np[0] + ((unsigned short)mh.np[1] << 8); + if (np) + trace2_data_intmax(TR2_CAT, NULL, + "prefetch/packfile_count", np); - strbuf_addf(&result_msg, "packfile %s", - params->final_packfile_filename.buf); - string_list_append(params->result_list, result_msg.buf); - strbuf_release(&result_msg); + for (k = 0; k < np; k++) { + extract_packfile_from_multipack(params, status, fd, k); + if (status->ec != GH__ERROR_CODE__OK) + break; } cleanup: - child_process_clear(&ip); + if (fd != -1) + close(fd); + + unlink(temp_path_mp.buf); + strbuf_release(&temp_path_mp); } /* @@ -1884,21 +2237,7 @@ static void install_loose(struct gh__request_params *params, struct gh__response_status *status) { struct strbuf tmp_path = STRBUF_INIT; - - /* - * We expect a loose object when we do a GET -or- when we - * do a POST with only 1 object. - * - * Note that this content type is singular, not plural. - */ - if (strcmp(status->content_type.buf, - "application/x-git-loose-object")) { - strbuf_addf(&status->error_message, - "install_loose: received unknown content-type '%s'", - status->content_type.buf); - status->ec = GH__ERROR_CODE__UNEXPECTED_CONTENT_TYPE; - return; - } + struct strbuf loose_path = STRBUF_INIT; gh__response_status__zero(status); @@ -1923,11 +2262,19 @@ static void install_loose(struct gh__request_params *params, * collision we have to assume something else is happening in * parallel and we lost the race. And that's OK. 
 */
-	if (finalize_object_file(tmp_path.buf, params->loose_path.buf)) {
+	if (create_loose_pathname_in_odb(&loose_path, &params->loose_oid)) {
+		strbuf_addf(&status->error_message,
+			    "cannot create directory for loose object '%s'",
+			    loose_path.buf);
+		status->ec = GH__ERROR_CODE__COULD_NOT_INSTALL_LOOSE;
+		goto cleanup;
+	}
+
+	if (finalize_object_file(tmp_path.buf, loose_path.buf)) {
 		unlink(tmp_path.buf);
 		strbuf_addf(&status->error_message,
 			    "could not install loose object '%s'",
-			    params->loose_path.buf);
+			    loose_path.buf);
 		status->ec = GH__ERROR_CODE__COULD_NOT_INSTALL_LOOSE;
 		goto cleanup;
 	}
@@ -1943,6 +2290,57 @@ static void install_loose(struct gh__request_params *params,
 
 cleanup:
 	strbuf_release(&tmp_path);
+	strbuf_release(&loose_path);
+}
+
+static void install_result(struct gh__request_params *params,
+			   struct gh__response_status *status)
+{
+	if (params->objects_mode == GH__OBJECTS_MODE__PREFETCH) {
+		/*
+		 * The "gvfs/prefetch" API is the only thing that sends
+		 * these multi-part packfiles.  According to the protocol
+		 * documentation, they will have this x- content type.
+		 *
+		 * However, it appears that there is a BUG in the origin
+		 * server causing it to sometimes send "text/html" instead.
+		 * So, we silently handle both.
+		 */
+		if (!strcmp(status->content_type.buf,
+			    "application/x-gvfs-timestamped-packfiles-indexes")) {
+			install_prefetch(params, status);
+			return;
+		}
+
+		if (!strcmp(status->content_type.buf, "text/html")) {
+			install_prefetch(params, status);
+			return;
+		}
+	}
+
+	if (!strcmp(status->content_type.buf, "application/x-git-packfile")) {
+		assert(params->b_is_post);
+		assert(params->objects_mode == GH__OBJECTS_MODE__POST);
+
+		install_packfile(params, status);
+		return;
+	}
+
+	if (!strcmp(status->content_type.buf,
+		    "application/x-git-loose-object")) {
+		/*
+		 * We get these for "gvfs/objects" GET and POST requests.
+		 *
+		 * Note that this content type is singular, not plural.
+		 */
+		install_loose(params, status);
+		return;
+	}
+
+	strbuf_addf(&status->error_message,
+		    "install_result: received unknown content-type '%s'",
+		    status->content_type.buf);
+	status->ec = GH__ERROR_CODE__UNEXPECTED_CONTENT_TYPE;
+}
 
 /*
@@ -2008,7 +2406,7 @@ static size_t parse_resp_hdr(char *buffer, size_t size, size_t nitems,
 	 * Other servers have similar sets of values, but I haven't
 	 * compared them in depth.
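 *
 * A hypothetical example of the rate-limit headers handled below:
 *
 *     X-RateLimit-Resource: gvfs
 *     X-RateLimit-Delay: 1000
 *     X-RateLimit-Limit: 600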
*/ - // trace2_printf("Throttle: %s %s", key.buf, val.buf); + // trace2_printf("%s: Throttle: %s %s", TR2_CAT, key.buf, val.buf); if (!strcmp(key.buf, "X-RateLimit-Resource")) { /* @@ -2019,7 +2417,7 @@ static size_t parse_resp_hdr(char *buffer, size_t size, size_t nitems, strbuf_addstr(&key, "ratelimit/resource"); strbuf_addstr(&key, gh__server_type_label[params->server_type]); - trace2_data_string("gvfs-helper", NULL, key.buf, val.buf); + trace2_data_string(TR2_CAT, NULL, key.buf, val.buf); } else if (!strcmp(key.buf, "X-RateLimit-Delay")) { @@ -2035,7 +2433,7 @@ static size_t parse_resp_hdr(char *buffer, size_t size, size_t nitems, git_parse_ulong(val.buf, &tarpit_delay_ms); - trace2_data_intmax("gvfs-helper", NULL, key.buf, tarpit_delay_ms); + trace2_data_intmax(TR2_CAT, NULL, key.buf, tarpit_delay_ms); } else if (!strcmp(key.buf, "X-RateLimit-Limit")) { @@ -2133,7 +2531,7 @@ static void do_throttle_spin(struct gh__request_params *params, strbuf_addstr(®ion, tr2_label); strbuf_addstr(®ion, gh__server_type_label[params->server_type]); - trace2_region_enter("gvfs-helper", region.buf, NULL); + trace2_region_enter(TR2_CAT, region.buf, NULL); if (gh__cmd_opts.show_progress) progress = start_progress(progress_msg, duration); @@ -2149,7 +2547,7 @@ static void do_throttle_spin(struct gh__request_params *params, display_progress(progress, duration); stop_progress(&progress); - trace2_region_leave("gvfs-helper", region.buf, NULL); + trace2_region_leave(TR2_CAT, region.buf, NULL); strbuf_release(®ion); } @@ -2233,11 +2631,7 @@ static void do_req(const char *url_base, if (params->tempfile) delete_tempfile(¶ms->tempfile); - if (params->b_is_post) - create_tempfile_for_packfile(params, status); - - create_tempfile_for_loose(params, status); - + my_create_tempfile(status, 1, NULL, ¶ms->tempfile, NULL, NULL); if (!params->tempfile || status->ec != GH__ERROR_CODE__OK) return; } else { @@ -2525,6 +2919,7 @@ static void do__http_get__gvfs_config(struct gh__response_status *status, /* cache-servers do not handle gvfs/config REST calls */ params.b_permit_cache_server_if_defined = 0; params.buffer = config_data; + params.objects_mode = GH__OBJECTS_MODE__NONE; params.object_count = 1; /* a bit of a lie */ @@ -2589,6 +2984,7 @@ static void do__http_get__gvfs_object(struct gh__response_status *status, params.b_is_post = 0; params.b_write_to_file = 1; params.b_permit_cache_server_if_defined = 1; + params.objects_mode = GH__OBJECTS_MODE__GET; params.object_count = 1; @@ -2645,6 +3041,7 @@ static void do__http_post__gvfs_objects(struct gh__response_status *status, params.b_is_post = 1; params.b_write_to_file = 1; params.b_permit_cache_server_if_defined = 1; + params.objects_mode = GH__OBJECTS_MODE__POST; params.post_payload = &jw_req.json; @@ -2681,6 +3078,126 @@ static void do__http_post__gvfs_objects(struct gh__response_status *status, jw_release(&jw_req); } +struct find_last_data { + timestamp_t timestamp; + int nr_files; +}; + +static void cb_find_last(const char *full_path, size_t full_path_len, + const char *file_path, void *void_data) +{ + struct find_last_data *data = void_data; + const char *val; + timestamp_t t; + + if (!skip_prefix(file_path, "prefetch-", &val)) + return; + if (!ends_with(val, ".pack")) + return; + + data->nr_files++; + + /* + * We expect prefetch packfiles named like: + * + * prefetch--.pack + */ + t = strtol(val, NULL, 10); + + data->timestamp = MY_MAX(t, data->timestamp); +} + +/* + * Find the server timestamp on the last prefetch packfile that + * we have in the ODB. 
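+ *
+ * Prefetch packfiles are installed with names of the form
+ * "prefetch-<timestamp>-<checksum>.pack" (see
+ * create_final_packfile_pathnames() above), so a hypothetical
+ * "prefetch-1593200000-<checksum>.pack" yields t = 1593200000 via
+ * the strtol() in cb_find_last().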
+ * + * TODO I'm going to assume that all prefetch packs are created + * TODO equal and take the one with the largest t value. + * TODO + * TODO Or should we look for one marked with .keep ? + * + * TODO Alternatively, should we maybe get the 2nd largest? + * TODO (Or maybe subtract an hour delta from the largest?) + * TODO + * TODO Since each cache-server maintains its own set of prefetch + * TODO packs (such that 2 requests may hit 2 different + * TODO load-balanced servers and get different anwsers (with or + * TODO without clock-skew issues)), is it possible for us to miss + * TODO the absolute fringe of new commits and trees? + * TODO + * TODO That is, since the cache-server generates hourly prefetch + * TODO packs, we could do a prefetch and be up-to-date, but then + * TODO do the main fetch and hit a different cache/main server + * TODO and be behind by as much as an hour and have to demand- + * TODO load the commits/trees. + * + * TODO Alternatively, should we compare the last timestamp found + * TODO with "now" and silently do nothing if within an epsilon? + */ +static void find_last_prefetch_timestamp(timestamp_t *last) +{ + struct find_last_data data; + + memset(&data, 0, sizeof(data)); + + for_each_file_in_pack_dir(gh__global.buf_odb_path.buf, cb_find_last, &data); + + *last = data.timestamp; +} + +/* + * Call "gvfs/prefetch[?lastPackTimestamp=]" REST API to + * fetch a series of packfiles and write them to the ODB. + * + * Return a list of packfile names. + */ +static void do__http_get__gvfs_prefetch(struct gh__response_status *status, + timestamp_t seconds_since_epoch, + struct string_list *result_list) +{ + struct gh__request_params params = GH__REQUEST_PARAMS_INIT; + struct strbuf component_url = STRBUF_INIT; + + gh__response_status__zero(status); + + strbuf_addstr(&component_url, "gvfs/prefetch"); + + if (!seconds_since_epoch) + find_last_prefetch_timestamp(&seconds_since_epoch); + if (seconds_since_epoch) + strbuf_addf(&component_url, "?lastPackTimestamp=%"PRItime, + seconds_since_epoch); + + params.b_is_post = 0; + params.b_write_to_file = 1; + params.b_permit_cache_server_if_defined = 1; + params.objects_mode = GH__OBJECTS_MODE__PREFETCH; + + params.object_count = -1; + + params.result_list = result_list; + + params.headers = http_copy_default_headers(); + params.headers = curl_slist_append(params.headers, + "X-TFS-FedAuthRedirect: Suppress"); + params.headers = curl_slist_append(params.headers, + "Pragma: no-cache"); + params.headers = curl_slist_append(params.headers, + "Accept: application/x-gvfs-timestamped-packfiles-indexes"); + + if (gh__cmd_opts.show_progress) + strbuf_addf(¶ms.progress_base_phase3_msg, + "Prefetch %"PRItime" (%s)", + seconds_since_epoch, + show_date(seconds_since_epoch, 0, + DATE_MODE(ISO8601))); + + do_req__with_fallback(component_url.buf, ¶ms, status); + + gh__request_params__release(¶ms); + strbuf_release(&component_url); +} + /* * Drive one or more HTTP GET requests to fetch the objects * in the given OIDSET. These are received into loose objects. @@ -2981,7 +3498,83 @@ static enum gh__error_code do_sub_cmd__post(int argc, const char **argv) } /* - * Handle the 'objects.get' and 'objects.post' verbs in "server mode". + * Interpret the given string as a timestamp and compute an absolute + * UTC-seconds-since-epoch value (and without TZ). + * + * Note that the gvfs/prefetch API only accepts seconds since epoch, + * so that is all we really need here. 
But there is a tradition of + * various Git commands allowing a variety of formats for args like + * this. For example, see the `--date` arg in `git commit`. We allow + * these other forms mainly for testing purposes. + */ +static int my_parse_since(const char *since, timestamp_t *p_timestamp) +{ + int offset = 0; + int errors = 0; + unsigned long t; + + if (!parse_date_basic(since, p_timestamp, &offset)) + return 0; + + t = approxidate_careful(since, &errors); + if (!errors) { + *p_timestamp = t; + return 0; + } + + return -1; +} + +/* + * Ask the server for all available packfiles -or- all available since + * the given timestamp. + */ +static enum gh__error_code do_sub_cmd__prefetch(int argc, const char **argv) +{ + static const char *since_str; + static struct option prefetch_options[] = { + OPT_STRING(0, "since", &since_str, N_("since"), N_("seconds since epoch")), + OPT_END(), + }; + + struct gh__response_status status = GH__RESPONSE_STATUS_INIT; + struct string_list result_list = STRING_LIST_INIT_DUP; + enum gh__error_code ec = GH__ERROR_CODE__OK; + timestamp_t seconds_since_epoch = 0; + int k; + + trace2_cmd_mode("prefetch"); + + if (argc > 1 && !strcmp(argv[1], "-h")) + usage_with_options(prefetch_usage, prefetch_options); + + argc = parse_options(argc, argv, NULL, prefetch_options, prefetch_usage, 0); + if (since_str && *since_str) { + if (my_parse_since(since_str, &seconds_since_epoch)) + die("could not parse 'since' field"); + } + + finish_init(1); + + do__http_get__gvfs_prefetch(&status, seconds_since_epoch, &result_list); + + ec = status.ec; + + for (k = 0; k < result_list.nr; k++) + printf("%s\n", result_list.items[k].string); + + if (ec != GH__ERROR_CODE__OK) + error("prefetch: %s", status.error_message.buf); + + gh__response_status__release(&status); + string_list_clear(&result_list, 0); + + return ec; +} + +/* + * Handle the 'objects.get' and 'objects.post' and 'objects.prefetch' + * verbs in "server mode". * * Only call error() and set ec for hard errors where we cannot * communicate correctly with the foreground client process. Pass any @@ -3001,45 +3594,73 @@ static enum gh__error_code do_server_subprocess__objects(const char *verb_line) int k; enum gh__objects_mode objects_mode; unsigned long nr_oid_total = 0; + timestamp_t seconds_since_epoch = 0; if (!strcmp(verb_line, "objects.get")) objects_mode = GH__OBJECTS_MODE__GET; else if (!strcmp(verb_line, "objects.post")) objects_mode = GH__OBJECTS_MODE__POST; + else if (!strcmp(verb_line, "objects.prefetch")) + objects_mode = GH__OBJECTS_MODE__PREFETCH; else { error("server: unexpected objects-mode verb '%s'", verb_line); ec = GH__ERROR_CODE__SUBPROCESS_SYNTAX; goto cleanup; } - while (1) { - len = packet_read_line_gently(0, NULL, &line); - if (len < 0 || !line) - break; + switch (objects_mode) { + case GH__OBJECTS_MODE__GET: + case GH__OBJECTS_MODE__POST: + while (1) { + len = packet_read_line_gently(0, NULL, &line); + if (len < 0 || !line) + break; - if (get_oid_hex(line, &oid)) { - error("server: invalid oid syntax '%s'", line); - ec = GH__ERROR_CODE__SUBPROCESS_SYNTAX; + if (get_oid_hex(line, &oid)) { + error("server: invalid oid syntax '%s'", line); + ec = GH__ERROR_CODE__SUBPROCESS_SYNTAX; + goto cleanup; + } + + if (!oidset_insert(&oids, &oid)) + nr_oid_total++; + } + + if (!nr_oid_total) { + /* if zero objects requested, trivial OK. 
*/ + if (packet_write_fmt_gently(1, "ok\n")) { + error("server: cannot write 'get' result to client"); + ec = GH__ERROR_CODE__SUBPROCESS_SYNTAX; + } else + ec = GH__ERROR_CODE__OK; goto cleanup; } - if (!oidset_insert(&oids, &oid)) - nr_oid_total++; - } + if (objects_mode == GH__OBJECTS_MODE__GET) + do__http_get__fetch_oidset(&status, &oids, + nr_oid_total, &result_list); + else + do__http_post__fetch_oidset(&status, &oids, + nr_oid_total, &result_list); + break; - if (!nr_oid_total) { - if (packet_write_fmt_gently(1, "ok\n")) { - error("server: cannot write 'get' result to client"); - ec = GH__ERROR_CODE__SUBPROCESS_SYNTAX; - } else - ec = GH__ERROR_CODE__OK; - goto cleanup; - } + case GH__OBJECTS_MODE__PREFETCH: + /* get optional timestamp line */ + while (1) { + len = packet_read_line_gently(0, NULL, &line); + if (len < 0 || !line) + break; - if (objects_mode == GH__OBJECTS_MODE__GET) - do__http_get__fetch_oidset(&status, &oids, nr_oid_total, &result_list); - else - do__http_post__fetch_oidset(&status, &oids, nr_oid_total, &result_list); + seconds_since_epoch = strtoul(line, NULL, 10); + } + + do__http_get__gvfs_prefetch(&status, seconds_since_epoch, + &result_list); + break; + + default: + BUG("unexpected object_mode in switch '%d'", objects_mode); + } /* * Write pathname of the ODB where we wrote all of the objects @@ -3263,12 +3884,16 @@ static enum gh__error_code do_sub_cmd(int argc, const char **argv) if (!strcmp(argv[0], "config")) return do_sub_cmd__config(argc, argv); + if (!strcmp(argv[0], "prefetch")) + return do_sub_cmd__prefetch(argc, argv); + + /* + * server mode is for talking with git.exe via the "gh_client_" API + * using packet-line format. + */ if (!strcmp(argv[0], "server")) return do_sub_cmd__server(argc, argv); - // TODO have "test" mode that could be used to drive - // TODO unit testing. - return GH__ERROR_CODE__USAGE; } diff --git a/t/helper/test-gvfs-protocol.c b/t/helper/test-gvfs-protocol.c index c73d5327815266..5913ffb5cada25 100644 --- a/t/helper/test-gvfs-protocol.c +++ b/t/helper/test-gvfs-protocol.c @@ -13,6 +13,7 @@ #include "dir.h" #include "json-writer.h" #include "oidset.h" +#include "packfile.h" #define TR2_CAT "test-gvfs-protocol" @@ -538,9 +539,6 @@ static enum worker_result send_loose_object(const struct object_id *oid, return send_http_error(1, 404, "Not Found", -1, WR_MAYHEM); } - trace2_printf("%s: OBJECT type=%d len=%ld '%.40s'", TR2_CAT, - type, size, (const char *)content); - /* * We are blending several somewhat independent concepts here: * @@ -838,7 +836,6 @@ static enum worker_result get_packfile_from_oids( goto done; } - trace2_printf("%s: pack-objects returned %d bytes", TR2_CAT, buf_packfile->len); wr = WR_OK; done: @@ -986,6 +983,305 @@ static enum worker_result do__gvfs_objects__post(struct req *req) return wr; } +/* + * bswap.h only defines big endian functions. + * The GVFS Protocol defines fields in little endian. + */ +static inline uint64_t my_get_le64(uint64_t le_val) +{ +#if GIT_BYTE_ORDER == GIT_LITTLE_ENDIAN + return le_val; +#else + return default_bswap64(le_val); +#endif +} + +static inline uint16_t my_get_le16(uint16_t le_val) +{ +#if GIT_BYTE_ORDER == GIT_LITTLE_ENDIAN + return le_val; +#else + return default_bswap16(le_val); +#endif +} + +/* + * GVFS Protocol headers for the multipack format + * All integer values are little-endian on the wire. 
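+ *
+ * For example, a hypothetical timestamp value of 0x0000000001020304
+ * is sent as the byte sequence 04 03 02 01 00 00 00 00.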
+ * + * Note: technically, the protocol defines the `ph` fields as signed, but + * that makes a mess of the bswap routines and we're not going to overflow + * them for a very long time. + */ + +static unsigned char v1_h[6] = { 'G', 'P', 'R', 'E', ' ', 0x01 }; + +struct ph { + uint64_t timestamp; + uint64_t len_pack; + uint64_t len_idx; +}; + +/* + * Accumulate a list of commits-and-trees packfiles we have in the local ODB. + * The test script should have pre-created a set of "ct-.pack" and .idx + * files for us. We serve these as is and DO NOT try to dynamically create + * new commits/trees packfiles (like the cache-server does). We are only + * testing if/whether gvfs-helper.exe can receive one or more packfiles and + * idx files over the protocol. + */ +struct ct_pack_item { + struct ph ph; + struct strbuf path_pack; + struct strbuf path_idx; +}; + +static void ct_pack_item__free(struct ct_pack_item *item) +{ + if (!item) + return; + strbuf_release(&item->path_pack); + strbuf_release(&item->path_idx); + free(item); +} + +struct ct_pack_data { + struct ct_pack_item **items; + size_t nr, alloc; +}; + +static void ct_pack_data__release(struct ct_pack_data *data) +{ + int k; + + if (!data) + return; + + for (k = 0; k < data->nr; k++) + ct_pack_item__free(data->items[k]); + + FREE_AND_NULL(data->items); + data->nr = 0; + data->alloc = 0; +} + +static void cb_ct_pack(const char *full_path, size_t full_path_len, + const char *file_path, void *void_data) +{ + struct ct_pack_data *data = void_data; + struct ct_pack_item *item = NULL; + struct stat st; + const char *v; + + /* + * We only want "ct-.pack" files. The test script creates + * cached commits-and-trees packfiles with this prefix to avoid + * confusion with prefetch packfiles received by gvfs-helper. + */ + if (!ends_with(file_path, ".pack")) + return; + if (!skip_prefix(file_path, "ct-", &v)) + return; + + item = (struct ct_pack_item *)xcalloc(1, sizeof(*item)); + strbuf_init(&item->path_pack, 0); + strbuf_addstr(&item->path_pack, full_path); + + strbuf_init(&item->path_idx, 0); + strbuf_addstr(&item->path_idx, full_path); + strbuf_strip_suffix(&item->path_idx, ".pack"); + strbuf_addstr(&item->path_idx, ".idx"); + + item->ph.timestamp = (uint64_t)strtoul(v, NULL, 10); + + lstat(item->path_pack.buf, &st); + item->ph.len_pack = (uint64_t)st.st_size; + + if (string_list_has_string(&mayhem_list, "no_prefetch_idx")) + item->ph.len_idx = maximum_unsigned_value_of_type(uint64_t); + else if (lstat(item->path_idx.buf, &st) < 0) + item->ph.len_idx = maximum_unsigned_value_of_type(uint64_t); + else + item->ph.len_idx = (uint64_t)st.st_size; + + ALLOC_GROW(data->items, data->nr + 1, data->alloc); + data->items[data->nr++] = item; +} + +/* + * Sort by increasing EPOCH time. + */ +static int ct_pack_sort_compare(const void *_a, const void *_b) +{ + const struct ct_pack_item *a = *(const struct ct_pack_item **)_a; + const struct ct_pack_item *b = *(const struct ct_pack_item **)_b; + return (a->ph.timestamp < b->ph.timestamp) ? -1 : (a->ph.timestamp != b->ph.timestamp); +} + +static enum worker_result send_ct_item(const struct ct_pack_item *item) +{ + struct ph ph_le; + int fd_pack = -1; + int fd_idx = -1; + enum worker_result wr = WR_OK; + + /* send per-packfile header. all fields are little-endian on the wire. 
*/
+	ph_le.timestamp = my_get_le64(item->ph.timestamp);
+	ph_le.len_pack = my_get_le64(item->ph.len_pack);
+	ph_le.len_idx = my_get_le64(item->ph.len_idx);
+
+	if (write_in_full(1, &ph_le, sizeof(ph_le)) < 0) {
+		logerror("unable to write ph_le");
+		wr = WR_IO_ERROR;
+		goto done;
+	}
+
+	trace2_printf("%s: sending prefetch pack '%s'", TR2_CAT, item->path_pack.buf);
+
+	fd_pack = git_open_cloexec(item->path_pack.buf, O_RDONLY);
+	if (fd_pack == -1 || copy_fd(fd_pack, 1)) {
+		logerror("could not send packfile");
+		wr = WR_IO_ERROR;
+		goto done;
+	}
+
+	if (item->ph.len_idx != maximum_unsigned_value_of_type(uint64_t)) {
+		trace2_printf("%s: sending prefetch idx '%s'", TR2_CAT, item->path_idx.buf);
+
+		fd_idx = git_open_cloexec(item->path_idx.buf, O_RDONLY);
+		if (fd_idx == -1 || copy_fd(fd_idx, 1)) {
+			logerror("could not send idx");
+			wr = WR_IO_ERROR;
+			goto done;
+		}
+	}
+
+done:
+	if (fd_pack != -1)
+		close(fd_pack);
+	if (fd_idx != -1)
+		close(fd_idx);
+	return wr;
+}
+
+/*
+ * The GVFS Protocol defines the lastTimeStamp parameter as the value
+ * of the last prefetch pack that the client has. Therefore, we only
+ * want to send newer ones.
+ */
+static int want_ct_pack(const struct ct_pack_item *item, timestamp_t last_timestamp)
+{
+	return item->ph.timestamp > last_timestamp;
+}
+
+static enum worker_result send_multipack(struct ct_pack_data *data,
+					 timestamp_t last_timestamp)
+{
+	struct strbuf response_header = STRBUF_INIT;
+	struct strbuf uuid = STRBUF_INIT;
+	enum worker_result wr;
+	size_t content_len = 0;
+	unsigned short np = 0;
+	unsigned short np_le;
+	int k;
+
+	/*
+	 * Precompute the content-length so that we don't have to deal with
+	 * chunking it.
+	 */
+	content_len += sizeof(v1_h) + sizeof(np);
+	for (k = 0; k < data->nr; k++) {
+		struct ct_pack_item *item = data->items[k];
+
+		if (!want_ct_pack(item, last_timestamp))
+			continue;
+
+		np++;
+		content_len += sizeof(struct ph);
+		content_len += item->ph.len_pack;
+		if (item->ph.len_idx != maximum_unsigned_value_of_type(uint64_t))
+			content_len += item->ph.len_idx;
+	}
+
+	strbuf_addstr(&response_header, "HTTP/1.1 200 OK\r\n");
+	strbuf_addstr(&response_header, "Cache-Control: private\r\n");
+	strbuf_addstr(&response_header,
+		      "Content-Type: application/x-gvfs-timestamped-packfiles-indexes\r\n");
+	strbuf_addf( &response_header, "Content-Length: %d\r\n", (int)content_len);
+	strbuf_addf( &response_header, "Server: test-gvfs-protocol/%s\r\n", git_version_string);
+	strbuf_addf( &response_header, "Date: %s\r\n", show_date(time(NULL), 0, DATE_MODE(RFC2822)));
+	gen_fake_uuid(&uuid);
+	strbuf_addf( &response_header, "X-VSS-E2EID: %s\r\n", uuid.buf);
+	strbuf_addstr(&response_header, "\r\n");
+
+	if (write_in_full(1, response_header.buf, response_header.len) < 0) {
+		logerror("unable to write response header");
+		wr = WR_IO_ERROR;
+		goto done;
+	}
+
+	/* send protocol version header */
+	if (write_in_full(1, v1_h, sizeof(v1_h)) < 0) {
+		logerror("unable to write v1_h");
+		wr = WR_IO_ERROR;
+		goto done;
+	}
+
+	/* send number of packfiles */
+	np_le = my_get_le16(np);
+	if (write_in_full(1, &np_le, sizeof(np_le)) < 0) {
+		logerror("unable to write np");
+		wr = WR_IO_ERROR;
+		goto done;
+	}
+
+	for (k = 0; k < data->nr; k++) {
+		if (!want_ct_pack(data->items[k], last_timestamp))
+			continue;
+
+		wr = send_ct_item(data->items[k]);
+		if (wr != WR_OK)
+			goto done;
+	}
+
+	wr = WR_OK;
+
+done:
+	strbuf_release(&uuid);
+	strbuf_release(&response_header);
+
+	return wr;
+}
+
+static enum worker_result do__gvfs_prefetch__get(struct req
*req) +{ + struct ct_pack_data data; + timestamp_t last_timestamp = 0; + enum worker_result wr; + + memset(&data, 0, sizeof(data)); + + if (req->quest_args.len) { + const char *key = strstr(req->quest_args.buf, "lastPackTimestamp="); + if (key) { + const char *val; + if (skip_prefix(key, "lastPackTimestamp=", &val)) { + last_timestamp = strtol(val, NULL, 10); + } + } + } + trace2_printf("%s: prefetch/since %"PRItime, TR2_CAT, last_timestamp); + + for_each_file_in_pack_dir(get_object_directory(), cb_ct_pack, &data); + QSORT(data.items, data.nr, ct_pack_sort_compare); + + wr = send_multipack(&data, last_timestamp); + + ct_pack_data__release(&data); + + return wr; +} + /* * Read the HTTP request up to the start of the optional message-body. * We do this byte-by-byte because we have keep-alive turned on and @@ -1141,6 +1437,11 @@ static enum worker_result req__read(struct req *req, int fd) * We let our caller read/chunk it in as appropriate. */ done: + +#if 0 + /* + * This is useful for debugging the request, but very noisy. + */ if (trace2_is_enabled()) { struct string_list_item *item; trace2_printf("%s: %s", TR2_CAT, req->start_line.buf); @@ -1155,6 +1456,7 @@ static enum worker_result req__read(struct req *req, int fd) for_each_string_list_item(item, &req->header_list) trace2_printf("%s: Hdrs: %s", TR2_CAT, item->string); } +#endif return WR_OK; } @@ -1203,6 +1505,12 @@ static enum worker_result dispatch(struct req *req) return do__gvfs_config__get(req); } + if (!strcmp(req->gvfs_api.buf, "gvfs/prefetch")) { + + if (!strcmp(method, "GET")) + return do__gvfs_prefetch__get(req); + } + return send_http_error(1, 501, "Not Implemented", -1, WR_OK | WR_HANGUP); } diff --git a/t/t5799-gvfs-helper.sh b/t/t5799-gvfs-helper.sh index 7054e163712f9e..efaca1f2e4d645 100755 --- a/t/t5799-gvfs-helper.sh +++ b/t/t5799-gvfs-helper.sh @@ -24,8 +24,8 @@ test_set_port GIT_TEST_GVFS_PROTOCOL_PORT # actually use it). We are only testing explicit object # fetching using gvfs-helper.exe in isolation. # -REPO_SRC="$PWD"/repo_src -REPO_T1="$PWD"/repo_t1 +REPO_SRC="$(pwd)"/repo_src +REPO_T1="$(pwd)"/repo_t1 # Setup some loopback URLs where test-gvfs-protocol.exe will be # listening. We will spawn it directly inside the repo_src directory, @@ -44,22 +44,22 @@ HOST_PORT=127.0.0.1:$GIT_TEST_GVFS_PROTOCOL_PORT ORIGIN_URL=http://$HOST_PORT/servertype/origin CACHE_URL=http://$HOST_PORT/servertype/cache -SHARED_CACHE_T1="$PWD"/shared_cache_t1 +SHARED_CACHE_T1="$(pwd)"/shared_cache_t1 # The pid-file is created by test-gvfs-protocol.exe when it starts. # The server will shut down if/when we delete it. (This is a little # easier than killing it by PID.) # -PID_FILE="$PWD"/pid-file.pid -SERVER_LOG="$PWD"/OUT.server.log +PID_FILE="$(pwd)"/pid-file.pid +SERVER_LOG="$(pwd)"/OUT.server.log PATH="$GIT_BUILD_DIR/t/helper/:$PATH" && export PATH -OIDS_FILE="$PWD"/oid_list.txt -OIDS_CT_FILE="$PWD"/oid_ct_list.txt -OIDS_BLOBS_FILE="$PWD"/oids_blobs_file.txt -OID_ONE_BLOB_FILE="$PWD"/oid_one_blob_file.txt -OID_ONE_COMMIT_FILE="$PWD"/oid_one_commit_file.txt +OIDS_FILE="$(pwd)"/oid_list.txt +OIDS_CT_FILE="$(pwd)"/oid_ct_list.txt +OIDS_BLOBS_FILE="$(pwd)"/oids_blobs_file.txt +OID_ONE_BLOB_FILE="$(pwd)"/oid_one_blob_file.txt +OID_ONE_COMMIT_FILE="$(pwd)"/oid_one_commit_file.txt # Get a list of available OIDs in repo_src so that we can try to fetch # them and so that we don't have to hard-code a list of known OIDs. 
@@ -108,15 +108,46 @@ get_one_commit_oid () {
 	return 0
 }
 
+# Create a commits-and-trees packfile for use with "prefetch"
+# using the given range of commits.
+#
+create_commits_and_trees_packfile () {
+	if test $# -eq 2
+	then
+		epoch=$1
+		revs=$2
+	else
+		echo "create_commits_and_trees_packfile: Need 2 args"
+		return 1
+	fi
+
+	pack_file="$REPO_SRC"/.git/objects/pack/ct-$epoch.pack
+	idx_file="$REPO_SRC"/.git/objects/pack/ct-$epoch.idx
+
+	git -C "$REPO_SRC" pack-objects --stdout --revs --filter=blob:none \
+		>"$pack_file" <<-EOF
+		$revs
+	EOF
+	git -C "$REPO_SRC" index-pack -o "$idx_file" "$pack_file"
+	return 0
+}
+
 test_expect_success 'setup repos' '
 	test_create_repo "$REPO_SRC" &&
 	#
 	# test_commit_bulk() does magic to create a packfile containing
 	# the new commits.
 	#
+	# We create branches in repo_src, but also remember the branch OIDs
+	# in files so that we can refer to them in repo_t1, which will not
+	# have the commits locally (because we do not clone or fetch).
+	#
 	test_commit_bulk -C "$REPO_SRC" --filename="batch_a.%s.t" 9 &&
+	git -C "$REPO_SRC" branch B1 &&
 	cp "$REPO_SRC"/.git/refs/heads/master m1.branch &&
+	#
 	test_commit_bulk -C "$REPO_SRC" --filename="batch_b.%s.t" 9 &&
+	git -C "$REPO_SRC" branch B2 &&
 	cp "$REPO_SRC"/.git/refs/heads/master m2.branch &&
 	#
 	# test_commit() creates commits, trees, tags, and blobs and leave
@@ -133,8 +164,16 @@ test_expect_success 'setup repos' '
 	test_commit -C "$REPO_SRC" file7.txt &&
 	test_commit -C "$REPO_SRC" file8.txt &&
 	test_commit -C "$REPO_SRC" file9.txt &&
+	git -C "$REPO_SRC" branch B3 &&
 	cp "$REPO_SRC"/.git/refs/heads/master m3.branch &&
 	#
+	# Create some commits-and-trees-only packfiles for testing prefetch.
+	# Set arbitrary EPOCH times to make it easier to test fetch-since.
+	#
+	create_commits_and_trees_packfile 1000000000 B1 &&
+	create_commits_and_trees_packfile 1100000000 B1..B2 &&
+	create_commits_and_trees_packfile 1200000000 B2..B3 &&
+	#
 	# gvfs-helper.exe writes downloaded objects to a shared-cache directory
 	# rather than the ODB inside the .git directory.
 	#
@@ -158,10 +197,10 @@ test_expect_success 'setup repos' '
 	EOF
 	cat <<-EOF >creds.sh &&
 	#!/bin/sh
-	cat "$PWD"/creds.txt
+	cat "$(pwd)"/creds.txt
 	EOF
 	chmod 755 creds.sh &&
-	git -C "$REPO_T1" config --local credential.helper "!f() { cat \"$PWD\"/creds.txt; }; f" &&
+	git -C "$REPO_T1" config --local credential.helper "!f() { cat \"$(pwd)\"/creds.txt; }; f" &&
 	#
 	# Create some test data sets.
 	#
@@ -552,8 +591,8 @@ test_expect_success 'basic: POST-request a single blob' '
 # Request a single commit via POST. Per the GVFS Protocol, the server
 # should implicitly send us a packfile containing the commit and the
 # trees it references. Confirm that properly handled the receipt of
-# the packfile. (Here, we are testing that asking for a single object
-# yields a packfile rather than a loose object.)
+# the packfile. (Here, we are testing that asking for a single commit
+# via POST yields a packfile rather than a loose object.)
 #
 # We DO NOT verify that the packfile contains commits/trees and no blobs
 # because our test helper doesn't implement the filtering.
@@ -585,6 +624,105 @@ test_expect_success 'basic: POST-request a single commit' '
 	verify_connection_count 1
 '
 
+test_expect_success 'basic: PREFETCH w/o arg gets all' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server &&
+
+	# Without a "since" argument, the server sends all "ct-*.pack"
+	# packfiles since the EPOCH, because we do not have any prefetch
+	# packs locally.
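+	# (The setup above created ct- packs at epochs 1000000000,
+	# 1100000000, and 1200000000, so we expect all three here.)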
+	#
+	git -C "$REPO_T1" gvfs-helper \
+		--cache-server=disable \
+		--remote=origin \
+		--no-progress \
+		prefetch >OUT.output &&

+	# gvfs-helper prints a "packfile <name>" message for each received
+	# packfile.
+	#
+	verify_received_packfile_count 3 &&
+
+	stop_gvfs_protocol_server &&
+	verify_connection_count 1
+'
+
+test_expect_success 'basic: PREFETCH w/ arg' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server &&
+
+	# Ask for cached packfiles NEWER THAN the given time.
+	#
+	git -C "$REPO_T1" gvfs-helper \
+		--cache-server=disable \
+		--remote=origin \
+		--no-progress \
+		prefetch --since="1000000000" >OUT.output &&
+
+	# gvfs-helper prints a "packfile <name>" message for each received
+	# packfile.
+	#
+	verify_received_packfile_count 2 &&
+
+	stop_gvfs_protocol_server &&
+	verify_connection_count 1
+'
+
+test_expect_success 'basic: PREFETCH mayhem no_prefetch_idx' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server_with_mayhem no_prefetch_idx &&
+
+	# Request prefetch packs, but tell server to not send any
+	# idx files and force gvfs-helper to compute them.
+	#
+	git -C "$REPO_T1" gvfs-helper \
+		--cache-server=disable \
+		--remote=origin \
+		--no-progress \
+		prefetch --since="1000000000" >OUT.output &&
+
+	# gvfs-helper prints a "packfile <name>" message for each received
+	# packfile.
+	#
+	verify_received_packfile_count 2 &&
+
+	stop_gvfs_protocol_server &&
+	verify_connection_count 1
+'
+
+test_expect_success 'basic: PREFETCH up-to-date' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server &&
+
+	# Ask for cached packfiles NEWER THAN the given time.
+	#
+	git -C "$REPO_T1" gvfs-helper \
+		--cache-server=disable \
+		--remote=origin \
+		--no-progress \
+		prefetch --since="1000000000" >OUT.output &&
+
+	# gvfs-helper prints a "packfile <name>" message for each received
+	# packfile.
+	#
+	verify_received_packfile_count 2 &&
+
+	# Ask again for any packfiles newer than what we have cached locally.
+	#
+	git -C "$REPO_T1" gvfs-helper \
+		--cache-server=disable \
+		--remote=origin \
+		--no-progress \
+		prefetch >OUT.output &&
+
+	# gvfs-helper prints a "packfile <name>" message for each received
+	# packfile.
+	#
+	verify_received_packfile_count 0 &&
+
+	stop_gvfs_protocol_server &&
+	verify_connection_count 2
+'
+
 #################################################################
 # Tests to see how gvfs-helper responds to network problems.
 #
@@ -958,44 +1096,6 @@ test_expect_success 'HTTP GET Auth on Cache Server' '
 # magically fetched whenever required.
 #################################################################
 
-test_expect_success 'integration: explicit commit/trees, implicit blobs: log file' '
-	test_when_finished "per_test_cleanup" &&
-	start_gvfs_protocol_server &&
-
-	# We have a very empty repo. Seed it with all of the commits
-	# and trees. The purpose of this test is to demand-load the
-	# needed blobs only, so we prefetch the commits and trees.
-	#
-	git -C "$REPO_T1" gvfs-helper \
-		--cache-server=disable \
-		--remote=origin \
-		get \
-		<"$OIDS_CT_FILE" >OUT.output &&
-
-	# Confirm that we do not have the blobs locally.
-	# With gvfs-helper turned off, we should fail.
-	#
-	test_must_fail \
-		git -C "$REPO_T1" -c core.usegvfshelper=false \
-			log $(cat m3.brach) -- file9.txt \
-			>OUT.output 2>OUT.stderr &&

-	# Turn on gvfs-helper and retry. This should implicitly fetch
-	# any needed blobs.
-	#
-	git -C "$REPO_T1" -c core.usegvfshelper=true \
-		log $(cat m3.branch) -- file9.txt \
-		>OUT.output 2>OUT.stderr &&
-
-	# Verify that gvfs-helper wrote the fetched the blobs to the
-	# local ODB, such that a second attempt with gvfs-helper
-	# turned off should succeed.
-	#
-	git -C "$REPO_T1" -c core.usegvfshelper=false \
-		log $(cat m3.branch) -- file9.txt \
-		>OUT.output 2>OUT.stderr
-'
-
 test_expect_success 'integration: explicit commit/trees, implicit blobs: diff 2 commits' '
 	test_when_finished "per_test_cleanup" &&
 	start_gvfs_protocol_server &&

From c041e732ee781dbf70d2c045a00b13ad0a2909f3 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler
Date: Mon, 21 Oct 2019 12:29:19 -0400
Subject: [PATCH 081/104] gvfs-helper: expose gvfs/objects GET and POST
 semantics

Expose the differences in the semantics of GET and POST for the
"gvfs/objects" API:

HTTP GET: fetches a single loose object over the network.
When a commit object is requested, it just returns the single object.

HTTP POST: fetches a batch of objects over the network.
When the oid-set contains a commit object, all referenced trees
are also included in the response.

gvfs-helper is updated to take "get" and "post" command line options.
The gvfs-helper "server" mode is updated to take "objects.get" and
"objects.post" verbs.

For convenience, the "get" option and the "objects.get" verb do allow
more than one object to be requested. gvfs-helper will automatically
issue a series of (single object) HTTP GET requests, creating a
series of loose objects.

The "post" option and the "objects.post" verb will perform bulk
object fetching using the batch-size chunking. An HTTP POST request
containing more than one object yields a packfile; an HTTP POST for
a single object creates a loose object.

This commit also contains some refactoring to eliminate the
assumption that POST is always associated with packfiles.

In gvfs-helper-client.c, gh_client__get_immediate() now uses the
"objects.get" verb and ignores any currently queued objects.

In gvfs-helper-client.c, the OIDSET built by gh_client__queue_oid()
is only processed when gh_client__drain_queue() is called. The queue
is processed using the "objects.post" verb.

Signed-off-by: Jeff Hostetler
---
 gvfs-helper-client.c | 226 +++++++++-----
 gvfs-helper.c        | 712 +++++++++++++++++++++++++++----------------
 2 files changed, 607 insertions(+), 331 deletions(-)

diff --git a/gvfs-helper-client.c b/gvfs-helper-client.c
index ae2c5d1951b5ec..9bb880338876bf 100644
--- a/gvfs-helper-client.c
+++ b/gvfs-helper-client.c
@@ -13,7 +13,6 @@
 
 static struct oidset gh_client__oidset_queued = OIDSET_INIT;
 static unsigned long gh_client__oidset_count;
-static int gh_client__includes_immediate;
 
 struct gh_server__process {
 	struct subprocess_entry subprocess; /* must be first */
@@ -24,13 +23,20 @@ static int gh_server__subprocess_map_initialized;
 static struct hashmap gh_server__subprocess_map;
 static struct object_directory *gh_client__chosen_odb;
 
-#define CAP_GET      (1u<<1)
+/*
+ * The "objects" capability has 2 verbs: "get" and "post".
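+ *
+ * A request names the capability and verb together (a sketch; the
+ * exact packet-line framing is shown at the send functions below):
+ *
+ *     objects.get LF <oid> LF <flush>
+ *     objects.post LF (<oid> LF)* <flush>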
+ */
+#define CAP_OBJECTS      (1u<<1)
+#define CAP_OBJECTS_NAME "objects"
+
+#define CAP_OBJECTS__VERB_GET1_NAME "get"
+#define CAP_OBJECTS__VERB_POST_NAME "post"
 
 static int gh_client__start_fn(struct subprocess_entry *subprocess)
 {
 	static int versions[] = {1, 0};
 	static struct subprocess_capability capabilities[] = {
-		{ "get", CAP_GET },
+		{ CAP_OBJECTS_NAME, CAP_OBJECTS },
 		{ NULL, 0 }
 	};
 
@@ -42,14 +48,16 @@ static int gh_client__start_fn(struct subprocess_entry *subprocess)
 }
 
 /*
- * Send:
+ * Send the queued OIDs in the OIDSET to gvfs-helper for it to
+ * fetch from the cache-server or main Git server using "/gvfs/objects"
+ * POST semantics.
  *
- * get LF
+ * objects.post LF
  * (<oid> LF)*
  * <flush>
  *
  */
-static int gh_client__get__send_command(struct child_process *process)
+static int gh_client__send__objects_post(struct child_process *process)
 {
 	struct oidset_iter iter;
 	struct object_id *oid;
@@ -60,7 +68,9 @@ static int gh_client__get__send_command(struct child_process *process)
 	 * so that we don't have to.
 	 */
 
-	err = packet_write_fmt_gently(process->in, "get\n");
+	err = packet_write_fmt_gently(
+		process->in,
+		(CAP_OBJECTS_NAME "." CAP_OBJECTS__VERB_POST_NAME "\n"));
 	if (err)
 		return err;
 
@@ -79,6 +89,46 @@ static int gh_client__get__send_command(struct child_process *process)
 	return 0;
 }
 
+/*
+ * Send the given OID to gvfs-helper for it to fetch from the
+ * cache-server or main Git server using "/gvfs/objects" GET
+ * semantics.
+ *
+ * This ignores any queued OIDs.
+ *
+ * objects.get LF
+ * <oid> LF
+ * <flush>
+ *
+ */
+static int gh_client__send__objects_get(struct child_process *process,
+					const struct object_id *oid)
+{
+	int err;
+
+	/*
+	 * We assume that all of the packet_ routines call error()
+	 * so that we don't have to.
+	 */
+
+	err = packet_write_fmt_gently(
+		process->in,
+		(CAP_OBJECTS_NAME "." CAP_OBJECTS__VERB_GET1_NAME "\n"));
+	if (err)
+		return err;
+
+	err = packet_write_fmt_gently(process->in, "%s\n",
+				      oid_to_hex(oid));
+	if (err)
+		return err;
+
+	err = packet_flush_gently(process->in);
+	if (err)
+		return err;
+
+	return 0;
+}
+
 /*
  * Verify that the pathname found in the "odb" response line matches
  * what we requested.
@@ -148,7 +198,7 @@ static void gh_client__update_packed_git(const char *line)
 }
 
 /*
- * We expect:
+ * Both CAP_OBJECTS verbs return the same format response:
  *
 * <odb>
 * <data>*
@@ -179,7 +229,7 @@ static void gh_client__update_packed_git(const char *line)
 * grouped with a queued request for a blob. The tree-walk *might* be
 * able to continue and let the 404 blob be handled later.
 */
-static int gh_client__get__receive_response(
+static int gh_client__objects__receive_response(
 	struct child_process *process,
 	enum gh_client__created *p_ghc,
 	int *p_nr_loose, int *p_nr_packfile)
@@ -258,17 +308,12 @@ static void gh_client__choose_odb(void)
 	}
 }
 
-static int gh_client__get(enum gh_client__created *p_ghc)
+static struct gh_server__process *gh_client__find_long_running_process(
+	unsigned int cap_needed)
 {
 	struct gh_server__process *entry;
-	struct child_process *process;
 	struct argv_array argv = ARGV_ARRAY_INIT;
 	struct strbuf quoted = STRBUF_INIT;
-	int nr_loose = 0;
-	int nr_packfile = 0;
-	int err = 0;
-
-	trace2_region_enter("gh-client", "get", the_repository);
 
 	gh_client__choose_odb();
 
@@ -284,6 +329,11 @@ static int gh_client__get(enum gh_client__created *p_ghc)
 
 	sq_quote_argv_pretty(&quoted, argv.argv);
 
+	/*
+	 * Find an existing long-running process with the above command
+	 * line -or- create a new long-running process for this and
+	 * subsequent 'get' requests.
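+	 * (The map is keyed by the quoted command line, so a single
+	 * long-running process ends up serving both the "objects.get"
+	 * and "objects.post" verbs.)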
+	 */
 	if (!gh_server__subprocess_map_initialized) {
 		gh_server__subprocess_map_initialized = 1;
 		hashmap_init(&gh_server__subprocess_map,
@@ -297,70 +347,24 @@ static int gh_client__get(enum gh_client__created *p_ghc)
 		entry = xmalloc(sizeof(*entry));
 		entry->supported_capabilities = 0;
 
-		err = subprocess_start_argv(
-			&gh_server__subprocess_map, &entry->subprocess, 1,
-			&argv, gh_client__start_fn);
-		if (err) {
-			free(entry);
-			goto leave_region;
-		}
+		if (subprocess_start_argv(&gh_server__subprocess_map,
+					  &entry->subprocess, 1,
+					  &argv, gh_client__start_fn))
+			FREE_AND_NULL(entry);
 	}
 
-	process = &entry->subprocess.process;
-
-	if (!(CAP_GET & entry->supported_capabilities)) {
-		error("gvfs-helper: does not support GET");
-		subprocess_stop(&gh_server__subprocess_map,
-				(struct subprocess_entry *)entry);
-		free(entry);
-		err = -1;
-		goto leave_region;
-	}
-
-	sigchain_push(SIGPIPE, SIG_IGN);
-
-	err = gh_client__get__send_command(process);
-	if (!err)
-		err = gh_client__get__receive_response(process, p_ghc,
-						       &nr_loose, &nr_packfile);
-
-	sigchain_pop(SIGPIPE);
-
-	if (err) {
+	if (entry &&
+	    (entry->supported_capabilities & cap_needed) != cap_needed) {
+		error("gvfs-helper: does not support needed capabilities");
 		subprocess_stop(&gh_server__subprocess_map,
 				(struct subprocess_entry *)entry);
-		free(entry);
+		FREE_AND_NULL(entry);
 	}
 
-leave_region:
 	argv_array_clear(&argv);
 	strbuf_release(&quoted);
 
-	trace2_data_intmax("gh-client", the_repository,
-			   "get/immediate", gh_client__includes_immediate);
-
-	trace2_data_intmax("gh-client", the_repository,
-			   "get/nr_objects", gh_client__oidset_count);
-
-	if (nr_loose)
-		trace2_data_intmax("gh-client", the_repository,
-				   "get/nr_loose", nr_loose);
-
-	if (nr_packfile)
-		trace2_data_intmax("gh-client", the_repository,
-				   "get/nr_packfile", nr_packfile);
-
-	if (err)
-		trace2_data_intmax("gh-client", the_repository,
-				   "get/error", err);
-
-	trace2_region_leave("gh-client", "get", the_repository);
-
-	oidset_clear(&gh_client__oidset_queued);
-	gh_client__oidset_count = 0;
-	gh_client__includes_immediate = 0;
-
-	return err;
+	return entry;
 }
 
 void gh_client__queue_oid(const struct object_id *oid)
@@ -387,27 +391,97 @@ void gh_client__queue_oid_array(const struct object_id *oids, int oid_nr)
 		gh_client__queue_oid(&oids[k]);
 }
 
+/*
+ * Bulk fetch all of the queued OIDs in the OIDSET.
+ */
 int gh_client__drain_queue(enum gh_client__created *p_ghc)
 {
+	struct gh_server__process *entry;
+	struct child_process *process;
+	int nr_loose = 0;
+	int nr_packfile = 0;
+	int err = 0;
+
 	*p_ghc = GHC__CREATED__NOTHING;
 
 	if (!gh_client__oidset_count)
 		return 0;
 
-	return gh_client__get(p_ghc);
+	entry = gh_client__find_long_running_process(CAP_OBJECTS);
+	if (!entry)
+		return -1;
+
+	trace2_region_enter("gh-client", "objects/post", the_repository);
+
+	process = &entry->subprocess.process;
+
+	sigchain_push(SIGPIPE, SIG_IGN);
+
+	err = gh_client__send__objects_post(process);
+	if (!err)
+		err = gh_client__objects__receive_response(
+			process, p_ghc, &nr_loose, &nr_packfile);
+
+	sigchain_pop(SIGPIPE);
+
+	if (err) {
+		subprocess_stop(&gh_server__subprocess_map,
+				(struct subprocess_entry *)entry);
+		FREE_AND_NULL(entry);
+	}
+
+	trace2_data_intmax("gh-client", the_repository,
+			   "objects/post/nr_objects", gh_client__oidset_count);
+	trace2_region_leave("gh-client", "objects/post", the_repository);
+
+	oidset_clear(&gh_client__oidset_queued);
+	gh_client__oidset_count = 0;
+
+	return err;
 }
+
+/*
+ * Get exactly 1 object immediately.
+ * Ignore any queued objects.
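+ *
+ * (This maps to the "objects.get" verb: per the GVFS Protocol, a GET
+ * returns just the requested object, whereas a POST for a commit
+ * would also pull in the referenced trees, which an immediate
+ * one-object fault-in does not want.)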
+ */
 int gh_client__get_immediate(const struct object_id *oid,
 			     enum gh_client__created *p_ghc)
 {
-	gh_client__includes_immediate = 1;
+	struct gh_server__process *entry;
+	struct child_process *process;
+	int nr_loose = 0;
+	int nr_packfile = 0;
+	int err = 0;
 
 	// TODO consider removing this trace2.  it is useful for interactive
 	// TODO debugging, but may generate way too much noise for a data
 	// TODO event.
 	trace2_printf("gh_client__get_immediate: %s", oid_to_hex(oid));
 
-	if (!oidset_insert(&gh_client__oidset_queued, oid))
-		gh_client__oidset_count++;
+	entry = gh_client__find_long_running_process(CAP_OBJECTS);
+	if (!entry)
+		return -1;
+
+	trace2_region_enter("gh-client", "objects/get", the_repository);
 
-	return gh_client__drain_queue(p_ghc);
+	process = &entry->subprocess.process;
+
+	sigchain_push(SIGPIPE, SIG_IGN);
+
+	err = gh_client__send__objects_get(process, oid);
+	if (!err)
+		err = gh_client__objects__receive_response(
+			process, p_ghc, &nr_loose, &nr_packfile);
+
+	sigchain_pop(SIGPIPE);
+
+	if (err) {
+		subprocess_stop(&gh_server__subprocess_map,
+				(struct subprocess_entry *)entry);
+		FREE_AND_NULL(entry);
+	}
+
+	trace2_region_leave("gh-client", "objects/get", the_repository);
+
+	return err;
 }
diff --git a/gvfs-helper.c b/gvfs-helper.c
index 33a623edf856da..abae824cdadaed 100644
--- a/gvfs-helper.c
+++ b/gvfs-helper.c
@@ -46,7 +46,28 @@
 //
 //     get
 //
-//          Fetch 1 or more objects. If a cache-server is configured,
+//          Fetch 1 or more objects one at a time using a "/gvfs/objects"
+//          GET request.
+//
+//          If a cache-server is configured,
+//          try it first. Optionally fallback to the main Git server.
+//
+//          The set of objects is given on stdin and is assumed to be
+//          a list of <oid>, one per line.
+//
+//          <options>:
+//
+//          --max-retries=<n>      // defaults to "6"
+//
+//               Number of retries after transient network errors.
+//               Set to zero to disable such retries.
+//
+//     post
+//
+//          Fetch 1 or more objects in bulk using a "/gvfs/objects" POST
+//          request.
+//
+//          If a cache-server is configured,
 //          try it first. Optionally fallback to the main Git server.
 //
 //          The set of objects is given on stdin and is assumed to be
 //          a list of <oid>, one per line.
@@ -78,7 +99,8 @@
 //          --block-size=<n>      // defaults to "4000"
 //
 //               Request objects from server in batches of at
-//               most n objects (not bytes).
+//               most n objects (not bytes) when using POST
+//               requests.
 //
 //          --depth=<depth>       // defaults to "1"
 //
@@ -87,17 +109,27 @@
 //               Number of retries after transient network errors.
 //               Set to zero to disable such retries.
 //
-//    Interactive verb: get
+//    Interactive verb: objects.get
+//
+//          Fetch 1 or more objects, one at a time, using
+//          "/gvfs/objects" GET requests.
+//
+//          Each object will be created as a loose object in the ODB.
+//
+//    Interactive verb: objects.post
 //
-//          Fetch 1 or more objects. If a cache-server is configured,
-//          try it first. Optionally fallback to the main Git server.
+//          Fetch 1 or more objects, in bulk, using one or more
+//          "/gvfs/objects" POST requests.
+//
+//          For both verbs, if a cache-server is configured, try it first.
+//          Optionally fallback to the main Git server.
 //
 //          Create 1 or more loose objects and/or packfiles in the
 //          shared-cache ODB. (The pathname of the selected ODB is
 //          reported at the beginning of the response; this should
 //          match the pathname given on the command line).
 //
-//          git> get
+//          git> objects.get | objects.post
 //          git> <oid>
 //          git> <oid>
 //          git> ...
@@ -116,20 +148,6 @@
 // [2] Documentation/technical/long-running-process-protocol.txt
 // [3] See GIT_TRACE_PACKET
 //
-// Example:
-//
-// $ git -c core.virtualizeobjects=false -c core.usegvfshelper=false
-//       rev-list --objects --no-walk --missing=print HEAD
-//   | grep "^?"
-//   | sed 's/^?//'
-//   | git gvfs-helper get-missing
-//
-// Note: In this example, we need to turn off "core.virtualizeobjects" and
-// "core.usegvfshelper" when building the list of objects. This prevents
-// rev-list (in oid_object_info_extended() from automatically fetching
-// them with read-object-hook or "gvfs-helper server" sub-process (and
-// defeating the whole purpose of this example).
-//
 //////////////////////////////////////////////////////////////////
 
 #include "cache.h"
@@ -162,15 +180,21 @@
 
 static const char * const main_usage[] = {
 	N_("git gvfs-helper [<main_options>] config [<options>]"),
 	N_("git gvfs-helper [<main_options>] get [<options>]"),
+	N_("git gvfs-helper [<main_options>] post [<options>]"),
 	N_("git gvfs-helper [<main_options>] server [<options>]"),
 	NULL
 };
 
-static const char *const get_usage[] = {
+static const char *const objects_get_usage[] = {
 	N_("git gvfs-helper [<main_options>] get [<options>]"),
 	NULL
 };
 
+static const char *const objects_post_usage[] = {
+	N_("git gvfs-helper [<main_options>] post [<options>]"),
+	NULL
+};
+
 static const char *const server_usage[] = {
 	N_("git gvfs-helper [<main_options>] server [<options>]"),
 	NULL
@@ -179,12 +203,12 @@ static const char *const server_usage[] = {
 /*
  * "commitDepth" field in gvfs protocol
 */
-#define GH__DEFAULT_COMMIT_DEPTH 1
+#define GH__DEFAULT__OBJECTS_POST__COMMIT_DEPTH 1
 
 /*
  * Chunk/block size in number of objects we request in each packfile
 */
-#define GH__DEFAULT_BLOCK_SIZE 4000
+#define GH__DEFAULT__OBJECTS_POST__BLOCK_SIZE 4000
 
 /*
  * Retry attempts (after the initial request) for transient errors and 429s.
@@ -278,6 +302,28 @@ static const char *gh__server_type_label[GH__SERVER_TYPE__NR] = {
 	"(cs)"
 };
 
+enum gh__objects_mode {
+	/*
+	 * Bulk fetch objects.
+	 *
+	 * But also, force the use of HTTP POST regardless of how many
+	 * objects we are requesting.
+	 *
+	 * The GVFS Protocol treats requests for commit objects
+	 * differently in GET and POST requests WRT whether it
+	 * automatically also fetches the referenced trees.
+	 */
+	GH__OBJECTS_MODE__POST,
+
+	/*
+	 * Fetch objects one at a time using HTTP GET.
+	 *
+	 * Force the use of GET (primarily because of the commit
+	 * object treatment).
+	 */
+	GH__OBJECTS_MODE__GET,
+};
+
 struct gh__azure_throttle
 {
 	unsigned long tstu_limit;
@@ -333,7 +379,20 @@ enum gh__progress_state {
 * Parameters to drive an HTTP request (with any necessary retries).
 */
 struct gh__request_params {
-	int b_is_post; /* POST=1 or GET=0 */
+	/*
+	 * b_is_post indicates if the current HTTP request is a POST=1 or
+	 * a GET=0. This is a lower level field used to setup CURL and
+	 * the tempfile used to receive the content.
+	 *
+	 * It is related to, but different from the GH__OBJECTS_MODE__
+	 * field that we present to the gvfs-helper client or in the CLI
+	 * (which only concerns the semantics of the /gvfs/objects protocol
+	 * on the set of requested OIDs).
+	 *
+	 * For example, we use an HTTP GET to get the /gvfs/config data
+	 * into a buffer.
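+	 *
+	 * Roughly, per the description above:
+	 *
+	 *     objects-mode   b_is_post   object_count   result
+	 *     GET            0           1              loose object
+	 *     POST           1           >1             packfile
+	 *     POST           1           1              loose object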
+ */ + int b_is_post; int b_write_to_file; /* write to file=1 or strbuf=0 */ int b_permit_cache_server_if_defined; @@ -496,8 +555,6 @@ enum gh__retry_mode { struct gh__response_status { struct strbuf error_message; struct strbuf content_type; - long response_code; /* http response code */ - CURLcode curl_code; enum gh__error_code ec; enum gh__retry_mode retry; intmax_t bytes_received; @@ -507,8 +564,6 @@ struct gh__response_status { #define GH__RESPONSE_STATUS_INIT { \ .error_message = STRBUF_INIT, \ .content_type = STRBUF_INIT, \ - .response_code = 0, \ - .curl_code = CURLE_OK, \ .ec = GH__ERROR_CODE__OK, \ .retry = GH__RETRY_MODE__SUCCESS, \ .bytes_received = 0, \ @@ -519,8 +574,6 @@ static void gh__response_status__zero(struct gh__response_status *s) { strbuf_setlen(&s->error_message, 0); strbuf_setlen(&s->content_type, 0); - s->response_code = 0; - s->curl_code = CURLE_OK; s->ec = GH__ERROR_CODE__OK; s->retry = GH__RETRY_MODE__SUCCESS; s->bytes_received = 0; @@ -574,15 +627,14 @@ static void log_e2eid(struct gh__request_params *params, } /* - * Normalize a few error codes before we try to decide + * Normalize a few HTTP response codes before we try to decide * how to dispatch on them. */ -static void gh__response_status__normalize_odd_codes( - struct gh__request_params *params, - struct gh__response_status *status) +static long gh__normalize_odd_codes(struct gh__request_params *params, + long http_response_code) { if (params->server_type == GH__SERVER_TYPE__CACHE && - status->response_code == 400) { + http_response_code == 400) { /* * The cache-server sends a somewhat bogus 400 instead of * the normal 401 when AUTH is required. Fixup the status @@ -594,16 +646,18 @@ static void gh__response_status__normalize_odd_codes( * TODO 401 for now. We should confirm the expected * TODO error message in the response-body. */ - status->response_code = 401; + return 401; } - if (status->response_code == 203) { + if (http_response_code == 203) { /* * A proxy server transformed a 200 from the origin server * into a 203. We don't care about the subtle distinction. */ - status->response_code = 200; + return 200; } + + return http_response_code; } /* @@ -613,9 +667,10 @@ static void gh__response_status__normalize_odd_codes( * https://docs.microsoft.com/en-us/azure/devops/integrate/concepts/rate-limits?view=azure-devops */ static void compute_retry_mode_from_http_response( - struct gh__response_status *status) + struct gh__response_status *status, + long http_response_code) { - switch (status->response_code) { + switch (http_response_code) { case 200: status->retry = GH__RETRY_MODE__SUCCESS; @@ -687,7 +742,7 @@ static void compute_retry_mode_from_http_response( hard_fail: strbuf_addf(&status->error_message, "(http:%d) Other [hard_fail]", - (int)status->response_code); + (int)http_response_code); status->retry = GH__RETRY_MODE__HARD_FAIL; status->ec = GH__ERROR_CODE__HTTP_OTHER; @@ -713,9 +768,10 @@ static void compute_retry_mode_from_http_response( * so I'm not going to fight that. 
 */
 static void compute_retry_mode_from_curl_error(
-	struct gh__response_status *status)
+	struct gh__response_status *status,
+	CURLcode curl_code)
 {
-	switch (status->curl_code) {
+	switch (curl_code) {
 	case CURLE_OK:
 		status->retry = GH__RETRY_MODE__SUCCESS;
 		status->ec = GH__ERROR_CODE__OK;
@@ -820,8 +876,7 @@ static void compute_retry_mode_from_curl_error(
 hard_fail:
 	strbuf_addf(&status->error_message, "(curl:%d) %s [hard_fail]",
-		    status->curl_code,
-		    curl_easy_strerror(status->curl_code));
+		    curl_code, curl_easy_strerror(curl_code));
 	status->retry = GH__RETRY_MODE__HARD_FAIL;
 	status->ec = GH__ERROR_CODE__CURL_ERROR;
 
@@ -831,8 +886,7 @@ static void compute_retry_mode_from_curl_error(
 transient:
 	strbuf_addf(&status->error_message, "(curl:%d) %s [transient]",
-		    status->curl_code,
-		    curl_easy_strerror(status->curl_code));
+		    curl_code, curl_easy_strerror(curl_code));
 	status->retry = GH__RETRY_MODE__TRANSIENT;
 	status->ec = GH__ERROR_CODE__CURL_ERROR;
 
@@ -851,26 +905,31 @@ static void gh__response_status__set_from_slot(
 	struct gh__response_status *status,
 	const struct active_request_slot *slot)
 {
-	status->curl_code = slot->results->curl_result;
+	long http_response_code;
+	CURLcode curl_code;
+
+	curl_code = slot->results->curl_result;
 
 	gh__curlinfo_strbuf(slot->curl, CURLINFO_CONTENT_TYPE,
 			    &status->content_type);
 
 	curl_easy_getinfo(slot->curl, CURLINFO_RESPONSE_CODE,
-			  &status->response_code);
+			  &http_response_code);
 
 	strbuf_setlen(&status->error_message, 0);
 
-	gh__response_status__normalize_odd_codes(params, status);
+	http_response_code = gh__normalize_odd_codes(params,
						     http_response_code);
 
 	/*
	 * Use normalized response/status codes from curl/http to decide
	 * how to set the error-code we propagate *AND* to decide if
	 * we should retry because of transient network problems.
 	 */
-	if (status->curl_code == CURLE_OK ||
-	    status->curl_code == CURLE_HTTP_RETURNED_ERROR)
-		compute_retry_mode_from_http_response(status);
+	if (curl_code == CURLE_OK ||
+	    curl_code == CURLE_HTTP_RETURNED_ERROR)
+		compute_retry_mode_from_http_response(status,
+						      http_response_code);
 	else
-		compute_retry_mode_from_curl_error(status);
+		compute_retry_mode_from_curl_error(status, curl_code);
 
 	if (status->ec != GH__ERROR_CODE__OK)
 		status->bytes_received = 0;
@@ -1028,8 +1087,8 @@ static void gh__run_one_slot(struct active_request_slot *slot,
 	trace2_region_enter("gvfs-helper", key.buf, NULL);
 
 	if (!start_active_slot(slot)) {
-		status->curl_code = CURLE_FAILED_INIT; /* a bit of a lie */
-		compute_retry_mode_from_curl_error(status);
+		compute_retry_mode_from_curl_error(status,
+						   CURLE_FAILED_INIT);
 	} else {
 		run_active_slot(slot);
 		if (params->b_write_to_file)
@@ -1060,7 +1119,7 @@ static void gh__run_one_slot(struct active_request_slot *slot,
 	stop_progress(&params->progress);
 
 	if (status->ec == GH__ERROR_CODE__OK && params->b_write_to_file) {
-		if (params->b_is_post)
+		if (params->b_is_post && params->object_count > 1)
 			install_packfile(params, status);
 		else
 			install_loose(params, status);
@@ -1216,8 +1275,8 @@ static void lookup_main_url(void)
 	trace2_data_string("gvfs-helper", NULL, "remote/url", gh__global.main_url);
 }
 
-static void do__gvfs_config(struct gh__response_status *status,
-			    struct strbuf *config_data);
+static void do__http_get__gvfs_config(struct gh__response_status *status,
+				      struct strbuf *config_data);
 
 /*
  * Find the URL of the cache-server, if we have one.
@@ -1276,7 +1335,7 @@ static void select_cache_server(void)
 	 * well-known by the main Git server.
*/ - do__gvfs_config(&status, &config_data); + do__http_get__gvfs_config(&status, &config_data); if (status.ec == GH__ERROR_CODE__OK) { /* @@ -1334,13 +1393,10 @@ static void select_cache_server(void) * Read stdin until EOF (or a blank line) and add the desired OIDs * to the oidset. * - * Stdin should contain a list of OIDs. It may have additional - * decoration that we need to strip out. - * - * We expect: - * [] // present OIDs + * Stdin should contain a list of OIDs. Lines may have additional + * text following the OID that we ignore. */ -static unsigned long read_stdin_from_rev_list(struct oidset *oids) +static unsigned long read_stdin_for_oids(struct oidset *oids) { struct object_id oid; struct strbuf buf_stdin = STRBUF_INIT; @@ -1362,17 +1418,23 @@ static unsigned long read_stdin_from_rev_list(struct oidset *oids) /* * Build a complete JSON payload for a gvfs/objects POST request - * containing the first n OIDs in an OIDSET index by the iterator. + * containing the first `nr_in_block` OIDs found in the OIDSET + * indexed by the given iterator. * * https://github.com/microsoft/VFSForGit/blob/master/Protocol.md + * + * Return the number of OIDs we actually put into the payload. + * If only 1 OID was found, also return it. */ static unsigned long build_json_payload__gvfs_objects( struct json_writer *jw_req, struct oidset_iter *iter, - unsigned long nr_in_block) + unsigned long nr_in_block, + struct object_id *oid_out) { unsigned long k; const struct object_id *oid; + const struct object_id *oid_prev = NULL; k = 0; @@ -1383,10 +1445,18 @@ static unsigned long build_json_payload__gvfs_objects( while (k < nr_in_block && (oid = oidset_iter_next(iter))) { jw_array_string(jw_req, oid_to_hex(oid)); k++; + oid_prev = oid; } jw_end(jw_req); jw_end(jw_req); + if (oid_out) { + if (k == 1) + oidcpy(oid_out, oid_prev); + else + oidclr(oid_out); + } + return k; } @@ -1627,6 +1697,33 @@ static void create_tempfile_for_packfile( strbuf_release(&basename); } +/* + * Create a pathname to the loose object in the shared-cache ODB + * with the given OID. Try to "mkdir -p" to ensure the parent + * directories exist. + */ +static int create_loose_pathname_in_odb(struct strbuf *buf_path, + const struct object_id *oid) +{ + enum scld_error scld; + const char *hex; + + hex = oid_to_hex(oid); + + strbuf_setlen(buf_path, 0); + strbuf_addbuf(buf_path, &gh__global.buf_odb_path); + strbuf_complete(buf_path, '/'); + strbuf_add(buf_path, hex, 2); + strbuf_addch(buf_path, '/'); + strbuf_addstr(buf_path, hex+2); + + scld = safe_create_leading_directories(buf_path->buf); + if (scld != SCLD_OK && scld != SCLD_EXISTS) + return -1; + + return 0; +} + /* * Create a tempfile to stream a loose object into. * @@ -1641,19 +1738,10 @@ static void create_tempfile_for_loose( { static int nth = 0; struct strbuf buf_path = STRBUF_INIT; - const char *hex; gh__response_status__zero(status); - hex = oid_to_hex(¶ms->loose_oid); - - strbuf_addbuf(&buf_path, &gh__global.buf_odb_path); - strbuf_complete(&buf_path, '/'); - strbuf_add(&buf_path, hex, 2); - - if (!file_exists(buf_path.buf) && - mkdir(buf_path.buf, 0777) == -1 && - !file_exists(buf_path.buf)) { + if (create_loose_pathname_in_odb(&buf_path, ¶ms->loose_oid)) { strbuf_addf(&status->error_message, "cannot create directory for loose object '%s'", buf_path.buf); @@ -1661,9 +1749,6 @@ static void create_tempfile_for_loose( goto cleanup; } - strbuf_addch(&buf_path, '/'); - strbuf_addstr(&buf_path, hex+2); - /* Remember the full path of the final destination. 
*/ strbuf_setlen(¶ms->loose_path, 0); strbuf_addbuf(¶ms->loose_path, &buf_path); @@ -1705,7 +1790,7 @@ static void install_packfile(struct gh__request_params *params, if (strcmp(status->content_type.buf, "application/x-git-packfile")) { strbuf_addf(&status->error_message, - "received unknown content-type '%s'", + "install_packfile: received unknown content-type '%s'", status->content_type.buf); status->ec = GH__ERROR_CODE__UNEXPECTED_CONTENT_TYPE; goto cleanup; @@ -1783,6 +1868,19 @@ static void install_loose(struct gh__request_params *params, { struct strbuf tmp_path = STRBUF_INIT; + /* + * We expect a loose object when we do a GET -or- when we + * do a POST with only 1 object. + */ + if (strcmp(status->content_type.buf, + "application/x-git-loose-object")) { + strbuf_addf(&status->error_message, + "install_loose: received unknown content-type '%s'", + status->content_type.buf); + status->ec = GH__ERROR_CODE__UNEXPECTED_CONTENT_TYPE; + return; + } + gh__response_status__zero(status); /* @@ -1879,10 +1977,8 @@ static size_t parse_resp_hdr(char *buffer, size_t size, size_t nitems, * The following X- headers are specific to AzureDevOps. * Other servers have similar sets of values, but I haven't * compared them in depth. - * - * TODO Remove this. */ - trace2_printf("Throttle: %s %s", key.buf, val.buf); + // trace2_printf("Throttle: %s %s", key.buf, val.buf); if (!strcmp(key.buf, "X-RateLimit-Resource")) { /* @@ -2104,10 +2200,11 @@ static void do_req(const char *url_base, if (params->tempfile) delete_tempfile(¶ms->tempfile); - if (params->b_is_post) + if (params->b_is_post && params->object_count > 1) create_tempfile_for_packfile(params, status); else create_tempfile_for_loose(params, status); + if (!params->tempfile || status->ec != GH__ERROR_CODE__OK) return; } else { @@ -2308,7 +2405,7 @@ static void do_req__to_main(const char *url_component, &gh__global.main_creds, params, status); - if (status->response_code == 401) { + if (status->retry == GH__RETRY_MODE__HTTP_401) { refresh_main_creds(); do_req__with_robust_retry(gh__global.main_url, url_component, @@ -2316,7 +2413,7 @@ static void do_req__to_main(const char *url_component, params, status); } - if (status->response_code == 200) + if (status->retry == GH__RETRY_MODE__SUCCESS) approve_main_creds(); } @@ -2332,7 +2429,7 @@ static void do_req__to_cache_server(const char *url_component, &gh__global.cache_creds, params, status); - if (status->response_code == 401) { + if (status->retry == GH__RETRY_MODE__HTTP_401) { refresh_cache_server_creds(); do_req__with_robust_retry(gh__global.cache_server_url, @@ -2341,7 +2438,7 @@ static void do_req__to_cache_server(const char *url_component, params, status); } - if (status->response_code == 200) + if (status->retry == GH__RETRY_MODE__SUCCESS) approve_cache_server_creds(); } @@ -2356,7 +2453,7 @@ static void do_req__with_fallback(const char *url_component, params->b_permit_cache_server_if_defined) { do_req__to_cache_server(url_component, params, status); - if (status->response_code == 200) + if (status->retry == GH__RETRY_MODE__SUCCESS) return; if (!gh__cmd_opts.try_fallback) @@ -2371,7 +2468,7 @@ static void do_req__with_fallback(const char *url_component, * Falling-back would likely just cause the 3rd (or maybe * 4th) cred prompt. */ - if (status->response_code == 401) + if (status->retry == GH__RETRY_MODE__HTTP_401) return; } @@ -2383,8 +2480,8 @@ static void do_req__with_fallback(const char *url_component, * * Return server's response buffer. This is probably a raw JSON string. 
 */
-static void do__gvfs_config(struct gh__response_status *status,
-			    struct strbuf *config_data)
+static void do__http_get__gvfs_config(struct gh__response_status *status,
+				      struct strbuf *config_data)
 {
 	struct gh__request_params params = GH__REQUEST_PARAMS_INIT;
 
@@ -2424,12 +2521,34 @@
 	gh__request_params__release(&params);
 }
 
+static void setup_gvfs_objects_progress(struct gh__request_params *params,
+					unsigned long num, unsigned long den)
+{
+	if (!gh__cmd_opts.show_progress)
+		return;
+
+	if (params->b_is_post && params->object_count > 1) {
+		strbuf_addf(&params->progress_base_phase2_msg,
+			    "Requesting packfile %ld/%ld with %ld objects",
+			    num, den, params->object_count);
+		strbuf_addf(&params->progress_base_phase3_msg,
+			    "Receiving packfile %ld/%ld with %ld objects",
+			    num, den, params->object_count);
+	} else {
+		strbuf_addf(&params->progress_base_phase3_msg,
+			    "Receiving %ld/%ld loose object",
+			    num, den);
+	}
+}
+
 /*
 * Call "gvfs/objects/<oid>" REST API to fetch a loose object
 * and write it to the ODB.
 */
-static void do__loose__gvfs_object(struct gh__response_status *status,
-				   const struct object_id *oid)
+static void do__http_get__gvfs_object(struct gh__response_status *status,
+				      const struct object_id *oid,
+				      unsigned long l_num, unsigned long l_den,
+				      struct string_list *result_list)
 {
 	struct gh__request_params params = GH__REQUEST_PARAMS_INIT;
 	struct strbuf component_url = STRBUF_INIT;
@@ -2454,44 +2573,52 @@ static void do__loose__gvfs_object(struct gh__response_status *status,
 
 	oidcpy(&params.loose_oid, oid);
 
-	if (gh__cmd_opts.show_progress) {
-		/*
-		 * Likewise, a gvfs/objects/{oid} has a very small reqest
-		 * payload, so I don't see any need to report progress on
-		 * the upload side of the GET. So just report progress
-		 * on the download side.
-		 */
-		strbuf_addstr(&params.progress_base_phase3_msg,
-			      "Receiving 1 loose object");
-	}
+	setup_gvfs_objects_progress(&params, l_num, l_den);
 
 	do_req__with_fallback(component_url.buf, &params, status);
 
+	if (status->ec == GH__ERROR_CODE__OK) {
+		struct strbuf msg = STRBUF_INIT;
+
+		strbuf_addf(&msg, "loose %s",
+			    oid_to_hex(&params.loose_oid));
+
+		string_list_append(result_list, msg.buf);
+		strbuf_release(&msg);
+	}
+
 	gh__request_params__release(&params);
 	strbuf_release(&component_url);
 }
 
 /*
- * Call "gvfs/objects" POST REST API to fetch a packfile containing
- * the objects in the requested OIDSET. Returns the filename (not
- * pathname) to the new packfile.
+ * Call "gvfs/objects" POST REST API to fetch a batch of objects
+ * from the OIDSET. Normally, this results in a packfile containing
+ * `nr_wanted_in_block` objects. We return the number actually
+ * consumed (along with the filename of the resulting packfile).
+ *
+ * However, if we only have 1 oid (remaining) in the OIDSET, the
+ * server will respond to our POST with a loose object rather than
+ * a packfile with 1 object.
+ *
+ * Append a message to the result_list describing the result.
+ *
+ * Return the number of OIDs consumed from the OIDSET.
*/ -static void do__packfile__gvfs_objects(struct gh__response_status *status, - struct oidset_iter *iter, - unsigned long nr_wanted_in_block, - int j_pack_num, int j_pack_den, - struct strbuf *output_filename, - unsigned long *nr_oid_taken) +static void do__http_post__gvfs_objects(struct gh__response_status *status, + struct oidset_iter *iter, + unsigned long nr_wanted_in_block, + int j_pack_num, int j_pack_den, + struct string_list *result_list, + unsigned long *nr_oid_taken) { struct json_writer jw_req = JSON_WRITER_INIT; struct gh__request_params params = GH__REQUEST_PARAMS_INIT; - strbuf_setlen(output_filename, 0); - gh__response_status__zero(status); params.object_count = build_json_payload__gvfs_objects( - &jw_req, iter, nr_wanted_in_block); + &jw_req, iter, nr_wanted_in_block, ¶ms.loose_oid); *nr_oid_taken = params.object_count; strbuf_addstr(¶ms.tr2_label, "POST/objects"); @@ -2511,7 +2638,7 @@ static void do__packfile__gvfs_objects(struct gh__response_status *status, "Content-Type: application/json"); /* * We really always want a packfile. But if the payload only - * requests 1 OID, the server will/may send us a single loose + * requests 1 OID, the server will send us a single loose * objects instead. (Apparently the server ignores us when we * only send application/x-git-packfile and does it anyway.) * @@ -2523,156 +2650,172 @@ static void do__packfile__gvfs_objects(struct gh__response_status *status, params.headers = curl_slist_append(params.headers, "Accept: application/x-git-loose-object"); - if (gh__cmd_opts.show_progress) { - strbuf_addf(¶ms.progress_base_phase2_msg, - "Requesting packfile %d/%d with %ld objects", - j_pack_num, j_pack_den, - params.object_count); - strbuf_addf(¶ms.progress_base_phase3_msg, - "Receiving packfile %d/%d with %ld objects", - j_pack_num, j_pack_den, - params.object_count); - } + setup_gvfs_objects_progress(¶ms, j_pack_num, j_pack_den); do_req__with_fallback("gvfs/objects", ¶ms, status); - if (status->ec == GH__ERROR_CODE__OK) - strbuf_addbuf(output_filename, ¶ms.final_packfile_filename); + + if (status->ec == GH__ERROR_CODE__OK) { + struct strbuf msg = STRBUF_INIT; + + if (params.object_count > 1) + strbuf_addf(&msg, "packfile %s", + params.final_packfile_filename.buf); + else + strbuf_addf(&msg, "loose %s", + oid_to_hex(¶ms.loose_oid)); + + string_list_append(result_list, msg.buf); + strbuf_release(&msg); + } gh__request_params__release(¶ms); jw_release(&jw_req); } /* - * Bulk or individually fetch a list of objects in one or more http requests. - * Create one or more packfiles and/or loose objects. + * Drive one or more HTTP GET requests to fetch the objects + * in the given OIDSET. These are received into loose objects. * - * We accumulate results for each request in `result_list` until we get a + * Accumulate results for each request in `result_list` until we get a * hard error and have to stop. 
*/ -static void do_fetch_oidset(struct gh__response_status *status, - struct oidset *oids, - unsigned long nr_oid_total, - struct string_list *result_list) +static void do__http_get__fetch_oidset(struct gh__response_status *status, + struct oidset *oids, + unsigned long nr_oid_total, + struct string_list *result_list) { struct oidset_iter iter; - struct strbuf output_filename = STRBUF_INIT; - struct strbuf msg = STRBUF_INIT; struct strbuf err404 = STRBUF_INIT; const struct object_id *oid; unsigned long k; - unsigned long nr_oid_taken; int had_404 = 0; - int j_pack_den = 0; - int j_pack_num = 0; gh__response_status__zero(status); if (!nr_oid_total) return; - if (nr_oid_total > 1) - j_pack_den = ((nr_oid_total + gh__cmd_opts.block_size - 1) - / gh__cmd_opts.block_size); - oidset_iter_init(oids, &iter); - for (k = 0; k < nr_oid_total; k += nr_oid_taken) { - if (nr_oid_total - k == 1 || gh__cmd_opts.block_size == 1) { - oid = oidset_iter_next(&iter); - nr_oid_taken = 1; + for (k = 0; k < nr_oid_total; k++) { + oid = oidset_iter_next(&iter); - do__loose__gvfs_object(status, oid); + do__http_get__gvfs_object(status, oid, k+1, nr_oid_total, + result_list); + /* + * If we get a 404 for an individual object, ignore + * it and get the rest. We'll fixup the 'ec' later. + */ + if (status->ec == GH__ERROR_CODE__HTTP_404) { + if (!err404.len) + strbuf_addf(&err404, "%s: from GET %s", + status->error_message.buf, + oid_to_hex(oid)); /* - * If we get a 404 for an individual object, ignore - * it and get the rest. We'll fixup the 'ec' later. + * Mark the fetch as "incomplete", but don't + * stop trying to get other chunks. */ - if (status->ec == GH__ERROR_CODE__HTTP_404) { - if (!err404.len) - strbuf_addf(&err404, "%s: loose object %s", - status->error_message.buf, - oid_to_hex(oid)); - /* - * Mark the fetch as "incomplete", but don't - * stop trying to get other chunks. - */ - had_404 = 1; - continue; - } + had_404 = 1; + continue; + } - if (status->ec != GH__ERROR_CODE__OK) { - /* Stop at the first hard error. */ - strbuf_addf(&status->error_message, ": loose %s", - oid_to_hex(oid)); - goto cleanup; - } + if (status->ec != GH__ERROR_CODE__OK) { + /* Stop at the first hard error. */ + strbuf_addf(&status->error_message, ": from GET %s", + oid_to_hex(oid)); + goto cleanup; + } + } - strbuf_setlen(&msg, 0); - strbuf_addf(&msg, "loose %s", oid_to_hex(oid)); - string_list_append(result_list, msg.buf); +cleanup: + if (had_404 && status->ec == GH__ERROR_CODE__OK) { + strbuf_setlen(&status->error_message, 0); + strbuf_addbuf(&status->error_message, &err404); + status->ec = GH__ERROR_CODE__HTTP_404; + } - } else { - strbuf_setlen(&output_filename, 0); + strbuf_release(&err404); +} - j_pack_num++; +/* + * Drive one or more HTTP POST requests to bulk fetch the objects in + * the given OIDSET. Create one or more packfiles and/or loose objects. + * + * Accumulate results for each request in `result_list` until we get a + * hard error and have to stop. 
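+ *
+ * For example (illustrative numbers only): with the default block
+ * size of 4000, 9000 OIDs are sent as ceil(9000/4000) = 3 POSTs of
+ * 4000, 4000, and 1000 OIDs; j_pack_den below is that ceiling.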
+ */ +static void do__http_post__fetch_oidset(struct gh__response_status *status, + struct oidset *oids, + unsigned long nr_oid_total, + struct string_list *result_list) +{ + struct oidset_iter iter; + struct strbuf err404 = STRBUF_INIT; + unsigned long k; + unsigned long nr_oid_taken; + int j_pack_den = 0; + int j_pack_num = 0; + int had_404 = 0; + + gh__response_status__zero(status); + if (!nr_oid_total) + return; - do__packfile__gvfs_objects(status, &iter, - gh__cmd_opts.block_size, - j_pack_num, j_pack_den, - &output_filename, - &nr_oid_taken); + oidset_iter_init(oids, &iter); + + j_pack_den = ((nr_oid_total + gh__cmd_opts.block_size - 1) + / gh__cmd_opts.block_size); + + for (k = 0; k < nr_oid_total; k += nr_oid_taken) { + j_pack_num++; + do__http_post__gvfs_objects(status, &iter, + gh__cmd_opts.block_size, + j_pack_num, j_pack_den, + result_list, + &nr_oid_taken); + + /* + * Because the oidset iterator has random + * order, it does no good to say the k-th or + * n-th chunk was incomplete; the client + * cannot use that index for anything. + * + * We get a 404 when at least one object in + * the chunk was not found. + * + * For now, ignore the 404 and go on to the + * next chunk and then fixup the 'ec' later. + */ + if (status->ec == GH__ERROR_CODE__HTTP_404) { + if (!err404.len) + strbuf_addf(&err404, + "%s: from POST", + status->error_message.buf); /* - * Because the oidset iterator has random - * order, it does no good to say the k-th or - * n-th chunk was incomplete; the client - * cannot use that index for anything. - * - * We get a 404 when at least one object in - * the chunk was not found. - * - * TODO Consider various retry strategies (such as - * TODO loose or bisect) on the members within this - * TODO chunk to reduce the impact of the miss. - * - * For now, ignore the 404 and go on to the - * next chunk and then fixup the 'ec' later. + * Mark the fetch as "incomplete", but don't + * stop trying to get other chunks. */ - if (status->ec == GH__ERROR_CODE__HTTP_404) { - if (!err404.len) - strbuf_addf(&err404, - "%s: packfile object", - status->error_message.buf); - /* - * Mark the fetch as "incomplete", but don't - * stop trying to get other chunks. - */ - had_404 = 1; - continue; - } - - if (status->ec != GH__ERROR_CODE__OK) { - /* Stop at the first hard error. */ - strbuf_addstr(&status->error_message, - ": in packfile"); - goto cleanup; - } + had_404 = 1; + continue; + } - strbuf_setlen(&msg, 0); - strbuf_addf(&msg, "packfile %s", output_filename.buf); - string_list_append(result_list, msg.buf); + if (status->ec != GH__ERROR_CODE__OK) { + /* Stop at the first hard error. 
*/ + strbuf_addstr(&status->error_message, + ": from POST"); + goto cleanup; } } cleanup: - strbuf_release(&msg); - strbuf_release(&err404); - strbuf_release(&output_filename); - if (had_404 && status->ec == GH__ERROR_CODE__OK) { strbuf_setlen(&status->error_message, 0); - strbuf_addstr(&status->error_message, "404 Not Found"); + strbuf_addbuf(&status->error_message, &err404); status->ec = GH__ERROR_CODE__HTTP_404; } + + strbuf_release(&err404); } /* @@ -2710,7 +2853,7 @@ static enum gh__error_code do_sub_cmd__config(int argc, const char **argv) finish_init(0); - do__gvfs_config(&status, &config_data); + do__http_get__gvfs_config(&status, &config_data); ec = status.ec; if (ec == GH__ERROR_CODE__OK) @@ -2725,12 +2868,61 @@ static enum gh__error_code do_sub_cmd__config(int argc, const char **argv) } /* - * Read a list of objects from stdin and fetch them in a single request (or - * multiple block-size requests). + * Read a list of objects from stdin and fetch them as a series of + * single object HTTP GET requests. */ static enum gh__error_code do_sub_cmd__get(int argc, const char **argv) { static struct option get_options[] = { + OPT_INTEGER('r', "max-retries", &gh__cmd_opts.max_retries, + N_("retries for transient network errors")), + OPT_END(), + }; + + struct gh__response_status status = GH__RESPONSE_STATUS_INIT; + struct oidset oids = OIDSET_INIT; + struct string_list result_list = STRING_LIST_INIT_DUP; + enum gh__error_code ec = GH__ERROR_CODE__OK; + unsigned long nr_oid_total; + int k; + + trace2_cmd_mode("get"); + + if (argc > 1 && !strcmp(argv[1], "-h")) + usage_with_options(objects_get_usage, get_options); + + argc = parse_options(argc, argv, NULL, get_options, objects_get_usage, 0); + if (gh__cmd_opts.max_retries < 0) + gh__cmd_opts.max_retries = 0; + + finish_init(1); + + nr_oid_total = read_stdin_for_oids(&oids); + + do__http_get__fetch_oidset(&status, &oids, nr_oid_total, &result_list); + + ec = status.ec; + + for (k = 0; k < result_list.nr; k++) + printf("%s\n", result_list.items[k].string); + + if (ec != GH__ERROR_CODE__OK) + error("get: %s", status.error_message.buf); + + gh__response_status__release(&status); + oidset_clear(&oids); + string_list_clear(&result_list, 0); + + return ec; +} + +/* + * Read a list of objects from stdin and fetch them in a single request (or + * multiple block-size requests) using one or more HTTP POST requests. 
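+ *
+ * For example (hypothetical OID list file):
+ *
+ *     git gvfs-helper --remote=origin --no-progress post <oid-list.txt
+ *
+ * prints a "loose <oid>" or "packfile <name>" line for each result.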
+ */ +static enum gh__error_code do_sub_cmd__post(int argc, const char **argv) +{ + static struct option post_options[] = { OPT_MAGNITUDE('b', "block-size", &gh__cmd_opts.block_size, N_("number of objects to request at a time")), OPT_INTEGER('d', "depth", &gh__cmd_opts.depth, @@ -2747,12 +2939,12 @@ static enum gh__error_code do_sub_cmd__get(int argc, const char **argv) unsigned long nr_oid_total; int k; - trace2_cmd_mode("get"); + trace2_cmd_mode("post"); if (argc > 1 && !strcmp(argv[1], "-h")) - usage_with_options(get_usage, get_options); + usage_with_options(objects_post_usage, post_options); - argc = parse_options(argc, argv, NULL, get_options, get_usage, 0); + argc = parse_options(argc, argv, NULL, post_options, objects_post_usage, 0); if (gh__cmd_opts.depth < 1) gh__cmd_opts.depth = 1; if (gh__cmd_opts.max_retries < 0) @@ -2760,12 +2952,9 @@ static enum gh__error_code do_sub_cmd__get(int argc, const char **argv) finish_init(1); - nr_oid_total = read_stdin_from_rev_list(&oids); + nr_oid_total = read_stdin_for_oids(&oids); - trace2_region_enter("gvfs-helper", "get", NULL); - trace2_data_intmax("gvfs-helper", NULL, "get/nr_objects", nr_oid_total); - do_fetch_oidset(&status, &oids, nr_oid_total, &result_list); - trace2_region_leave("gvfs-helper", "get", NULL); + do__http_post__fetch_oidset(&status, &oids, nr_oid_total, &result_list); ec = status.ec; @@ -2773,7 +2962,7 @@ static enum gh__error_code do_sub_cmd__get(int argc, const char **argv) printf("%s\n", result_list.items[k].string); if (ec != GH__ERROR_CODE__OK) - error("get: %s", status.error_message.buf); + error("post: %s", status.error_message.buf); gh__response_status__release(&status); oidset_clear(&oids); @@ -2783,12 +2972,14 @@ static enum gh__error_code do_sub_cmd__get(int argc, const char **argv) } /* - * Handle the 'get' command when in "server mode". Only call error() and set ec - * for hard errors where we cannot communicate correctly with the foreground - * client process. Pass any actual data errors (such as 404's or 401's from - * the fetch back to the client process. + * Handle the 'objects.get' and 'objects.post' verbs in "server mode". + * + * Only call error() and set ec for hard errors where we cannot + * communicate correctly with the foreground client process. Pass any + * actual data errors (such as 404's or 401's from the fetch) back to + * the client process. */ -static enum gh__error_code do_server_subprocess_get(void) +static enum gh__error_code do_server_subprocess__objects(const char *verb_line) { struct gh__response_status status = GH__RESPONSE_STATUS_INIT; struct oidset oids = OIDSET_INIT; @@ -2799,12 +2990,19 @@ static enum gh__error_code do_server_subprocess_get(void) int len; int err; int k; + enum gh__objects_mode objects_mode; unsigned long nr_oid_total = 0; - /* - * Inside the "get" command, we expect a list of OIDs - * and a flush. 
- */ + if (!strcmp(verb_line, "objects.get")) + objects_mode = GH__OBJECTS_MODE__GET; + else if (!strcmp(verb_line, "objects.post")) + objects_mode = GH__OBJECTS_MODE__POST; + else { + error("server: unexpected objects-mode verb '%s'", verb_line); + ec = GH__ERROR_CODE__SUBPROCESS_SYNTAX; + goto cleanup; + } + while (1) { len = packet_read_line_gently(0, NULL, &line); if (len < 0 || !line) @@ -2829,10 +3027,10 @@ static enum gh__error_code do_server_subprocess_get(void) goto cleanup; } - trace2_region_enter("gvfs-helper", "server/get", NULL); - trace2_data_intmax("gvfs-helper", NULL, "server/get/nr_objects", nr_oid_total); - do_fetch_oidset(&status, &oids, nr_oid_total, &result_list); - trace2_region_leave("gvfs-helper", "server/get", NULL); + if (objects_mode == GH__OBJECTS_MODE__GET) + do__http_get__fetch_oidset(&status, &oids, nr_oid_total, &result_list); + else + do__http_post__fetch_oidset(&status, &oids, nr_oid_total, &result_list); /* * Write pathname of the ODB where we wrote all of the objects @@ -2887,7 +3085,7 @@ static enum gh__error_code do_server_subprocess_get(void) return ec; } -typedef enum gh__error_code (fn_subprocess_cmd)(void); +typedef enum gh__error_code (fn_subprocess_cmd)(const char *verb_line); struct subprocess_capability { const char *name; @@ -2896,7 +3094,7 @@ struct subprocess_capability { }; static struct subprocess_capability caps[] = { - { "get", 0, do_server_subprocess_get }, + { "objects", 0, do_server_subprocess__objects }, { NULL, 0, NULL }, }; @@ -3027,8 +3225,9 @@ static enum gh__error_code do_sub_cmd__server(int argc, const char **argv) } for (k = 0; caps[k].name; k++) { - if (caps[k].client_has && !strcmp(line, caps[k].name)) { - ec = (caps[k].pfn)(); + if (caps[k].client_has && + starts_with(line, caps[k].name)) { + ec = (caps[k].pfn)(line); if (ec != GH__ERROR_CODE__OK) goto cleanup; goto top_of_loop; @@ -3049,6 +3248,9 @@ static enum gh__error_code do_sub_cmd(int argc, const char **argv) if (!strcmp(argv[0], "get")) return do_sub_cmd__get(argc, argv); + if (!strcmp(argv[0], "post")) + return do_sub_cmd__post(argc, argv); + if (!strcmp(argv[0], "config")) return do_sub_cmd__config(argc, argv); @@ -3099,8 +3301,8 @@ int cmd_main(int argc, const char **argv) setup_git_directory_gently(NULL); /* Set any non-zero initial values in gh__cmd_opts. 
 */
-	gh__cmd_opts.depth = GH__DEFAULT_COMMIT_DEPTH;
-	gh__cmd_opts.block_size = GH__DEFAULT_BLOCK_SIZE;
+	gh__cmd_opts.depth = GH__DEFAULT__OBJECTS_POST__COMMIT_DEPTH;
+	gh__cmd_opts.block_size = GH__DEFAULT__OBJECTS_POST__BLOCK_SIZE;
 	gh__cmd_opts.max_retries = GH__DEFAULT_MAX_RETRIES;
 	gh__cmd_opts.max_transient_backoff_sec =
 		GH__DEFAULT_MAX_TRANSIENT_BACKOFF_SEC;

From 2729138fc65e97571e7567c04c376210475b06b9 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler
Date: Tue, 26 Nov 2019 14:13:57 -0500
Subject: [PATCH 082/104] gvfs-helper: add prefetch .keep file for last
 packfile

Signed-off-by: Jeff Hostetler
---
 gvfs-helper.c          | 83 ++++++++++++++++++++++++++++++++++++++++--
 t/t5799-gvfs-helper.sh | 29 +++++++++++++++
 2 files changed, 109 insertions(+), 3 deletions(-)

diff --git a/gvfs-helper.c b/gvfs-helper.c
index 129c2f429eb08e..b9b2090519c379 100644
--- a/gvfs-helper.c
+++ b/gvfs-helper.c
@@ -1864,6 +1864,7 @@ static void my_run_index_pack(struct gh__request_params *params,
 
 static void my_finalize_packfile(struct gh__request_params *params,
 				 struct gh__response_status *status,
+				 int b_keep,
 				 const struct strbuf *temp_path_pack,
 				 const struct strbuf *temp_path_idx,
 				 struct strbuf *final_path_pack,
@@ -1883,6 +1884,21 @@ static void my_finalize_packfile(struct gh__request_params *params,
 		return;
 	}
 
+	if (b_keep) {
+		struct strbuf keep = STRBUF_INIT;
+		int fd_keep;
+
+		strbuf_addbuf(&keep, final_path_pack);
+		strbuf_strip_suffix(&keep, ".pack");
+		strbuf_addstr(&keep, ".keep");
+
+		fd_keep = xopen(keep.buf, O_WRONLY | O_CREAT | O_TRUNC, 0666);
+		if (fd_keep >= 0)
+			close(fd_keep);
+
+		strbuf_release(&keep);
+	}
+
 	if (params->result_list) {
 		struct strbuf result_msg = STRBUF_INIT;
 
@@ -1935,7 +1951,7 @@ static void install_packfile(struct gh__request_params *params,
 	create_final_packfile_pathnames("vfs", packfile_checksum.buf, NULL,
 					&final_path_pack, &final_path_idx,
 					&final_filename);
-	my_finalize_packfile(params, status,
+	my_finalize_packfile(params, status, 0,
 			     &temp_path_pack, &temp_path_idx,
 			     &final_path_pack, &final_path_idx,
 			     &final_filename);
@@ -2031,6 +2047,12 @@ struct ph {
 
 /*
  * Extract the next packfile from the multipack.
+ * Install {.pack, .idx, .keep} set.
+ *
+ * Mark each successfully installed prefetch pack with a .keep file, in
+ * case we have errors decoding/indexing later packs within the received
+ * multipart file.  (A later pass can delete the unnecessary .keep files
+ * from this and any previous invocations.)
 */
 static void extract_packfile_from_multipack(
 	struct gh__request_params *params,
@@ -2125,7 +2147,7 @@ static void extract_packfile_from_multipack(
 	}
 	else {
 		/*
-		 * Server send the .idx immediately after the .pack in the
+		 * Server sent the .idx immediately after the .pack in the
 		 * data stream.  I'm tempted to verify it, but that defeats
 		 * the purpose of having it cached...
 		 */
@@ -2147,7 +2169,7 @@ static void extract_packfile_from_multipack(
 					&final_path_pack, &final_path_idx,
 					&final_filename);
 
-	my_finalize_packfile(params, status,
+	my_finalize_packfile(params, status, 1,
 			     &temp_path_pack, &temp_path_idx,
 			     &final_path_pack, &final_path_idx,
 			     &final_filename);
@@ -2162,6 +2184,56 @@ static void extract_packfile_from_multipack(
 	strbuf_release(&final_filename);
 }
 
+struct keep_files_data {
+	timestamp_t max_timestamp;
+	int pos_of_max;
+	struct string_list *keep_files;
+};
+
+static void cb_keep_files(const char *full_path, size_t full_path_len,
+			  const char *file_path, void *void_data)
+{
+	struct keep_files_data *data = void_data;
+	const char *val;
+	timestamp_t t;
+
+	/*
+	 * We expect prefetch packfiles named like:
+	 *
+	 *     prefetch-<timestamp>-<checksum>.keep
+	 */
+	if (!skip_prefix(file_path, "prefetch-", &val))
+		return;
+	if (!ends_with(val, ".keep"))
+		return;
+
+	t = strtol(val, NULL, 10);
+	if (t > data->max_timestamp) {
+		data->pos_of_max = data->keep_files->nr;
+		data->max_timestamp = t;
+	}
+
+	string_list_append(data->keep_files, full_path);
+}
+
+static void delete_stale_keep_files(
+	struct gh__request_params *params,
+	struct gh__response_status *status)
+{
+	struct string_list keep_files = STRING_LIST_INIT_DUP;
+	struct keep_files_data data = { 0, 0, &keep_files };
+	int k;
+
+	for_each_file_in_pack_dir(gh__global.buf_odb_path.buf,
+				  cb_keep_files, &data);
+	for (k = 0; k < keep_files.nr; k++) {
+		if (k != data.pos_of_max)
+			unlink(keep_files.items[k].string);
+	}
+
+	string_list_clear(&keep_files, 0);
+}
+
 /*
  * Cut apart the received multipart response into individual packfiles
  * and install each one.
@@ -2180,6 +2252,7 @@ static void install_prefetch(struct gh__request_params *params,
 	unsigned short np;
 	unsigned short k;
 	int fd = -1;
+	int nr_installed = 0;
 
 	struct strbuf temp_path_mp = STRBUF_INIT;
 
@@ -2220,8 +2293,12 @@ static void install_prefetch(struct gh__request_params *params,
 		extract_packfile_from_multipack(params, status, fd, k);
 		if (status->ec != GH__ERROR_CODE__OK)
 			break;
+		nr_installed++;
 	}
 
+	if (nr_installed)
+		delete_stale_keep_files(params, status);
+
 cleanup:
 	if (fd != -1)
 		close(fd);
diff --git a/t/t5799-gvfs-helper.sh b/t/t5799-gvfs-helper.sh
index efaca1f2e4d645..b654b1e4cdb757 100755
--- a/t/t5799-gvfs-helper.sh
+++ b/t/t5799-gvfs-helper.sh
@@ -388,6 +388,30 @@ verify_received_packfile_count () {
 	return 0
 }
 
+# Verify that we have exactly 1 prefetch .keep file.
+# Optionally, verify that it has the given timestamp.
+#
+verify_prefetch_keeps () {
+	count=$(( $(ls -1 "$SHARED_CACHE_T1"/pack/prefetch-*.keep | wc -l) ))
+	if test $count -ne 1
+	then
+		echo "verify_prefetch_keeps: found $count, expected 1."
+		return 1
+	fi
+
+	if test $# -eq 1
+	then
+		count=$(( $(ls -1 "$SHARED_CACHE_T1"/pack/prefetch-$1-*.keep | wc -l) ))
+		if test $count -ne 1
+		then
+			echo "verify_prefetch_keeps: did not find expected keep file."
+			return 1
+		fi
+	fi
+
+	return 0
+}
+
 per_test_cleanup () {
 	stop_gvfs_protocol_server
 
@@ -641,6 +665,7 @@ test_expect_success 'basic: PREFETCH w/o arg gets all' '
 	#  packfile.
 	#
 	verify_received_packfile_count 3 &&
+	verify_prefetch_keeps 1200000000 &&
 
 	stop_gvfs_protocol_server &&
 	verify_connection_count 1
@@ -662,6 +687,7 @@ test_expect_success 'basic: PREFETCH w/ arg' '
 	#  packfile.
 	#
 	verify_received_packfile_count 2 &&
+	verify_prefetch_keeps 1200000000 &&
 
 	stop_gvfs_protocol_server &&
 	verify_connection_count 1
@@ -684,6 +710,7 @@ test_expect_success 'basic: PREFETCH mayhem no_prefetch_idx' '
 	#  packfile.
 	#
 	verify_received_packfile_count 2 &&
+	verify_prefetch_keeps 1200000000 &&
 
 	stop_gvfs_protocol_server &&
 	verify_connection_count 1
@@ -705,6 +732,7 @@ test_expect_success 'basic: PREFETCH up-to-date' '
 	#  packfile.
 	#
 	verify_received_packfile_count 2 &&
+	verify_prefetch_keeps 1200000000 &&
 
 	# Ask again for any packfiles newer than what we have cached locally.
 	#
@@ -718,6 +746,7 @@ test_expect_success 'basic: PREFETCH up-to-date' '
 	#  packfile.
 	#
 	verify_received_packfile_count 0 &&
+	verify_prefetch_keeps 1200000000 &&
 
 	stop_gvfs_protocol_server &&
 	verify_connection_count 2

From 11be079ce6576abc699edcc795a130e6de90471a Mon Sep 17 00:00:00 2001
From: Derrick Stolee
Date: Thu, 24 Oct 2019 08:11:25 -0400
Subject: [PATCH 083/104] gvfs-helper: dramatically reduce progress noise

During development, it was very helpful to see the gvfs-helper do its
work to request a pack-file or download a loose object. When these
messages appear during normal use, it leads to very noisy terminal
output.

Remove all progress indicators when downloading loose objects. We know
that these can number in the thousands in certain kinds of history
calls, and would litter the terminal output with noise. This happens
during 'git fetch' or 'git pull' as well when the tip commits are
checked for the new refs.

Remove the "Requesting packfile with %ld objects" message, as this
operation is very fast. We quickly follow up with the more valuable
"Receiving packfile %ld/%ld with %ld objects". When a large "git
checkout" causes many pack-file downloads, it is good to know that Git
is asking for data from the server.

Signed-off-by: Derrick Stolee
---
 gvfs-helper.c | 8 +-------
 1 file changed, 1 insertion(+), 7 deletions(-)

diff --git a/gvfs-helper.c b/gvfs-helper.c
index abae824cdadaed..21b57d90ad3b16 100644
--- a/gvfs-helper.c
+++ b/gvfs-helper.c
@@ -2528,17 +2528,11 @@ static void setup_gvfs_objects_progress(struct gh__request_params *params,
 		return;
 
 	if (params->b_is_post && params->object_count > 1) {
-		strbuf_addf(&params->progress_base_phase2_msg,
-			    "Requesting packfile %ld/%ld with %ld objects",
-			    num, den, params->object_count);
 		strbuf_addf(&params->progress_base_phase3_msg,
 			    "Receiving packfile %ld/%ld with %ld objects",
 			    num, den, params->object_count);
-	} else {
-		strbuf_addf(&params->progress_base_phase3_msg,
-			    "Receiving %ld/%ld loose object",
-			    num, den);
 	}
+	/* If requesting only one object, then do not show progress */
 }
 
 /*

From 076972a4acac7d47ab5c020891a7459a880ef58d Mon Sep 17 00:00:00 2001
From: Derrick Stolee
Date: Mon, 16 Dec 2019 13:12:32 -0500
Subject: [PATCH 084/104] gvfs-helper: do one read in my_copy_fd_len_tail()

Signed-off-by: Derrick Stolee
---
 gvfs-helper.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/gvfs-helper.c b/gvfs-helper.c
index b9b2090519c379..9ab2e8e315eda6 100644
--- a/gvfs-helper.c
+++ b/gvfs-helper.c
@@ -2021,18 +2021,18 @@ static int my_copy_fd_len_tail(int fd_in, int fd_out, ssize_t nr_bytes_total,
 {
 	memset(buf_tail, 0, tail_len);
 
+	if (my_copy_fd_len(fd_in, fd_out, nr_bytes_total) < 0)
+		return -1;
+
 	if (nr_bytes_total < tail_len)
-		return my_copy_fd_len(fd_in, fd_out, nr_bytes_total);
+		return 0;
 
-	if (my_copy_fd_len(fd_in, fd_out, (nr_bytes_total - tail_len)) < 0)
-		return -1;
+	/* Reset the position to read the tail */
+	lseek(fd_in, -tail_len, SEEK_CUR);
 
 	if (xread(fd_in, (char *)buf_tail, tail_len) != tail_len)
 		return -1;
 
-	if (write_in_full(fd_out, buf_tail, tail_len) < 0)
-		return -1;
-
 	return 0;
 }
 

From 6c2fa6bd4ac079357b074bb5e028e6d8623548ec Mon Sep 17
00:00:00 2001 From: Derrick Stolee Date: Mon, 4 Nov 2019 14:47:57 -0500 Subject: [PATCH 085/104] gvfs-helper-client.h: define struct object_id Signed-off-by: Derrick Stolee --- gvfs-helper-client.h | 1 + 1 file changed, 1 insertion(+) diff --git a/gvfs-helper-client.h b/gvfs-helper-client.h index a5a951ff5b5bfe..c1e38fad75f841 100644 --- a/gvfs-helper-client.h +++ b/gvfs-helper-client.h @@ -3,6 +3,7 @@ struct repository; struct commit; +struct object_id; enum gh_client__created { /* From 93c4f147bd6ce37ab5a014d73e6ceec7c7ba64c3 Mon Sep 17 00:00:00 2001 From: Jeff Hostetler Date: Tue, 16 Jul 2019 10:16:37 -0400 Subject: [PATCH 086/104] trace2:gvfs:experiment: add region to cache_tree_fully_valid() Signed-off-by: Jeff Hostetler --- cache-tree.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/cache-tree.c b/cache-tree.c index da9132781d6274..28af22331269c3 100644 --- a/cache-tree.c +++ b/cache-tree.c @@ -222,7 +222,7 @@ static void discard_unused_subtrees(struct cache_tree *it) } } -int cache_tree_fully_valid(struct cache_tree *it) +static int cache_tree_fully_valid_1(struct cache_tree *it) { int i; if (!it) @@ -230,12 +230,23 @@ int cache_tree_fully_valid(struct cache_tree *it) if (it->entry_count < 0 || !has_object_file(&it->oid)) return 0; for (i = 0; i < it->subtree_nr; i++) { - if (!cache_tree_fully_valid(it->down[i]->cache_tree)) + if (!cache_tree_fully_valid_1(it->down[i]->cache_tree)) return 0; } return 1; } +int cache_tree_fully_valid(struct cache_tree *it) +{ + int result; + + trace2_region_enter("cache_tree", "fully_valid", NULL); + result = cache_tree_fully_valid_1(it); + trace2_region_leave("cache_tree", "fully_valid", NULL); + + return result; +} + static int update_one(struct cache_tree *it, struct cache_entry **cache, int entries, From 86c49e67d9f63587eee8b238fdbf948fbcc7be2f Mon Sep 17 00:00:00 2001 From: Derrick Stolee Date: Mon, 16 Dec 2019 16:26:33 -0500 Subject: [PATCH 087/104] gvfs-helper: move content-type warning for prefetch packs Signed-off-by: Derrick Stolee --- gvfs-helper.c | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/gvfs-helper.c b/gvfs-helper.c index 9ab2e8e315eda6..6403e685302b8b 100644 --- a/gvfs-helper.c +++ b/gvfs-helper.c @@ -2393,25 +2393,25 @@ static void install_result(struct gh__request_params *params, install_prefetch(params, status); return; } - } - - if (!strcmp(status->content_type.buf, "application/x-git-packfile")) { - assert(params->b_is_post); - assert(params->objects_mode == GH__OBJECTS_MODE__POST); + } else { + if (!strcmp(status->content_type.buf, "application/x-git-packfile")) { + assert(params->b_is_post); + assert(params->objects_mode == GH__OBJECTS_MODE__POST); - install_packfile(params, status); - return; - } + install_packfile(params, status); + return; + } - if (!strcmp(status->content_type.buf, - "application/x-git-loose-object")) { - /* - * We get these for "gvfs/objects" GET and POST requests. - * - * Note that this content type is singular, not plural. - */ - install_loose(params, status); - return; + if (!strcmp(status->content_type.buf, + "application/x-git-loose-object")) { + /* + * We get these for "gvfs/objects" GET and POST requests. + * + * Note that this content type is singular, not plural. 
+			 */
+			install_loose(params, status);
+			return;
+		}
 	}
 
 	strbuf_addf(&status->error_message,

From 03cfdaa8338e64f7d06dddd8c151eb411f596d4f Mon Sep 17 00:00:00 2001
From: Derrick Stolee
Date: Mon, 11 Nov 2019 14:56:02 -0500
Subject: [PATCH 088/104] gvfs-helper: handle pack-file after single POST
 request

If our POST request includes a commit ID, then the remote will send a
pack-file containing the commit and all trees reachable from its root
tree. With the current implementation, this causes a failure since we
call install_loose() when asking for one object. Modify the condition
so that we call install_packfile() based on the content type of the
response rather than on the number of objects requested.

Also, create a tempfile for the pack-file download or else we will
have problems!

Signed-off-by: Derrick Stolee
---
 gvfs-helper.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/gvfs-helper.c b/gvfs-helper.c
index 21b57d90ad3b16..e902e1f6eaf34c 100644
--- a/gvfs-helper.c
+++ b/gvfs-helper.c
@@ -1119,7 +1119,9 @@ static void gh__run_one_slot(struct active_request_slot *slot,
 	stop_progress(&params->progress);
 
 	if (status->ec == GH__ERROR_CODE__OK && params->b_write_to_file) {
-		if (params->b_is_post && params->object_count > 1)
+		if (params->b_is_post &&
+		    !strcmp(status->content_type.buf,
+			    "application/x-git-packfile"))
 			install_packfile(params, status);
 		else
 			install_loose(params, status);
@@ -2200,10 +2202,10 @@ static void do_req(const char *url_base,
 	if (params->tempfile)
 		delete_tempfile(&params->tempfile);
 
-	if (params->b_is_post && params->object_count > 1)
+	if (params->b_is_post)
 		create_tempfile_for_packfile(params, status);
-	else
-		create_tempfile_for_loose(params, status);
+
+	create_tempfile_for_loose(params, status);
 
 	if (!params->tempfile || status->ec != GH__ERROR_CODE__OK)
 		return;

From bea2a6c7f6c2d4c97e0deffa8154ea88307fb594 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler
Date: Tue, 16 Jul 2019 10:40:56 -0400
Subject: [PATCH 089/104] trace2:gvfs:experiment: add unpack_entry() counter to
 unpack_trees() and report_tracking()

Signed-off-by: Jeff Hostetler
---
 builtin/checkout.c | 6 ++++++
 packfile.c         | 9 +++++++++
 packfile.h         | 5 +++++
 unpack-trees.c     | 5 +++++
 4 files changed, 25 insertions(+)

diff --git a/builtin/checkout.c b/builtin/checkout.c
index 7f14cea9b39555..08b7c920f21220 100644
--- a/builtin/checkout.c
+++ b/builtin/checkout.c
@@ -26,6 +26,7 @@
 #include "unpack-trees.h"
 #include "wt-status.h"
 #include "xdiff-interface.h"
+#include "packfile.h"
 
 static const char * const checkout_usage[] = {
 	N_("git checkout [<options>] <branch>"),
@@ -934,8 +935,13 @@ static void update_refs_for_switch(const struct checkout_opts *opts,
 	strbuf_release(&msg);
 	if (!opts->quiet &&
 	    (new_branch_info->path ||
 	     (!opts->force_detach && !strcmp(new_branch_info->name, "HEAD")))) {
+		unsigned long nr_unpack_entry_at_start;
+		trace2_region_enter("exp", "report_tracking", the_repository);
+		nr_unpack_entry_at_start = get_nr_unpack_entry();
 		report_tracking(new_branch_info);
+		trace2_data_intmax("exp", NULL, "report_tracking/nr_unpack_entries",
+				   (intmax_t)(get_nr_unpack_entry() - nr_unpack_entry_at_start));
 		trace2_region_leave("exp", "report_tracking", the_repository);
 	}
 }
diff --git a/packfile.c b/packfile.c
index f4e752996dbcca..1cafc811066dab 100644
--- a/packfile.c
+++ b/packfile.c
@@ -1655,6 +1655,13 @@ static void *read_object(struct repository *r,
 	return content;
 }
 
+static unsigned long g_nr_unpack_entry;
+
+unsigned long get_nr_unpack_entry(void)
+{
+	return g_nr_unpack_entry;
+}
+
 void *unpack_entry(struct repository *r, struct packed_git *p, off_t obj_offset,
enum object_type *final_type, unsigned long *final_size) { @@ -1668,6 +1675,8 @@ void *unpack_entry(struct repository *r, struct packed_git *p, off_t obj_offset, int delta_stack_nr = 0, delta_stack_alloc = UNPACK_ENTRY_STACK_PREALLOC; int base_from_cache = 0; + g_nr_unpack_entry++; + write_pack_access_log(p, obj_offset); /* PHASE 1: drill down to the innermost base object */ diff --git a/packfile.h b/packfile.h index 240aa73b95a649..0fe6b054115253 100644 --- a/packfile.h +++ b/packfile.h @@ -189,4 +189,9 @@ int is_promisor_object(const struct object_id *oid); int load_idx(const char *path, const unsigned int hashsz, void *idx_map, size_t idx_size, struct packed_git *p); +/* + * Return the number of objects fetched from a packfile. + */ +unsigned long get_nr_unpack_entry(void); + #endif diff --git a/unpack-trees.c b/unpack-trees.c index 317e7a0aa93596..2f8247fb8b9da4 100644 --- a/unpack-trees.c +++ b/unpack-trees.c @@ -18,6 +18,7 @@ #include "promisor-remote.h" #include "gvfs.h" #include "virtualfilesystem.h" +#include "packfile.h" /* * Error messages expected by scripts out of plumbing commands such as @@ -1590,11 +1591,13 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options static struct cache_entry *dfc; struct pattern_list pl; int free_pattern_list = 0; + unsigned long nr_unpack_entry_at_start; if (len > MAX_UNPACK_TREES) die("unpack_trees takes at most %d trees", MAX_UNPACK_TREES); trace2_region_enter("exp", "unpack_trees", NULL); + nr_unpack_entry_at_start = get_nr_unpack_entry(); trace_performance_enter(); if (!core_apply_sparse_checkout || !o->update) @@ -1764,6 +1767,8 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options if (free_pattern_list) clear_pattern_list(&pl); trace_performance_leave("unpack_trees"); + trace2_data_intmax("unpack_trees", NULL, "unpack_trees/nr_unpack_entries", + (intmax_t)(get_nr_unpack_entry() - nr_unpack_entry_at_start)); trace2_region_leave("exp", "unpack_trees", NULL); return ret; From 5da693a6da54acd76dfd7a00e647d953eca04d7d Mon Sep 17 00:00:00 2001 From: Derrick Stolee Date: Tue, 17 Dec 2019 07:25:40 -0500 Subject: [PATCH 090/104] fetch: use gvfs-helper prefetch under config The gvfs-helper allows us to download prefetch packs using a simple subprocess call. The gvfs-helper-client.h method will automatically compute the timestamp if passing 0, and passing NULL for the number of downloaded packs is valid. Signed-off-by: Derrick Stolee --- Documentation/config/core.txt | 4 ++++ builtin/fetch.c | 5 +++++ gvfs.h | 1 + 3 files changed, 10 insertions(+) diff --git a/Documentation/config/core.txt b/Documentation/config/core.txt index 5ad4f10004679f..8483f28c1151ab 100644 --- a/Documentation/config/core.txt +++ b/Documentation/config/core.txt @@ -670,6 +670,10 @@ core.gvfs:: is first accessed and brought down to the client. Git.exe can't currently tell the first access vs subsequent accesses so this flag just blocks them from occurring at all. + GVFS_PREFETCH_DURING_FETCH:: + Bit value 128 + While performing a `git fetch` command, use the gvfs-helper to + perform a "prefetch" of commits and trees. 
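+		For example, `git config core.gvfs 128` would enable just this
+		feature; since `core.gvfs` is a bit mask, add 128 to whatever
+		other feature bits are already in use.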
 --
 
 core.useGvfsHelper::
diff --git a/builtin/fetch.c b/builtin/fetch.c
index b5788c16bf43da..303826d53cd2d7 100644
--- a/builtin/fetch.c
+++ b/builtin/fetch.c
@@ -13,6 +13,8 @@
 #include "string-list.h"
 #include "remote.h"
 #include "transport.h"
+#include "gvfs.h"
+#include "gvfs-helper-client.h"
 #include "run-command.h"
 #include "parse-options.h"
 #include "sigchain.h"
@@ -1823,6 +1825,9 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
 		}
 	}
 
+	if (core_gvfs & GVFS_PREFETCH_DURING_FETCH)
+		gh_client__prefetch(0, NULL);
+
 	if (remote) {
 		if (filter_options.choice || has_promisor_remote())
 			fetch_one_setup_partial(remote);
diff --git a/gvfs.h b/gvfs.h
index e193502151467a..7d999f3e8d234f 100644
--- a/gvfs.h
+++ b/gvfs.h
@@ -17,6 +17,7 @@
 #define GVFS_NO_DELETE_OUTSIDE_SPARSECHECKOUT (1 << 3)
 #define GVFS_FETCH_SKIP_REACHABILITY_AND_UPLOADPACK (1 << 4)
 #define GVFS_BLOCK_FILTERS_AND_EOL_CONVERSIONS (1 << 6)
+#define GVFS_PREFETCH_DURING_FETCH (1 << 7)
 
 void gvfs_load_config_value(const char *value);
 int gvfs_config_is_set(int mask);

From 6f69b270e0f68c1f807e1cd8877000c7d8e2997e Mon Sep 17 00:00:00 2001
From: Jeff Hostetler
Date: Fri, 25 Oct 2019 17:10:25 -0400
Subject: [PATCH 091/104] test-gvfs-protocol, t5799: tests for gvfs-helper

Create t/helper/test-gvfs-protocol.c and t/t5799-gvfs-helper.sh
to test gvfs-helper.

Create t/helper/test-gvfs-protocol.c as a stand-alone web server that
speaks the GVFS Protocol [1] and serves loose objects and packfiles
to clients. It borrows heavily from the code in daemon.c.
It includes a "mayhem" mode to cause various network and HTTP errors
to test the retry/recovery ability of gvfs-helper.

Create t/t5799-gvfs-helper.sh to test gvfs-helper.

[1] https://github.com/microsoft/VFSForGit/blob/master/Protocol.md

Signed-off-by: Jeff Hostetler
---
 Makefile                      |    1 +
 gvfs-helper.c                 |   21 +-
 t/helper/test-gvfs-protocol.c | 1748 +++++++++++++++++++++++++++++++++
 t/t5799-gvfs-helper.sh        |  972 ++++++++++++++++++
 4 files changed, 2735 insertions(+), 7 deletions(-)
 create mode 100644 t/helper/test-gvfs-protocol.c
 create mode 100755 t/t5799-gvfs-helper.sh

diff --git a/Makefile b/Makefile
index 36a607ee573f3c..906c3ac0d563e0 100644
--- a/Makefile
+++ b/Makefile
@@ -1381,6 +1381,7 @@ else
 	BASIC_CFLAGS += $(CURL_CFLAGS)
 
 	PROGRAM_OBJS += gvfs-helper.o
+	TEST_PROGRAMS_NEED_X += test-gvfs-protocol
 
 	REMOTE_CURL_PRIMARY = git-remote-http$X
 	REMOTE_CURL_ALIASES = git-remote-https$X git-remote-ftp$X git-remote-ftps$X
diff --git a/gvfs-helper.c b/gvfs-helper.c
index e902e1f6eaf34c..fc99bddc7d54c7 100644
--- a/gvfs-helper.c
+++ b/gvfs-helper.c
@@ -1873,6 +1873,8 @@ static void install_loose(struct gh__request_params *params,
 	/*
 	 * We expect a loose object when we do a GET -or- when we
 	 * do a POST with only 1 object.
+	 *
+	 * Note that this content type is singular, not plural.
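+	 * (That is, we accept "application/x-git-loose-object" here, not
+	 * the "application/x-git-loose-objects" form used for POST.)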
 	 */
 	if (strcmp(status->content_type.buf,
 		   "application/x-git-loose-object")) {
@@ -2107,7 +2109,9 @@ static void do_throttle_spin(struct gh__request_params *params,
 	strbuf_addstr(&region, gh__server_type_label[params->server_type]);
 	trace2_region_enter("gvfs-helper", region.buf, NULL);
 
-	progress = start_progress(progress_msg, duration);
+	if (gh__cmd_opts.show_progress)
+		progress = start_progress(progress_msg, duration);
+
 	while (now < end) {
 		display_progress(progress, (now - begin));
@@ -2115,6 +2119,7 @@ static void do_throttle_spin(struct gh__request_params *params,
 		now = time(NULL);
 	}
 
+	display_progress(progress, duration);
 	stop_progress(&progress);
@@ -2633,13 +2638,15 @@ static void do__http_post__gvfs_objects(struct gh__response_status *status,
 	params.headers = curl_slist_append(params.headers,
 					   "Content-Type: application/json");
 	/*
-	 * We really always want a packfile.  But if the payload only
-	 * requests 1 OID, the server will send us a single loose
-	 * objects instead.  (Apparently the server ignores us when we
-	 * only send application/x-git-packfile and does it anyway.)
+	 * If our POST contains more than one object, we want the
+	 * server to send us a packfile.  We DO NOT want the non-standard
+	 * concatenated loose object format, so we DO NOT send:
+	 *     "Accept: application/x-git-loose-objects" (plural)
 	 *
-	 * So to make it clear to my future self, go ahead and add
-	 * an accept header for loose objects and own it.
+	 * However, if the payload only requests 1 OID, the server
+	 * will send us a single loose object instead of a packfile,
+	 * so we ACK that and send:
+	 *     "Accept: application/x-git-loose-object" (singular)
 	 */
 	params.headers = curl_slist_append(params.headers,
 					   "Accept: application/x-git-packfile");
diff --git a/t/helper/test-gvfs-protocol.c b/t/helper/test-gvfs-protocol.c
new file mode 100644
index 00000000000000..3ad771152c3424
--- /dev/null
+++ b/t/helper/test-gvfs-protocol.c
@@ -0,0 +1,1748 @@
+#include "cache.h"
+#include "config.h"
+#include "pkt-line.h"
+#include "run-command.h"
+#include "strbuf.h"
+#include "string-list.h"
+#include "trace2.h"
+#include "object.h"
+#include "object-store.h"
+#include "replace-object.h"
+#include "repository.h"
+#include "version.h"
+#include "dir.h"
+#include "json-writer.h"
+#include "oidset.h"
+
+#define TR2_CAT "test-gvfs-protocol"
+
+static const char *pid_file;
+static int verbose;
+static int reuseaddr;
+static struct string_list mayhem_list = STRING_LIST_INIT_DUP;
+static int mayhem_child = 0;
+static struct json_writer jw_config = JSON_WRITER_INIT;
+
+/*
+ * We look for one of these "servertypes" in the uri-base
+ * so we can behave differently when we need to.
+ */
+#define MY_SERVER_TYPE__ORIGIN "servertype/origin"
+#define MY_SERVER_TYPE__CACHE "servertype/cache"
+
+static const char test_gvfs_protocol_usage[] =
+"gvfs-protocol [--verbose]\n"
+"           [--timeout=<n>] [--init-timeout=<n>] [--max-connections=<n>]\n"
+"           [--reuseaddr] [--pid-file=<file>]\n"
+"           [--listen=<host_or_ipaddr>]* [--port=<n>]\n"
+"           [--mayhem=<token>]*\n"
+;
+
+/* Timeout, and initial timeout */
+static unsigned int timeout;
+static unsigned int init_timeout;
+
+static void logreport(const char *label, const char *err, va_list params)
+{
+	struct strbuf msg = STRBUF_INIT;
+
+	strbuf_addf(&msg, "[%"PRIuMAX"] %s: ", (uintmax_t)getpid(), label);
+	strbuf_vaddf(&msg, err, params);
+	strbuf_addch(&msg, '\n');
+
+	fwrite(msg.buf, sizeof(char), msg.len, stderr);
+	fflush(stderr);
+
+	strbuf_release(&msg);
+}
+
+__attribute__((format (printf, 1, 2)))
+static void logerror(const char *err, ...)
+{ + va_list params; + va_start(params, err); + logreport("error", err, params); + va_end(params); +} + +__attribute__((format (printf, 1, 2))) +static void loginfo(const char *err, ...) +{ + va_list params; + if (!verbose) + return; + va_start(params, err); + logreport("info", err, params); + va_end(params); +} + +__attribute__((format (printf, 1, 2))) +static void logmayhem(const char *err, ...) +{ + va_list params; + if (!verbose) + return; + va_start(params, err); + logreport("mayhem", err, params); + va_end(params); +} + +static void set_keep_alive(int sockfd) +{ + int ka = 1; + + if (setsockopt(sockfd, SOL_SOCKET, SO_KEEPALIVE, &ka, sizeof(ka)) < 0) { + if (errno != ENOTSOCK) + logerror("unable to set SO_KEEPALIVE on socket: %s", + strerror(errno)); + } +} + +////////////////////////////////////////////////////////////////// +// The code in this section is used by "worker" instances to service +// a single connection from a client. The worker talks to the client +// on 0 and 1. +////////////////////////////////////////////////////////////////// + +enum worker_result { + /* + * Operation successful. + * Caller *might* keep the socket open and allow keep-alive. + */ + WR_OK = 0, + /* + * Various errors while processing the request and/or the response. + * Close the socket and clean up. + * Exit child-process with non-zero status. + */ + WR_IO_ERROR = 1<<0, + /* + * Close the socket and clean up. Does not imply an error. + */ + WR_HANGUP = 1<<1, + /* + * The result of a function was influenced by the mayhem settings. + * Does not imply that we need to exit or close the socket. + * Just advice to callers in the worker stack. + */ + WR_MAYHEM = 1<<2, + + WR_STOP_THE_MUSIC = (WR_IO_ERROR | WR_HANGUP), +}; + +/* + * Fields from a parsed HTTP request. + */ +struct req { + struct strbuf start_line; + struct string_list start_line_fields; + + struct strbuf uri_base; + struct strbuf gvfs_api; + struct strbuf slash_args; + struct strbuf quest_args; + + struct string_list header_list; +}; + +#define REQ__INIT { \ + .start_line = STRBUF_INIT, \ + .start_line_fields = STRING_LIST_INIT_DUP, \ + .uri_base = STRBUF_INIT, \ + .gvfs_api = STRBUF_INIT, \ + .slash_args = STRBUF_INIT, \ + .quest_args = STRBUF_INIT, \ + .header_list = STRING_LIST_INIT_NODUP, \ + } + +static void req__release(struct req *req) +{ + strbuf_release(&req->start_line); + string_list_clear(&req->start_line_fields, 0); + + strbuf_release(&req->uri_base); + strbuf_release(&req->gvfs_api); + strbuf_release(&req->slash_args); + strbuf_release(&req->quest_args); + + string_list_clear(&req->header_list, 0); +} + +/* + * Generate a somewhat bogus UUID/GUID that is good enough for + * a test suite, but without requiring platform-specific UUID + * or GUID libraries. + */ +static void gen_fake_uuid(struct strbuf *uuid) +{ + static unsigned int seq = 0; + static struct timeval tv; + static struct tm tm; + static time_t secs; + + strbuf_setlen(uuid, 0); + + if (!seq) { + gettimeofday(&tv, NULL); + secs = tv.tv_sec; + gmtime_r(&secs, &tm); + } + + /* + * Build a string that looks like: + * + * "ffffffff-eeee-dddd-cccc-bbbbbbbbbbbb" + * + * Note that the first digit in the "dddd" section gives the + * UUID type. We set it to zero so that we won't collide with + * any "real" UUIDs. 
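+	 *
+	 * (A hypothetical example produced by the format string below
+	 * would be "20191126-1413-0057-1a2b-0004cb2f0000".)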
+ */ + strbuf_addf(uuid, "%04d%02d%02d-%02d%02d-00%02d-%04x-%08x%04x", + tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, + tm.tm_hour, tm.tm_min, + tm.tm_sec, + (unsigned)(getpid() & 0xffff), + (unsigned)(tv.tv_usec & 0xffffffff), + (seq++ & 0xffff)); +} + +/* + * Send a chunk of data to the client using HTTP chunked + * transfer coding rules. + * + * https://tools.ietf.org/html/rfc7230#section-4.1 + */ +static enum worker_result send_chunk(int fd, const unsigned char *buf, + size_t len_buf) +{ + char chunk_size[100]; + int chunk_size_len = xsnprintf(chunk_size, sizeof(chunk_size), + "%x\r\n", (unsigned int)len_buf); + + if ((write_in_full(fd, chunk_size, chunk_size_len) < 0) || + (write_in_full(fd, buf, len_buf) < 0) || + (write_in_full(fd, "\r\n", 2) < 0)) { + logerror("unable to send chunk"); + return WR_IO_ERROR; + } + + return WR_OK; +} + +static enum worker_result send_final_chunk(int fd) +{ + if (write_in_full(fd, "0\r\n\r\n", 5) < 0) { + logerror("unable to send final chunk"); + return WR_IO_ERROR; + } + + return WR_OK; +} + +static enum worker_result send_http_error( + int fd, + int http_code, const char *http_code_name, + int retry_after_seconds, enum worker_result wr_in) +{ + struct strbuf response_header = STRBUF_INIT; + struct strbuf response_content = STRBUF_INIT; + struct strbuf uuid = STRBUF_INIT; + enum worker_result wr; + + strbuf_addf(&response_content, "Error: %d %s\r\n", + http_code, http_code_name); + if (retry_after_seconds > 0) + strbuf_addf(&response_content, "Retry-After: %d\r\n", + retry_after_seconds); + + strbuf_addf (&response_header, "HTTP/1.1 %d %s\r\n", http_code, http_code_name); + strbuf_addstr(&response_header, "Cache-Control: private\r\n"); + strbuf_addstr(&response_header, "Content-Type: text/plain\r\n"); + strbuf_addf (&response_header, "Content-Length: %d\r\n", (int)response_content.len); + if (retry_after_seconds > 0) + strbuf_addf (&response_header, "Retry-After: %d\r\n", retry_after_seconds); + strbuf_addf( &response_header, "Server: test-gvfs-protocol/%s\r\n", git_version_string); + strbuf_addf( &response_header, "Date: %s\r\n", show_date(time(NULL), 0, DATE_MODE(RFC2822))); + gen_fake_uuid(&uuid); + strbuf_addf( &response_header, "X-VSS-E2EID: %s\r\n", uuid.buf); + strbuf_addstr(&response_header, "\r\n"); + + if (write_in_full(fd, response_header.buf, response_header.len) < 0) { + logerror("unable to write response header"); + wr = WR_IO_ERROR; + goto done; + } + + if (write_in_full(fd, response_content.buf, response_content.len) < 0) { + logerror("unable to write response content body"); + wr = WR_IO_ERROR; + goto done; + } + + wr = wr_in; + +done: + strbuf_release(&uuid); + strbuf_release(&response_header); + strbuf_release(&response_content); + + return wr; +} + +/* + * Return 1 if we send an AUTH error to the client. + */ +static int mayhem_try_auth(struct req *req, enum worker_result *wr_out) +{ + *wr_out = WR_OK; + + if (string_list_has_string(&mayhem_list, "http_401")) { + struct string_list_item *item; + int has_auth = 0; + for_each_string_list_item(item, &req->header_list) { + if (starts_with(item->string, "Authorization: Basic")) { + has_auth = 1; + break; + } + } + if (!has_auth) { + if (strstr(req->uri_base.buf, MY_SERVER_TYPE__ORIGIN)) { + logmayhem("http_401 (origin)"); + *wr_out = send_http_error(1, 401, "Unauthorized", -1, + WR_MAYHEM); + return 1; + } + + else if (strstr(req->uri_base.buf, MY_SERVER_TYPE__CACHE)) { + /* + * Cache servers use a non-standard 400 rather than a 401. 
+ */ + logmayhem("http_400 (cacheserver)"); + *wr_out = send_http_error(1, 400, "Bad Request", -1, + WR_MAYHEM); + return 1; + } + + else { + /* + * Non-qualified server type. + */ + logmayhem("http_401"); + *wr_out = send_http_error(1, 401, "Unauthorized", -1, + WR_MAYHEM); + return 1; + } + } + } + + return 0; +} + +/* + * Build fake gvfs/config data using our IP address and port. + * + * The Min/Max data is just random noise copied from the example + * in the documentation. + */ +static void build_gvfs_config_json(struct json_writer *jw, + struct string_list *listen_addr, + int listen_port) +{ + jw_object_begin(jw, 0); + { + jw_object_inline_begin_array(jw, "AllowedGvfsClientVersions"); + { + jw_array_inline_begin_object(jw); + { + jw_object_inline_begin_object(jw, "Max"); + { + jw_object_intmax(jw, "Major", 0); + jw_object_intmax(jw, "Minor", 4); + jw_object_intmax(jw, "Build", 0); + jw_object_intmax(jw, "Revision", 0); + } + jw_end(jw); + + jw_object_inline_begin_object(jw, "Min"); + { + jw_object_intmax(jw, "Major", 0); + jw_object_intmax(jw, "Minor", 2); + jw_object_intmax(jw, "Build", 0); + jw_object_intmax(jw, "Revision", 0); + } + jw_end(jw); + } + jw_end(jw); + + jw_array_inline_begin_object(jw); + { + jw_object_null(jw, "Max"); + jw_object_inline_begin_object(jw, "Min"); + { + jw_object_intmax(jw, "Major", 0); + jw_object_intmax(jw, "Minor", 5); + jw_object_intmax(jw, "Build", 16326); + jw_object_intmax(jw, "Revision", 1); + } + jw_end(jw); + } + jw_end(jw); + } + jw_end(jw); + + jw_object_inline_begin_array(jw, "CacheServers"); + { + struct string_list_item *item; + int k = 0; + + for_each_string_list_item(item, listen_addr) { + jw_array_inline_begin_object(jw); + { + struct strbuf buf = STRBUF_INIT; + + strbuf_addf(&buf, "http://%s:%d/%s", + item->string, + listen_port, + MY_SERVER_TYPE__CACHE); + jw_object_string(jw, "Url", buf.buf); + strbuf_release(&buf); + + strbuf_addf(&buf, "cs%02d", k); + jw_object_string(jw, "Name", buf.buf); + strbuf_release(&buf); + + jw_object_bool(jw, "GlobalDefault", + k++ == 0); + } + jw_end(jw); + } + } + jw_end(jw); + } + jw_end(jw); +} +/* + * Per the GVFS Protocol, this should only be recognized on the origin + * server (not the cache-server). It returns a JSON payload of config + * data. 
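+ *
+ * (Here, that payload is the JSON document built in
+ * build_gvfs_config_json() above: an "AllowedGvfsClientVersions"
+ * array plus a "CacheServers" array of {Url, Name, GlobalDefault}
+ * entries.)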
+ */
+static enum worker_result do__gvfs_config__get(struct req *req)
+{
+	struct strbuf response_header = STRBUF_INIT;
+	struct strbuf uuid = STRBUF_INIT;
+	enum worker_result wr;
+
+	if (strstr(req->uri_base.buf, MY_SERVER_TYPE__CACHE))
+		return send_http_error(1, 404, "Not Found", -1, WR_OK);
+
+	strbuf_addstr(&response_header, "HTTP/1.1 200 OK\r\n");
+	strbuf_addstr(&response_header, "Cache-Control: private\r\n");
+	strbuf_addstr(&response_header, "Content-Type: text/plain\r\n");
+	strbuf_addf(  &response_header, "Content-Length: %d\r\n", (int)jw_config.json.len);
+	strbuf_addf(  &response_header, "Server: test-gvfs-protocol/%s\r\n", git_version_string);
+	strbuf_addf(  &response_header, "Date: %s\r\n", show_date(time(NULL), 0, DATE_MODE(RFC2822)));
+	gen_fake_uuid(&uuid);
+	strbuf_addf(  &response_header, "X-VSS-E2EID: %s\r\n", uuid.buf);
+	strbuf_addstr(&response_header, "\r\n");
+
+	if (write_in_full(1, response_header.buf, response_header.len) < 0) {
+		logerror("unable to write response header");
+		wr = WR_IO_ERROR;
+		goto done;
+	}
+
+	if (write_in_full(1, jw_config.json.buf, jw_config.json.len) < 0) {
+		logerror("unable to write response content body");
+		wr = WR_IO_ERROR;
+		goto done;
+	}
+
+	wr = WR_OK;
+
+done:
+	strbuf_release(&uuid);
+	strbuf_release(&response_header);
+
+	return wr;
+}
+
+/*
+ * Send the contents of the in-memory inflated object in "compressed
+ * loose object" format over the socket.
+ *
+ * Because we are using keep-alive and are streaming the compressed
+ * chunks as we produce them, we set the transport-encoding and not
+ * the content-length.
+ *
+ * Our usage here is different from `git-http-backend` because it will
+ * only send a loose object if it exists as a loose object in the ODB
+ * (see the "/objects/[0-9a-f]{2}/[0-9a-f]{38}$" regex_t declarations)
+ * by doing a file-copy.
+ *
+ * We want to send an arbitrary object without regard for how it is
+ * currently stored in the local ODB.
+ *
+ * Also, we don't want any of the type-specific branching found in the
+ * sha1-file.c functions (such as special casing BLOBs).  Specifically,
+ * we DO NOT want any of the content conversion filters.  We just want
+ * to send the raw content as is.
+ *
+ * So, we steal freely from sha1-file.c routines:
+ *     write_object_file_prepare()
+ *     write_loose_object()
+ */
+static enum worker_result send_loose_object(const struct object_info *oi,
+					    const struct object_id *oid,
+					    int fd)
+{
+#define MAX_HEADER_LEN 32
+	struct strbuf response_header = STRBUF_INIT;
+	struct strbuf uuid = STRBUF_INIT;
+	char object_header[MAX_HEADER_LEN];
+	unsigned char compressed[4096];
+	git_zstream stream;
+	struct object_id oid_check;
+	git_hash_ctx c;
+	int object_header_len;
+	int ret;
+
+	/*
+	 * We are blending several somewhat independent concepts here:
+	 *
+	 * [1] reconstructing the object format in parts:
+	 *
+	 *          <object> ::= <object_header> <object_content>
+	 *
+	 * [1a]     <object_header> ::= <type> SP <size> NUL
+	 * [1b]     <object_content> ::= <array_of_bytes>
+	 *
+	 * [2] verify that we constructed [1] correctly by computing
+	 *     the hash of [1] and verify it matches the passed OID.
+	 *
+	 * [3] compress [1] because that is how loose objects are
+	 *     stored on disk.  We compress it as we stream it to
+	 *     the client.
+	 *
+	 * [4] send HTTP response headers to the client.
+	 *
+	 * [5] stream each chunk from [3] to the client using the HTTP
+	 *     chunked transfer coding.
+	 *
+	 * [6] for extra credit, we repeat the hash construction in [2]
+	 *     as we stream it.
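+	 *
+	 * (A concrete example of [1a]: for a 14-byte blob, the header is
+	 * the 7 characters "blob 14" plus a terminating NUL, 8 bytes in
+	 * all.)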
+ */ + + /* [4] */ + strbuf_addstr(&response_header, "HTTP/1.1 200 OK\r\n"); + strbuf_addstr(&response_header, "Cache-Control: private\r\n"); + strbuf_addstr(&response_header, "Content-Type: application/x-git-loose-object\r\n"); + strbuf_addf( &response_header, "Server: test-gvfs-protocol/%s\r\n", git_version_string); + strbuf_addstr(&response_header, "Transfer-Encoding: chunked\r\n"); + strbuf_addf( &response_header, "Date: %s\r\n", show_date(time(NULL), 0, DATE_MODE(RFC2822))); + gen_fake_uuid(&uuid); + strbuf_addf( &response_header, "X-VSS-E2EID: %s\r\n", uuid.buf); + strbuf_addstr(&response_header, "\r\n"); + + if (write_in_full(fd, response_header.buf, response_header.len) < 0) { + logerror("unable to write response header"); + return WR_IO_ERROR; + } + + strbuf_release(&uuid); + strbuf_release(&response_header); + + if (string_list_has_string(&mayhem_list, "close_write")) { + logmayhem("close_write"); + return WR_MAYHEM | WR_HANGUP; + } + + /* [1a] */ + object_header_len = 1 + xsnprintf(object_header, MAX_HEADER_LEN, + "%s %"PRIuMAX, + type_name(*oi->typep), + (uintmax_t)*oi->sizep); + + /* [2] */ + the_hash_algo->init_fn(&c); + the_hash_algo->update_fn(&c, object_header, object_header_len); + the_hash_algo->update_fn(&c, *oi->contentp, *oi->sizep); + the_hash_algo->final_fn(oid_check.hash, &c); + if (!oideq(oid, &oid_check)) + BUG("send_loose_object[2]: invalid construction '%s' '%s'", + oid_to_hex(oid), oid_to_hex(&oid_check)); + + /* [3, 6] */ + git_deflate_init(&stream, zlib_compression_level); + stream.next_out = compressed; + stream.avail_out = sizeof(compressed); + the_hash_algo->init_fn(&c); + + /* [3, 1a, 6] */ + stream.next_in = (unsigned char *)object_header; + stream.avail_in = object_header_len; + while (git_deflate(&stream, 0) == Z_OK) + ; /* nothing */ + the_hash_algo->update_fn(&c, object_header, object_header_len); + + /* [3, 1b, 5, 6] */ + stream.next_in = *oi->contentp; + stream.avail_in = *oi->sizep; + do { + enum worker_result wr; + unsigned char *in0 = stream.next_in; + ret = git_deflate(&stream, Z_FINISH); + the_hash_algo->update_fn(&c, in0, stream.next_in - in0); + + /* [5] */ + wr = send_chunk(fd, compressed, stream.next_out - compressed); + if (wr & WR_STOP_THE_MUSIC) + return wr; + + stream.next_out = compressed; + stream.avail_out = sizeof(compressed); + + } while (ret == Z_OK); + + /* [3] */ + if (ret != Z_STREAM_END) + BUG("unable to deflate object '%s' (%d)", oid_to_hex(oid), ret); + ret = git_deflate_end_gently(&stream); + if (ret != Z_OK) + BUG("deflateEnd on object '%s' failed (%d)", oid_to_hex(oid), ret); + + /* [6] */ + the_hash_algo->final_fn(oid_check.hash, &c); + if (!oideq(oid, &oid_check)) + BUG("send_loose_object[6]: invalid construction '%s' '%s'", + oid_to_hex(oid), oid_to_hex(&oid_check)); + + /* [5] */ + return send_final_chunk(fd); +} + +/* + * Per the GVFS Protocol, a single OID should be in the slash-arg: + * + * GET /gvfs/objects/fc3fff3a25559d2d30d1719c4f4a6d9fe7e05170 HTTP/1.1 + * + * Look it up in our repo (loose or packed) and send it to gvfs-helper + * over the socket as a loose object. 
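+ *
+ * (The object is streamed back using chunked transfer-coding via
+ * send_loose_object() above.)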
+ */
+static enum worker_result do__gvfs_objects__get(struct req *req)
+{
+	struct object_id oid;
+	void *content;
+	unsigned long size;
+	enum object_type type;
+	struct object_info oi = OBJECT_INFO_INIT;
+	unsigned flags = 0;
+
+	/*
+	 * Since `test-gvfs-protocol` is mocking a real GVFS server (cache or
+	 * main), we don't want a request for a missing object to cause the
+	 * implicit dynamic fetch mechanism to try to fault-it-in (and cause
+	 * our call to oid_object_info_extended() to launch another instance
+	 * of `gvfs-helper` to magically fetch it (which would connect to a
+	 * new instance of `test-gvfs-protocol`)).
+	 *
+	 * Rather, we want a missing object to fail, so we can respond with
+	 * a 404, for example.
+	 */
+	flags |= OBJECT_INFO_FOR_PREFETCH;
+	flags |= OBJECT_INFO_LOOKUP_REPLACE;
+
+	if (!req->slash_args.len ||
+	    get_oid_hex(req->slash_args.buf, &oid)) {
+		logerror("invalid OID in GET gvfs/objects: '%s'",
+			 req->slash_args.buf);
+		return WR_IO_ERROR;
+	}
+
+	trace2_printf("%s: GET %s", TR2_CAT, oid_to_hex(&oid));
+
+	oi.typep = &type;
+	oi.sizep = &size;
+	oi.contentp = &content;
+
+	if (oid_object_info_extended(the_repository, &oid, &oi, flags)) {
+		logerror("Could not find OID: '%s'", oid_to_hex(&oid));
+		return send_http_error(1, 404, "Not Found", -1, WR_OK);
+	}
+
+	if (string_list_has_string(&mayhem_list, "http_404")) {
+		logmayhem("http_404");
+		return send_http_error(1, 404, "Not Found", -1, WR_MAYHEM);
+	}
+
+	trace2_printf("%s: OBJECT type=%d len=%ld '%.40s'", TR2_CAT,
+		      type, size, (const char *)content);
+
+	return send_loose_object(&oi, &oid, 1);
+}
+
+static enum worker_result read_json_post_body(
+	struct req *req,
+	struct oidset *oids)
+{
+	struct object_id oid;
+	struct string_list_item *item;
+	char *post_body = NULL;
+	const char *v;
+	ssize_t len_expected = 0;
+	ssize_t len_received;
+	const char *pkey;
+	const char *plbracket;
+	const char *pstart;
+	const char *pend;
+
+	for_each_string_list_item(item, &req->header_list) {
+		if (skip_prefix(item->string, "Content-Length: ", &v)) {
+			char *p;
+			len_expected = strtol(v, &p, 10);
+			break;
+		}
+	}
+	if (!len_expected) {
+		logerror("no content length in POST");
+		return WR_IO_ERROR;
+	}
+	post_body = xcalloc(1, len_expected + 1);
+	if (!post_body) {
+		logerror("could not malloc buffer for POST body");
+		return WR_IO_ERROR;
+	}
+	len_received = read_in_full(0, post_body, len_expected);
+	if (len_received != len_expected) {
+		logerror("short read in POST (expected %d, received %d)",
+			 (int)len_expected, (int)len_received);
+		return WR_IO_ERROR;
+	}
+
+	/*
+	 * A very primitive JSON parser for a very fixed and well-known
+	 * message format.  Please don't judge me.
+	 *
+	 * We expect:
+	 *
+	 *     ..."objectIds":["<oid_1>","<oid_2>",..."<oid_n>"]...
+	 *
+	 * We expect compact (non-pretty) JSON, but also tolerate the
+	 * extra whitespace of pretty-printed JSON.
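+	 *
+	 * A hypothetical request body might therefore look like:
+	 *
+	 *     {"commitDepth": 1, "objectIds":["aaaaaaaaaabbbbbbbbbbccccccccccdddddddddd"]}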
+ */ + pkey = strstr(post_body, "\"objectIds\""); + if (!pkey) + goto could_not_parse_json; + plbracket = strchr(pkey, '['); + if (!plbracket) + goto could_not_parse_json; + pstart = plbracket + 1; + + while (1) { + /* Eat leading whitespace before opening DQUOTE */ + while (*pstart && isspace(*pstart)) + pstart++; + if (!*pstart) + goto could_not_parse_json; + pstart++; + + /* find trailing DQUOTE */ + pend = strchr(pstart, '"'); + if (!pend) + goto could_not_parse_json; + + if (get_oid_hex(pstart, &oid)) + goto could_not_parse_json; + oidset_insert(oids, &oid); + trace2_printf("%s: POST %s", TR2_CAT, oid_to_hex(&oid)); + + /* Eat trailing whitespace after trailing DQUOTE */ + pend++; + while (*pend && isspace(*pend)) + pend++; + if (!*pend) + goto could_not_parse_json; + + /* End of list or is there another OID */ + if (*pend == ']') + break; + if (*pend != ',') + goto could_not_parse_json; + + pstart = pend + 1; + } + + /* + * We do not care about the "commitDepth" parameter. + */ + + free(post_body); + return WR_OK; + +could_not_parse_json: + logerror("could not parse JSON in POST body"); + free(post_body); + return WR_IO_ERROR; +} + +/* + * Since this is a test helper, I'm going to be lazy and + * run pack-objects as a background child using pipe_command + * and get the resulting packfile into a buffer. And then + * the caller can pump it to the client over the socket. + * + * This avoids the need to set up a custom loop (like in + * upload-pack) to drive it and/or the use of a bunch of + * tempfiles. + * + * My assumption here is that we're not testing with GBs + * of data.... + * + * Note: The GVFS Protocol POST verb behaves like GET for + * Note: non-commit objects (in that it just returns the + * Note: requested object), but for commit objects POST + * Note: *also* returns all trees referenced by the commit. + * Note: + * Note: Since the goal of this test is to confirm that + * Note: gvfs-helper can request and receive a packfile + * Note: *at all*, I'm not going to blur the issue and + * Note: support the extra semantics for commit objects. 
+ */ +static enum worker_result get_packfile_from_oids( + struct oidset *oids, + struct strbuf *buf_packfile) +{ + struct child_process pack_objects = CHILD_PROCESS_INIT; + struct strbuf buf_child_stdin = STRBUF_INIT; + struct strbuf buf_child_stderr = STRBUF_INIT; + struct oidset_iter iter; + struct object_id *oid; + enum worker_result wr; + int result; + + argv_array_push(&pack_objects.args, "git"); + argv_array_push(&pack_objects.args, "pack-objects"); + argv_array_push(&pack_objects.args, "-q"); + argv_array_push(&pack_objects.args, "--revs"); + argv_array_push(&pack_objects.args, "--delta-base-offset"); + argv_array_push(&pack_objects.args, "--window=0"); + argv_array_push(&pack_objects.args, "--depth=4095"); + argv_array_push(&pack_objects.args, "--compression=1"); + argv_array_push(&pack_objects.args, "--stdout"); + + pack_objects.in = -1; + pack_objects.out = -1; + pack_objects.err = -1; + + oidset_iter_init(oids, &iter); + while ((oid = oidset_iter_next(&iter))) + strbuf_addf(&buf_child_stdin, "%s\n", oid_to_hex(oid)); + strbuf_addstr(&buf_child_stdin, "\n"); + + result = pipe_command(&pack_objects, + buf_child_stdin.buf, buf_child_stdin.len, + buf_packfile, 0, + &buf_child_stderr, 0); + if (result) { + logerror("pack-objects failed: %s", buf_child_stderr.buf); + wr = WR_IO_ERROR; + goto done; + } + + trace2_printf("%s: pack-objects returned %d bytes", TR2_CAT, buf_packfile->len); + wr = WR_OK; + +done: + strbuf_release(&buf_child_stdin); + strbuf_release(&buf_child_stderr); + + return wr; +} + +static enum worker_result send_packfile_from_buffer(const struct strbuf *packfile) +{ + struct strbuf response_header = STRBUF_INIT; + struct strbuf uuid = STRBUF_INIT; + enum worker_result wr; + + strbuf_addstr(&response_header, "HTTP/1.1 200 OK\r\n"); + strbuf_addstr(&response_header, "Cache-Control: private\r\n"); + strbuf_addstr(&response_header, "Content-Type: application/x-git-packfile\r\n"); + strbuf_addf( &response_header, "Content-Length: %d\r\n", (int)packfile->len); + strbuf_addf( &response_header, "Server: test-gvfs-protocol/%s\r\n", git_version_string); + strbuf_addf( &response_header, "Date: %s\r\n", show_date(time(NULL), 0, DATE_MODE(RFC2822))); + gen_fake_uuid(&uuid); + strbuf_addf( &response_header, "X-VSS-E2EID: %s\r\n", uuid.buf); + strbuf_addstr(&response_header, "\r\n"); + + if (write_in_full(1, response_header.buf, response_header.len) < 0) { + logerror("unable to write response header"); + wr = WR_IO_ERROR; + goto done; + } + + if (write_in_full(1, packfile->buf, packfile->len) < 0) { + logerror("unable to write response content body"); + wr = WR_IO_ERROR; + goto done; + } + + wr = WR_OK; + +done: + strbuf_release(&uuid); + strbuf_release(&response_header); + + return wr; +} + +static enum worker_result do__gvfs_objects__post(struct req *req) +{ + struct oidset oids = OIDSET_INIT; + struct strbuf packfile = STRBUF_INIT; + enum worker_result wr; + + wr = read_json_post_body(req, &oids); + if (wr & WR_STOP_THE_MUSIC) + goto done; + + wr = get_packfile_from_oids(&oids, &packfile); + if (wr & WR_STOP_THE_MUSIC) + goto done; + + wr = send_packfile_from_buffer(&packfile); + +done: + oidset_clear(&oids); + strbuf_release(&packfile); + + return wr; +} + +/* + * Read the HTTP request up to the start of the optional message-body. + * We do this byte-by-byte because we have keep-alive turned on and + * cannot rely on an EOF. 
+ *
+ * https://tools.ietf.org/html/rfc7230
+ * https://github.com/microsoft/VFSForGit/blob/master/Protocol.md
+ *
+ * We cannot call die() here because our caller needs to properly
+ * respond to the client and/or close the socket before this
+ * child exits so that the client doesn't get a connection reset
+ * by peer error.
+ */
+static enum worker_result req__read(struct req *req, int fd)
+{
+	struct strbuf h = STRBUF_INIT;
+	int nr_start_line_fields;
+	const char *uri_target;
+	const char *http_version;
+	const char *gvfs;
+
+	/*
+	 * Read line 0 of the request and split it into component parts:
+	 *
+	 *     <method> SP <request-target> SP <http-version> CRLF
+	 *
+	 */
+	if (strbuf_getwholeline_fd(&req->start_line, fd, '\n') == EOF)
+		return WR_OK | WR_HANGUP;
+
+	if (string_list_has_string(&mayhem_list, "close_read")) {
+		logmayhem("close_read");
+		return WR_MAYHEM | WR_HANGUP;
+	}
+
+	if (string_list_has_string(&mayhem_list, "close_read_1") &&
+	    mayhem_child == 0) {
+		/*
+		 * Mayhem: fail the first request, but let retries succeed.
+		 */
+		logmayhem("close_read_1");
+		return WR_MAYHEM | WR_HANGUP;
+	}
+
+	strbuf_trim_trailing_newline(&req->start_line);
+
+	nr_start_line_fields = string_list_split(&req->start_line_fields,
+						 req->start_line.buf,
+						 ' ', -1);
+	if (nr_start_line_fields != 3) {
+		logerror("could not parse request start-line '%s'",
+			 req->start_line.buf);
+		return WR_IO_ERROR;
+	}
+	uri_target = req->start_line_fields.items[1].string;
+	http_version = req->start_line_fields.items[2].string;
+
+	if (strcmp(http_version, "HTTP/1.1")) {
+		logerror("unsupported version '%s' (expecting HTTP/1.1)",
+			 http_version);
+		return WR_IO_ERROR;
+	}
+
+	/*
+	 * Next, extract the GVFS terms from the <request-target>.  The
+	 * GVFS Protocol defines a REST API containing several GVFS
+	 * commands of the form:
+	 *
+	 *     [<uri-base>]/gvfs/<token>[/<args>]
+	 *     [<uri-base>]/gvfs/<token>[?<args>]
+	 *
+	 * For example:
+	 *     "GET /gvfs/config HTTP/1.1"
+	 *     "GET /gvfs/objects/aaaaaaaaaabbbbbbbbbbccccccccccdddddddddd HTTP/1.1"
+	 *     "GET /gvfs/prefetch?lastPackTimestamp=123456789 HTTP/1.1"
+	 *
+	 *     "GET /<uri-base>/gvfs/config HTTP/1.1"
+	 *     "GET /<uri-base>/gvfs/objects/aaaaaaaaaabbbbbbbbbbccccccccccdddddddddd HTTP/1.1"
+	 *     "GET /<uri-base>/gvfs/prefetch?lastPackTimestamp=123456789 HTTP/1.1"
+	 *
+	 *     "POST /<uri-base>/gvfs/objects HTTP/1.1"
+	 *
+	 * For other testing later, we also allow non-gvfs URLs of the form:
+	 *     "GET /<uri-base>[?<args>] HTTP/1.1"
+	 *
+	 * We do not attempt to split the query-params within the args.
+	 * The caller can do that if they need to.
+	 */
+	gvfs = strstr(uri_target, "/gvfs/");
+	if (gvfs) {
+		strbuf_add(&req->uri_base, uri_target, (gvfs - uri_target));
+		strbuf_trim_trailing_dir_sep(&req->uri_base);
+
+		gvfs += 6; /* skip "/gvfs/" */
+		strbuf_add(&req->gvfs_api, "gvfs/", 5);
+		while (*gvfs && *gvfs != '/' && *gvfs != '?')
+			strbuf_addch(&req->gvfs_api, *gvfs++);
+
+		/*
+		 */
+		if (*gvfs == '/')
+			strbuf_addstr(&req->slash_args, gvfs + 1);
+		else if (*gvfs == '?')
+			strbuf_addstr(&req->quest_args, gvfs + 1);
+	} else {
+
+		const char *quest = strchr(uri_target, '?');
+
+		if (quest) {
+			strbuf_add(&req->uri_base, uri_target, (quest - uri_target));
+			strbuf_trim_trailing_dir_sep(&req->uri_base);
+			strbuf_addstr(&req->quest_args, quest + 1);
+		} else {
+			strbuf_addstr(&req->uri_base, uri_target);
+			strbuf_trim_trailing_dir_sep(&req->uri_base);
+		}
+	}
+
+	/*
+	 * Read the set of HTTP headers into a string-list.
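+	 *
+	 * (e.g. "Content-Length: 123" or "Authorization: Basic ...",
+	 * which read_json_post_body() and mayhem_try_auth() look for
+	 * later.)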
+	 */
+	while (1) {
+		if (strbuf_getwholeline_fd(&h, fd, '\n') == EOF)
+			goto done;
+		strbuf_trim_trailing_newline(&h);
+
+		if (!h.len)
+			goto done; /* a blank line ends the header */
+
+		string_list_append(&req->header_list,
+				   strbuf_detach(&h, NULL));
+	}
+
+	/*
+	 * TODO If the set of HTTP headers includes things like:
+	 * TODO
+	 * TODO     Connection: Upgrade, HTTP2-Settings
+	 * TODO     Upgrade: h2c
+	 * TODO     HTTP2-Settings: AAMAAABkAARAAAAAAAIAAAAA
+	 * TODO
+	 * TODO then the client is asking to optionally switch to HTTP/2.
+	 * TODO
+	 * TODO We currently DO NOT support that (and I don't currently
+	 * TODO see a need to do so (because we don't need the multiplexed
+	 * TODO streams feature (because the client never asks for n packfiles
+	 * TODO at the same time))).
+	 * TODO
+	 * TODO https://en.wikipedia.org/wiki/HTTP/1.1_Upgrade_header
+	 */
+
+	/*
+	 * We do not attempt to read the <message-body>, if it exists.
+	 * We let our caller read/chunk it in as appropriate.
+	 */
+done:
+	if (trace2_is_enabled()) {
+		struct string_list_item *item;
+		trace2_printf("%s: %s", TR2_CAT, req->start_line.buf);
+		for_each_string_list_item(item, &req->start_line_fields)
+			trace2_printf("%s: Field: %s", TR2_CAT, item->string);
+		trace2_printf("%s: [uri-base '%s'][gvfs '%s'][args '%s' '%s']",
+			      TR2_CAT,
+			      req->uri_base.buf,
+			      req->gvfs_api.buf,
+			      req->slash_args.buf,
+			      req->quest_args.buf);
+		for_each_string_list_item(item, &req->header_list)
+			trace2_printf("%s: Hdrs: %s", TR2_CAT, item->string);
+	}
+
+	return WR_OK;
+}
+
+static enum worker_result dispatch(struct req *req)
+{
+	const char *method;
+	enum worker_result wr;
+
+	if (string_list_has_string(&mayhem_list, "close_no_write")) {
+		logmayhem("close_no_write");
+		return WR_MAYHEM | WR_HANGUP;
+	}
+	if (string_list_has_string(&mayhem_list, "http_503")) {
+		logmayhem("http_503");
+		return send_http_error(1, 503, "Service Unavailable", 2,
+				       WR_MAYHEM | WR_HANGUP);
+	}
+	if (string_list_has_string(&mayhem_list, "http_429")) {
+		logmayhem("http_429");
+		return send_http_error(1, 429, "Too Many Requests", 2,
+				       WR_MAYHEM | WR_HANGUP);
+	}
+	if (string_list_has_string(&mayhem_list, "http_429_1") &&
+	    mayhem_child == 0) {
+		logmayhem("http_429_1");
+		return send_http_error(1, 429, "Too Many Requests", 2,
+				       WR_MAYHEM | WR_HANGUP);
+	}
+	if (mayhem_try_auth(req, &wr))
+		return wr;
+
+	method = req->start_line_fields.items[0].string;
+
+	if (!strcmp(req->gvfs_api.buf, "gvfs/objects")) {
+
+		if (!strcmp(method, "GET"))
+			return do__gvfs_objects__get(req);
+		if (!strcmp(method, "POST"))
+			return do__gvfs_objects__post(req);
+	}
+
+	if (!strcmp(req->gvfs_api.buf, "gvfs/config")) {
+
+		if (!strcmp(method, "GET"))
+			return do__gvfs_config__get(req);
+	}
+
+	return send_http_error(1, 501, "Not Implemented", -1,
+			       WR_OK | WR_HANGUP);
+}
+
+static enum worker_result worker(void)
+{
+	struct req req = REQ__INIT;
+	char *client_addr = getenv("REMOTE_ADDR");
+	char *client_port = getenv("REMOTE_PORT");
+	enum worker_result wr = WR_OK;
+
+	if (client_addr)
+		loginfo("Connection from %s:%s", client_addr, client_port);
+
+	set_keep_alive(0);
+
+	while (1) {
+		req__release(&req);
+
+		alarm(init_timeout ? 
init_timeout : timeout); + wr = req__read(&req, 0); + alarm(0); + + if (wr & WR_STOP_THE_MUSIC) + break; + + wr = dispatch(&req); + if (wr & WR_STOP_THE_MUSIC) + break; + } + + close(0); + close(1); + + return !!(wr & WR_IO_ERROR); +} + +////////////////////////////////////////////////////////////////// +// This section contains the listener and child-process management +// code used by the primary instance to accept incoming connections +// and dispatch them to async child process "worker" instances. +////////////////////////////////////////////////////////////////// + +static int addrcmp(const struct sockaddr_storage *s1, + const struct sockaddr_storage *s2) +{ + const struct sockaddr *sa1 = (const struct sockaddr*) s1; + const struct sockaddr *sa2 = (const struct sockaddr*) s2; + + if (sa1->sa_family != sa2->sa_family) + return sa1->sa_family - sa2->sa_family; + if (sa1->sa_family == AF_INET) + return memcmp(&((struct sockaddr_in *)s1)->sin_addr, + &((struct sockaddr_in *)s2)->sin_addr, + sizeof(struct in_addr)); +#ifndef NO_IPV6 + if (sa1->sa_family == AF_INET6) + return memcmp(&((struct sockaddr_in6 *)s1)->sin6_addr, + &((struct sockaddr_in6 *)s2)->sin6_addr, + sizeof(struct in6_addr)); +#endif + return 0; +} + +static int max_connections = 32; + +static unsigned int live_children; + +static struct child { + struct child *next; + struct child_process cld; + struct sockaddr_storage address; +} *firstborn; + +static void add_child(struct child_process *cld, struct sockaddr *addr, socklen_t addrlen) +{ + struct child *newborn, **cradle; + + newborn = xcalloc(1, sizeof(*newborn)); + live_children++; + memcpy(&newborn->cld, cld, sizeof(*cld)); + memcpy(&newborn->address, addr, addrlen); + for (cradle = &firstborn; *cradle; cradle = &(*cradle)->next) + if (!addrcmp(&(*cradle)->address, &newborn->address)) + break; + newborn->next = *cradle; + *cradle = newborn; +} + +/* + * This gets called if the number of connections grows + * past "max_connections". + * + * We kill the newest connection from a duplicate IP. 
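+ * (The duplicate-address scan below mirrors the child-management
+ * heuristic used by git's daemon.c.)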
+ */
+static void kill_some_child(void)
+{
+	const struct child *blanket, *next;
+
+	if (!(blanket = firstborn))
+		return;
+
+	for (; (next = blanket->next); blanket = next)
+		if (!addrcmp(&blanket->address, &next->address)) {
+			kill(blanket->cld.pid, SIGTERM);
+			break;
+		}
+}
+
+static void check_dead_children(void)
+{
+	int status;
+	pid_t pid;
+
+	struct child **cradle, *blanket;
+	for (cradle = &firstborn; (blanket = *cradle);)
+		if ((pid = waitpid(blanket->cld.pid, &status, WNOHANG)) > 1) {
+			const char *dead = "";
+			if (status)
+				dead = " (with error)";
+			loginfo("[%"PRIuMAX"] Disconnected%s", (uintmax_t)pid, dead);
+
+			/* remove the child */
+			*cradle = blanket->next;
+			live_children--;
+			child_process_clear(&blanket->cld);
+			free(blanket);
+		} else
+			cradle = &blanket->next;
+}
+
+static struct argv_array cld_argv = ARGV_ARRAY_INIT;
+static void handle(int incoming, struct sockaddr *addr, socklen_t addrlen)
+{
+	struct child_process cld = CHILD_PROCESS_INIT;
+
+	if (max_connections && live_children >= max_connections) {
+		kill_some_child();
+		sleep(1); /* give it some time to die */
+		check_dead_children();
+		if (live_children >= max_connections) {
+			close(incoming);
+			logerror("Too many children, dropping connection");
+			return;
+		}
+	}
+
+	if (addr->sa_family == AF_INET) {
+		char buf[128] = "";
+		struct sockaddr_in *sin_addr = (void *) addr;
+		inet_ntop(addr->sa_family, &sin_addr->sin_addr, buf, sizeof(buf));
+		argv_array_pushf(&cld.env_array, "REMOTE_ADDR=%s", buf);
+		argv_array_pushf(&cld.env_array, "REMOTE_PORT=%d",
+				 ntohs(sin_addr->sin_port));
+#ifndef NO_IPV6
+	} else if (addr->sa_family == AF_INET6) {
+		char buf[128] = "";
+		struct sockaddr_in6 *sin6_addr = (void *) addr;
+		inet_ntop(AF_INET6, &sin6_addr->sin6_addr, buf, sizeof(buf));
+		argv_array_pushf(&cld.env_array, "REMOTE_ADDR=[%s]", buf);
+		argv_array_pushf(&cld.env_array, "REMOTE_PORT=%d",
+				 ntohs(sin6_addr->sin6_port));
+#endif
+	}
+
+	if (mayhem_list.nr) {
+		argv_array_pushf(&cld.env_array, "MAYHEM_CHILD=%d",
+				 mayhem_child++);
+	}
+
+	cld.argv = cld_argv.argv;
+	cld.in = incoming;
+	cld.out = dup(incoming);
+
+	if (cld.out < 0)
+		logerror("could not dup() `incoming`");
+	else if (start_command(&cld))
+		logerror("unable to fork");
+	else
+		add_child(&cld, addr, addrlen);
+}
+
+static void child_handler(int signo)
+{
+	/*
+	 * Otherwise empty handler because system calls will get
+	 * interrupted upon signal receipt.
+	 * SysV needs the handler to be rearmed.
+	 */
+	signal(SIGCHLD, child_handler);
+}
+
+static int set_reuse_addr(int sockfd)
+{
+	int on = 1;
+
+	if (!reuseaddr)
+		return 0;
+	return setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR,
+			  &on, sizeof(on));
+}
+
+struct socketlist {
+	int *list;
+	size_t nr;
+	size_t alloc;
+};
+
+static const char *ip2str(int family, struct sockaddr *sin, socklen_t len)
+{
+#ifdef NO_IPV6
+	static char ip[INET_ADDRSTRLEN];
+#else
+	static char ip[INET6_ADDRSTRLEN];
+#endif
+
+	switch (family) {
+#ifndef NO_IPV6
+	case AF_INET6:
+		inet_ntop(family, &((struct sockaddr_in6*)sin)->sin6_addr, ip, len);
+		break;
+#endif
+	case AF_INET:
+		inet_ntop(family, &((struct sockaddr_in*)sin)->sin_addr, ip, len);
+		break;
+	default:
+		xsnprintf(ip, sizeof(ip), "<unknown>");
+	}
+	return ip;
+}
+
+#ifndef NO_IPV6
+
+static int setup_named_sock(char *listen_addr, int listen_port, struct socketlist *socklist)
+{
+	int socknum = 0;
+	char pbuf[NI_MAXSERV];
+	struct addrinfo hints, *ai0, *ai;
+	int gai;
+	long flags;
+
+	xsnprintf(pbuf, sizeof(pbuf), "%d", listen_port);
+	memset(&hints, 0, sizeof(hints));
+	
hints.ai_family = AF_UNSPEC; + hints.ai_socktype = SOCK_STREAM; + hints.ai_protocol = IPPROTO_TCP; + hints.ai_flags = AI_PASSIVE; + + gai = getaddrinfo(listen_addr, pbuf, &hints, &ai0); + if (gai) { + logerror("getaddrinfo() for %s failed: %s", listen_addr, gai_strerror(gai)); + return 0; + } + + for (ai = ai0; ai; ai = ai->ai_next) { + int sockfd; + + sockfd = socket(ai->ai_family, ai->ai_socktype, ai->ai_protocol); + if (sockfd < 0) + continue; + if (sockfd >= FD_SETSIZE) { + logerror("Socket descriptor too large"); + close(sockfd); + continue; + } + +#ifdef IPV6_V6ONLY + if (ai->ai_family == AF_INET6) { + int on = 1; + setsockopt(sockfd, IPPROTO_IPV6, IPV6_V6ONLY, + &on, sizeof(on)); + /* Note: error is not fatal */ + } +#endif + + if (set_reuse_addr(sockfd)) { + logerror("Could not set SO_REUSEADDR: %s", strerror(errno)); + close(sockfd); + continue; + } + + set_keep_alive(sockfd); + + if (bind(sockfd, ai->ai_addr, ai->ai_addrlen) < 0) { + logerror("Could not bind to %s: %s", + ip2str(ai->ai_family, ai->ai_addr, ai->ai_addrlen), + strerror(errno)); + close(sockfd); + continue; /* not fatal */ + } + if (listen(sockfd, 5) < 0) { + logerror("Could not listen to %s: %s", + ip2str(ai->ai_family, ai->ai_addr, ai->ai_addrlen), + strerror(errno)); + close(sockfd); + continue; /* not fatal */ + } + + flags = fcntl(sockfd, F_GETFD, 0); + if (flags >= 0) + fcntl(sockfd, F_SETFD, flags | FD_CLOEXEC); + + ALLOC_GROW(socklist->list, socklist->nr + 1, socklist->alloc); + socklist->list[socklist->nr++] = sockfd; + socknum++; + } + + freeaddrinfo(ai0); + + return socknum; +} + +#else /* NO_IPV6 */ + +static int setup_named_sock(char *listen_addr, int listen_port, struct socketlist *socklist) +{ + struct sockaddr_in sin; + int sockfd; + long flags; + + memset(&sin, 0, sizeof sin); + sin.sin_family = AF_INET; + sin.sin_port = htons(listen_port); + + if (listen_addr) { + /* Well, host better be an IP address here. 
*/ + if (inet_pton(AF_INET, listen_addr, &sin.sin_addr.s_addr) <= 0) + return 0; + } else { + sin.sin_addr.s_addr = htonl(INADDR_ANY); + } + + sockfd = socket(AF_INET, SOCK_STREAM, 0); + if (sockfd < 0) + return 0; + + if (set_reuse_addr(sockfd)) { + logerror("Could not set SO_REUSEADDR: %s", strerror(errno)); + close(sockfd); + return 0; + } + + set_keep_alive(sockfd); + + if ( bind(sockfd, (struct sockaddr *)&sin, sizeof sin) < 0 ) { + logerror("Could not bind to %s: %s", + ip2str(AF_INET, (struct sockaddr *)&sin, sizeof(sin)), + strerror(errno)); + close(sockfd); + return 0; + } + + if (listen(sockfd, 5) < 0) { + logerror("Could not listen to %s: %s", + ip2str(AF_INET, (struct sockaddr *)&sin, sizeof(sin)), + strerror(errno)); + close(sockfd); + return 0; + } + + flags = fcntl(sockfd, F_GETFD, 0); + if (flags >= 0) + fcntl(sockfd, F_SETFD, flags | FD_CLOEXEC); + + ALLOC_GROW(socklist->list, socklist->nr + 1, socklist->alloc); + socklist->list[socklist->nr++] = sockfd; + return 1; +} + +#endif + +static void socksetup(struct string_list *listen_addr, int listen_port, struct socketlist *socklist) +{ + if (!listen_addr->nr) + setup_named_sock("127.0.0.1", listen_port, socklist); + else { + int i, socknum; + for (i = 0; i < listen_addr->nr; i++) { + socknum = setup_named_sock(listen_addr->items[i].string, + listen_port, socklist); + + if (socknum == 0) + logerror("unable to allocate any listen sockets for host %s on port %u", + listen_addr->items[i].string, listen_port); + } + } +} + +static int service_loop(struct socketlist *socklist) +{ + struct pollfd *pfd; + int i; + + pfd = xcalloc(socklist->nr, sizeof(struct pollfd)); + + for (i = 0; i < socklist->nr; i++) { + pfd[i].fd = socklist->list[i]; + pfd[i].events = POLLIN; + } + + signal(SIGCHLD, child_handler); + + for (;;) { + int i; + int nr_ready; + int timeout = (pid_file ? 100 : -1); + + check_dead_children(); + + nr_ready = poll(pfd, socklist->nr, timeout); + if (nr_ready < 0) { + if (errno != EINTR) { + logerror("Poll failed, resuming: %s", + strerror(errno)); + sleep(1); + } + continue; + } + else if (nr_ready == 0) { + /* + * If we have a pid_file, then we watch it. + * If someone deletes it, we shutdown the service. + * The shell scripts in the test suite will use this. + */ + if (!pid_file || file_exists(pid_file)) + continue; + goto shutdown; + } + + for (i = 0; i < socklist->nr; i++) { + if (pfd[i].revents & POLLIN) { + union { + struct sockaddr sa; + struct sockaddr_in sai; +#ifndef NO_IPV6 + struct sockaddr_in6 sai6; +#endif + } ss; + socklen_t sslen = sizeof(ss); + int incoming = accept(pfd[i].fd, &ss.sa, &sslen); + if (incoming < 0) { + switch (errno) { + case EAGAIN: + case EINTR: + case ECONNABORTED: + continue; + default: + die_errno("accept returned"); + } + } + handle(incoming, &ss.sa, sslen); + } + } + } + +shutdown: + loginfo("Starting graceful shutdown (pid-file gone)"); + for (i = 0; i < socklist->nr; i++) + close(socklist->list[i]); + + return 0; +} + +static int serve(struct string_list *listen_addr, int listen_port) +{ + struct socketlist socklist = { NULL, 0, 0 }; + + socksetup(listen_addr, listen_port, &socklist); + if (socklist.nr == 0) + die("unable to allocate any listen sockets on port %u", + listen_port); + + loginfo("Ready to rumble"); + + /* + * Wait to create the pid-file until we've setup the sockets + * and are open for business. 
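+	 * (The test scripts poll for this file to learn that the server
+	 * is ready, and delete it to request a graceful shutdown.)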
+	 */
+	if (pid_file)
+		write_file(pid_file, "%"PRIuMAX, (uintmax_t) getpid());
+
+	return service_loop(&socklist);
+}
+
+//////////////////////////////////////////////////////////////////
+// This section is executed by both the primary instance and all
+// worker instances.  So, yes, each child-process re-parses the
+// command-line arguments and re-discovers how it should behave.
+//////////////////////////////////////////////////////////////////
+
+int cmd_main(int argc, const char **argv)
+{
+	int listen_port = 0;
+	struct string_list listen_addr = STRING_LIST_INIT_NODUP;
+	int worker_mode = 0;
+	int i;
+
+	trace2_cmd_name("test-gvfs-protocol");
+	setup_git_directory_gently(NULL);
+
+	for (i = 1; i < argc; i++) {
+		const char *arg = argv[i];
+		const char *v;
+
+		if (skip_prefix(arg, "--listen=", &v)) {
+			string_list_append(&listen_addr, xstrdup_tolower(v));
+			continue;
+		}
+		if (skip_prefix(arg, "--port=", &v)) {
+			char *end;
+			unsigned long n;
+			n = strtoul(v, &end, 0);
+			if (*v && !*end) {
+				listen_port = n;
+				continue;
+			}
+		}
+		if (!strcmp(arg, "--worker")) {
+			worker_mode = 1;
+			trace2_cmd_mode("worker");
+			continue;
+		}
+		if (!strcmp(arg, "--verbose")) {
+			verbose = 1;
+			continue;
+		}
+		if (skip_prefix(arg, "--timeout=", &v)) {
+			timeout = atoi(v);
+			continue;
+		}
+		if (skip_prefix(arg, "--init-timeout=", &v)) {
+			init_timeout = atoi(v);
+			continue;
+		}
+		if (skip_prefix(arg, "--max-connections=", &v)) {
+			max_connections = atoi(v);
+			if (max_connections < 0)
+				max_connections = 0; /* unlimited */
+			continue;
+		}
+		if (!strcmp(arg, "--reuseaddr")) {
+			reuseaddr = 1;
+			continue;
+		}
+		if (skip_prefix(arg, "--pid-file=", &v)) {
+			pid_file = v;
+			continue;
+		}
+		if (skip_prefix(arg, "--mayhem=", &v)) {
+			string_list_append(&mayhem_list, v);
+			continue;
+		}
+
+		usage(test_gvfs_protocol_usage);
+	}
+
+	/* avoid splitting a message in the middle */
+	setvbuf(stderr, NULL, _IOFBF, 4096);
+
+	if (listen_port == 0)
+		listen_port = DEFAULT_GIT_PORT;
+
+	/*
+	 * If no --listen= args are given, the setup_named_sock()
+	 * code will receive a NULL address and set INADDR_ANY.
+	 * This exposes both internal and external interfaces on the
+	 * port.
+	 *
+	 * Disallow that and default to the internal-use-only loopback
+	 * address.
+	 */
+	if (!listen_addr.nr)
+		string_list_append(&listen_addr, "127.0.0.1");
+
+	/*
+	 * worker_mode is set in our own child process instances
+	 * (that are bound to a connected socket from a client).
+	 */
+	if (worker_mode) {
+		if (mayhem_list.nr) {
+			const char *string = getenv("MAYHEM_CHILD");
+			if (string && *string)
+				mayhem_child = atoi(string);
+		}
+
+		build_gvfs_config_json(&jw_config, &listen_addr, listen_port);
+
+		return worker();
+	}
+
+	/*
+	 * `cld_argv` is a bit of a clever hack.  The top-level instance
+	 * of test-gvfs-protocol.exe does the normal bind/listen/accept
+	 * stuff.  For each incoming socket, the top-level process spawns
+	 * a child instance of test-gvfs-protocol.exe *WITH* the additional
+	 * `--worker` argument.  This causes the child to set `worker_mode`
+	 * and immediately call `worker()` using the connected socket (and
+	 * without the usual need for fork() or threads).
+	 *
+	 * The magic here is made possible because `cld_argv` is static
+	 * and handle() (called by service_loop()) knows about it.
+	 */
+	argv_array_push(&cld_argv, argv[0]);
+	argv_array_push(&cld_argv, "--worker");
+	for (i = 1; i < argc; ++i)
+		argv_array_push(&cld_argv, argv[i]);
+
+	/*
+	 * Setup primary instance to listen for connections.
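+	 * serve() only returns after a graceful shutdown (i.e. after
+	 * the pid-file has been deleted).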
+ */ + return serve(&listen_addr, listen_port); +} diff --git a/t/t5799-gvfs-helper.sh b/t/t5799-gvfs-helper.sh new file mode 100755 index 00000000000000..6c960accce17c1 --- /dev/null +++ b/t/t5799-gvfs-helper.sh @@ -0,0 +1,972 @@ +#!/bin/sh + +test_description='test gvfs-helper and GVFS Protocol' + +. ./test-lib.sh + +# Set the port for t/helper/test-gvfs-protocol.exe from either the +# environment or from the test number of this shell script. +# +test_set_port GIT_TEST_GVFS_PROTOCOL_PORT + +# Setup the following repos: +# +# repo_src: +# A normal, no-magic, fully-populated clone of something. +# No GVFS (aka VFS4G). No Scalar. No partial-clone. +# This will be used by "t/helper/test-gvfs-protocol.exe" +# to serve objects. +# +# repo_t1: +# An empty repo with no contents nor commits. That is, +# everything is missing. For the tests based on this repo, +# we don't care why it is missing objects (or if we could +# actually use it). We are only testing explicit object +# fetching using gvfs-helper.exe in isolation. +# +REPO_SRC="$PWD"/repo_src +REPO_T1="$PWD"/repo_t1 + +# Setup some loopback URLs where test-gvfs-protocol.exe will be +# listening. We will spawn it directly inside the repo_src directory, +# so we don't need any of the directory mapping or configuration +# machinery found in "git-daemon.exe" or "git-http-backend.exe". +# +# This lets us use the "uri-base" part of the URL (prior to the REST +# API "/gvfs/") to control how our mock server responds. For +# example, only the origin (main Git) server supports "/gvfs/config". +# +# For example, this means that if we add a remote containing $ORIGIN_URL, +# it will work with gvfs-helper, but not for fetch (without some mapping +# tricks). +# +HOST_PORT=127.0.0.1:$GIT_TEST_GVFS_PROTOCOL_PORT +ORIGIN_URL=http://$HOST_PORT/servertype/origin +CACHE_URL=http://$HOST_PORT/servertype/cache + +SHARED_CACHE_T1="$PWD"/shared_cache_t1 + +# The pid-file is created by test-gvfs-protocol.exe when it starts. +# The server will shut down if/when we delete it. (This is a little +# easier than killing it by PID.) +# +PID_FILE="$PWD"/pid-file.pid +SERVER_LOG="$PWD"/OUT.server.log + +PATH="$GIT_BUILD_DIR/t/helper/:$PATH" && export PATH + +OIDS_FILE="$PWD"/oid_list.txt +OIDS_CT_FILE="$PWD"/oid_ct_list.txt +OIDS_BLOBS_FILE="$PWD"/oids_blobs_file.txt +OID_ONE_BLOB_FILE="$PWD"/oid_one_blob_file.txt + +# Get a list of available OIDs in repo_src so that we can try to fetch +# them and so that we don't have to hard-code a list of known OIDs. +# This doesn't need to be a complete list -- just enough to drive some +# representative tests. +# +# Optionally require that we find a minimum number of OIDs. +# +get_list_of_oids () { + git -C "$REPO_SRC" rev-list --objects HEAD | sed 's/ .*//' | sort >"$OIDS_FILE" + + if test $# -eq 1 + then + actual_nr=$(( $(wc -l <"$OIDS_FILE") )) + if test $actual_nr -lt $1 + then + echo "get_list_of_oids: insufficient data. Need $1 OIDs." + return 1 + fi + fi + return 0 +} + +get_list_of_blobs_oids () { + git -C "$REPO_SRC" ls-tree HEAD | grep ' blob ' | awk "{print \$3}" | sort >"$OIDS_BLOBS_FILE" + head -1 <"$OIDS_BLOBS_FILE" >"$OID_ONE_BLOB_FILE" +} + +get_list_of_commit_and_tree_oids () { + git -C "$REPO_SRC" cat-file --batch-check --batch-all-objects | awk "/commit|tree/ {print \$1}" | sort >"$OIDS_CT_FILE" + + if test $# -eq 1 + then + actual_nr=$(( $(wc -l <"$OIDS_CT_FILE") )) + if test $actual_nr -lt $1 + then + echo "get_list_of_commit_and_tree_oids: insufficient data. Need $1 OIDs." 
+			return 1
+		fi
+	fi
+	return 0
+}
+
+test_expect_success 'setup repos' '
+	test_create_repo "$REPO_SRC" &&
+	#
+	# test_commit_bulk() does magic to create a packfile containing
+	# the new commits.
+	#
+	test_commit_bulk -C "$REPO_SRC" --filename="batch_a.%s.t" 9 &&
+	cp "$REPO_SRC"/.git/refs/heads/master m1.branch &&
+	test_commit_bulk -C "$REPO_SRC" --filename="batch_b.%s.t" 9 &&
+	cp "$REPO_SRC"/.git/refs/heads/master m2.branch &&
+	#
+	# test_commit() creates commits, trees, tags, and blobs and leaves
+	# them loose.
+	#
+	test_commit -C "$REPO_SRC" file1.txt &&
+	test_commit -C "$REPO_SRC" file2.txt &&
+	test_commit -C "$REPO_SRC" file3.txt &&
+	test_commit -C "$REPO_SRC" file4.txt &&
+	test_commit -C "$REPO_SRC" file5.txt &&
+	test_commit -C "$REPO_SRC" file6.txt &&
+	test_commit -C "$REPO_SRC" file7.txt &&
+	test_commit -C "$REPO_SRC" file8.txt &&
+	test_commit -C "$REPO_SRC" file9.txt &&
+	cp "$REPO_SRC"/.git/refs/heads/master m3.branch &&
+	#
+	# gvfs-helper.exe writes downloaded objects to a shared-cache directory
+	# rather than the ODB inside the .git directory.
+	#
+	mkdir "$SHARED_CACHE_T1" &&
+	mkdir "$SHARED_CACHE_T1/pack" &&
+	mkdir "$SHARED_CACHE_T1/info" &&
+	#
+	# setup repo_t1 and point all of the gvfs.* values to repo_src.
+	#
+	test_create_repo "$REPO_T1" &&
+	git -C "$REPO_T1" remote add origin $ORIGIN_URL &&
+	git -C "$REPO_T1" config --local gvfs.cache-server $CACHE_URL &&
+	git -C "$REPO_T1" config --local gvfs.sharedcache "$SHARED_CACHE_T1" &&
+	echo "$SHARED_CACHE_T1" >> "$REPO_T1"/.git/objects/info/alternates &&
+	#
+	# Create a fixed set of credentials and a helper to supply them.
+	#
+	cat <<-EOF >creds.txt &&
+		username=x
+		password=y
+	EOF
+	cat <<-EOF >creds.sh &&
+		#!/bin/sh
+		cat "$PWD"/creds.txt
+	EOF
+	chmod 755 creds.sh &&
+	git -C "$REPO_T1" config --local credential.helper "!f() { cat \"$PWD\"/creds.txt; }; f" &&
+	#
+	# Create some test data sets.
+	#
+	get_list_of_oids 30 &&
+	get_list_of_commit_and_tree_oids 30 &&
+	get_list_of_blobs_oids
+'
+
+stop_gvfs_protocol_server () {
+	if ! test -f "$PID_FILE"
+	then
+		return 0
+	fi
+	#
+	# The server will shutdown automatically when we delete the pid-file.
+	#
+	rm -f "$PID_FILE"
+	#
+	# Give it a few seconds to shutdown (mainly to completely release the
+	# port before the next test starts another instance and attempts to
+	# bind to it).
+	#
+	for k in 0 1 2 3 4
+	do
+		if grep -q "Starting graceful shutdown" "$SERVER_LOG"
+		then
+			return 0
+		fi
+		sleep 1
+	done
+
+	echo "stop_gvfs_protocol_server: timeout waiting for server shutdown"
+	return 1
+}
+
+start_gvfs_protocol_server () {
+	#
+	# Launch our server into the background in repo_src.
+	#
+	(
+		cd "$REPO_SRC"
+		test-gvfs-protocol --verbose \
+			--listen=127.0.0.1 \
+			--port=$GIT_TEST_GVFS_PROTOCOL_PORT \
+			--reuseaddr \
+			--pid-file="$PID_FILE" \
+			2>"$SERVER_LOG" &
+	)
+	#
+	# Give it a few seconds to get started.
+	#
+	for k in 0 1 2 3 4
+	do
+		if test -f "$PID_FILE"
+		then
+			return 0
+		fi
+		sleep 1
+	done
+
+	echo "start_gvfs_protocol_server: timeout waiting for server startup"
+	return 1
+}
+
+start_gvfs_protocol_server_with_mayhem () {
+	if test $# -lt 1
+	then
+		echo "start_gvfs_protocol_server_with_mayhem: need mayhem args"
+		return 1
+	fi
+
+	mayhem=""
+	for k in $*
+	do
+		mayhem="$mayhem --mayhem=$k"
+	done
+	#
+	# Launch our server into the background in repo_src.
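+	# (A subshell is used so that the "cd" does not affect the rest
+	# of the test script.)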
+	#
+	(
+		cd "$REPO_SRC"
+		test-gvfs-protocol --verbose \
+			--listen=127.0.0.1 \
+			--port=$GIT_TEST_GVFS_PROTOCOL_PORT \
+			--reuseaddr \
+			--pid-file="$PID_FILE" \
+			$mayhem \
+			2>"$SERVER_LOG" &
+	)
+	#
+	# Give it a few seconds to get started.
+	#
+	for k in 0 1 2 3 4
+	do
+		if test -f "$PID_FILE"
+		then
+			return 0
+		fi
+		sleep 1
+	done
+
+	echo "start_gvfs_protocol_server_with_mayhem: timeout waiting for server startup"
+	return 1
+}
+
+# Verify the number of connections from the client.
+#
+# If keep-alive is working, a series of successful sequential requests to the
+# same server should use the same TCP connection, so a simple multi-get would
+# only have one connection.
+#
+# On the other hand, an auto-retry after a network error (mayhem) will have
+# more than one for a single object request.
+#
+# TODO This may generate false alarms when we get to complicated tests, so
+# TODO we might only want to use it for basic tests.
+#
+verify_connection_count () {
+	if test $# -eq 1
+	then
+		expected_nr=$1
+	else
+		expected_nr=1
+	fi
+
+	actual_nr=$(( $(grep "Connection from" "$SERVER_LOG" | wc -l) ))
+
+	if test $actual_nr -ne $expected_nr
+	then
+		echo "verify_connection_count: expected $expected_nr; actual $actual_nr"
+		return 1
+	fi
+	return 0
+}
+
+# Verify that the set of requested objects are present in
+# the shared-cache and that there is no corruption.  We use
+# cat-file to hide whether the object is packed or loose in
+# the test repo.
+#
+# Usage: verify_objects_in_shared_cache <oid-file>
+#
+verify_objects_in_shared_cache () {
+	#
+	# See if any of the objects are missing from repo_t1.
+	#
+	git -C "$REPO_T1" cat-file --batch-check <"$1" >OUT.bc_actual || return 1
+	grep -q " missing" OUT.bc_actual && return 1
+	#
+	# See if any of the objects have different sizes or types than repo_src.
+	#
+	git -C "$REPO_SRC" cat-file --batch-check <"$1" >OUT.bc_expect || return 1
+	test_cmp OUT.bc_expect OUT.bc_actual || return 1
+	#
+	# See if any of the objects are corrupt in repo_t1.  This fully
+	# reconstructs the objects and verifies the hash and therefore
+	# detects corruption not found by the earlier "batch-check" step.
+	#
+	git -C "$REPO_T1" cat-file --batch <"$1" >OUT.b_actual || return 1
+	#
+	# TODO move the shared-cache directory (and/or the
+	# TODO .git/objects/info/alternates and temporarily unset
+	# TODO gvfs.sharedcache) and repeat the first "batch-check"
+	# TODO and make sure that they are ALL missing.
+	#
+	return 0
+}
+
+verify_received_packfile_count () {
+	if test $# -eq 1
+	then
+		expected_nr=$1
+	else
+		expected_nr=1
+	fi
+
+	actual_nr=$(( $(grep "packfile " OUT.output | wc -l) ))
+
+	if test $actual_nr -ne $expected_nr
+	then
+		echo "verify_received_packfile_count: expected $expected_nr; actual $actual_nr"
+		return 1
+	fi
+	return 0
+}
+
+per_test_cleanup () {
+	stop_gvfs_protocol_server
+
+	rm -rf "$SHARED_CACHE_T1"/[0-9a-f][0-9a-f]/
+	rm -rf "$SHARED_CACHE_T1"/info/*
+	rm -rf "$SHARED_CACHE_T1"/pack/*
+
+	rm -rf OUT.*
+	return 0
+}
+
+#################################################################
+# Basic tests to confirm the happy path works.
+#################################################################
+
+test_expect_success 'basic: GET origin multi-get no-auth' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server &&
+
+	# Connect to the origin server (w/o auth) and make a series of
+	# single-object GET requests.
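+	# (The "get" verb reads one OID per line from stdin and issues
+	# a GET /gvfs/objects/<oid> request for each.)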
+	#
+	git -C "$REPO_T1" gvfs-helper \
+		--cache-server=disable \
+		--remote=origin \
+		get \
+		<"$OIDS_FILE" >OUT.output &&
+
+	# Stop the server to prevent the verification steps from faulting-in
+	# any missing objects.
+	#
+	stop_gvfs_protocol_server &&
+
+	# gvfs-helper prints a "loose <oid>" message for each received object.
+	# Verify that gvfs-helper received each of the requested objects.
+	#
+	sed "s/loose //" <OUT.output | sort >OUT.actual &&
+	test_cmp "$OIDS_FILE" OUT.actual &&
+
+	verify_objects_in_shared_cache "$OIDS_FILE" &&
+	verify_connection_count 1
+'
+
+test_expect_success 'basic: GET cache-server multi-get trust-mode' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server &&
+
+	# Connect to the cache-server and make a series of
+	# single-object GET requests.
+	#
+	git -C "$REPO_T1" gvfs-helper \
+		--cache-server=trust \
+		--remote=origin \
+		get \
+		<"$OIDS_FILE" >OUT.output &&
+
+	# Stop the server to prevent the verification steps from faulting-in
+	# any missing objects.
+	#
+	stop_gvfs_protocol_server &&
+
+	# gvfs-helper prints a "loose <oid>" message for each received object.
+	# Verify that gvfs-helper received each of the requested objects.
+	#
+	sed "s/loose //" <OUT.output | sort >OUT.actual &&
+	test_cmp "$OIDS_FILE" OUT.actual &&
+
+	verify_objects_in_shared_cache "$OIDS_FILE" &&
+	verify_connection_count 1
+'
+
+test_expect_success 'basic: GET gvfs/config' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server &&
+
+	# Connect to the origin server (w/o auth) and make a
+	# gvfs/config request.
+	#
+	git -C "$REPO_T1" gvfs-helper \
+		--cache-server=disable \
+		--remote=origin \
+		config \
+		<"$OIDS_FILE" >OUT.output &&
+
+	# Stop the server before inspecting the results.
+	#
+	stop_gvfs_protocol_server &&
+
+	# The cache-server URL should be listed in the gvfs/config output.
+	# We confirm this before assuming error-mode will work.
+	#
+	grep -q "$CACHE_URL" OUT.output
+'
+
+test_expect_success 'basic: GET cache-server multi-get error-mode' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server &&
+
+	# Connect to the cache-server and make a series of
+	# single-object GET requests.
+	#
+	git -C "$REPO_T1" gvfs-helper \
+		--cache-server=error \
+		--remote=origin \
+		get \
+		<"$OIDS_FILE" >OUT.output &&
+
+	# Stop the server to prevent the verification steps from faulting-in
+	# any missing objects.
+	#
+	stop_gvfs_protocol_server &&
+
+	# gvfs-helper prints a "loose <oid>" message for each received object.
+	# Verify that gvfs-helper received each of the requested objects.
+	#
+	sed "s/loose //" <OUT.output | sort >OUT.actual &&
+	test_cmp "$OIDS_FILE" OUT.actual &&
+
+	verify_objects_in_shared_cache "$OIDS_FILE" &&
+
+	# Technically, we have 1 connection to the origin server
+	# for the "gvfs/config" request and 1 to cache server to
+	# get the objects, but because we are using the same port
+	# for both, keep-alive will handle it.  So 1 connection.
+	#
+	verify_connection_count 1
+'
+
+# The GVFS Protocol POST verb behaves like GET for non-commit objects
+# (in that it just returns the requested object), but for commit
+# objects POST *also* returns all trees referenced by the commit.
+#
+# The goal of this test is to confirm that gvfs-helper can send us
+# a packfile at all.  So, this test only passes blobs to not blur
+# the issue.
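+#
+# (For reference, the request body that gvfs-helper builds looks
+# roughly like the following; see the VFSForGit Protocol.md for the
+# authoritative details:
+#
+#     {"commitDepth": 1, "objectIds": ["<oid1>", "<oid2>", ...]}
+# )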
+#
+test_expect_success 'basic: POST origin blobs' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server &&
+
+	# Connect to the origin server (w/o auth) and make a
+	# multi-object POST request.
+	#
+	git -C "$REPO_T1" gvfs-helper \
+		--cache-server=disable \
+		--remote=origin \
+		--no-progress \
+		post \
+		<"$OIDS_BLOBS_FILE" >OUT.output &&
+
+	# Stop the server to prevent the verification steps from faulting-in
+	# any missing objects.
+	#
+	stop_gvfs_protocol_server &&
+
+	# gvfs-helper prints a "packfile <name>" message for each received
+	# packfile.  We verify the number of expected packfile(s) and we
+	# individually verify that each requested object is present in the
+	# shared cache (and index-pack already verified the integrity of
+	# the packfile), so we do not bother to run "git verify-pack -v"
+	# and do an exact matchup here.
+	#
+	verify_received_packfile_count 1 &&
+
+	verify_objects_in_shared_cache "$OIDS_BLOBS_FILE" &&
+	verify_connection_count 1
+'
+
+#################################################################
+# Tests to see how gvfs-helper responds to network problems.
+#
+# We use a small --max-retries value because of exponential backoff.
+#
+# These mayhem tests are interested in how gvfs-helper gracefully
+# retries when there is a network error, and verify that it gives
+# up gracefully too.
+#################################################################
+
+mayhem_observed__close__connections () {
+	if grep -q "transient" OUT.stderr
+	then
+		# Transient errors should retry.
+		# 1 for initial request + 2 retries.
+		#
+		verify_connection_count 3
+		return $?
+	elif grep -q "hard_fail" OUT.stderr
+	then
+		# Hard errors should not retry.
+		#
+		verify_connection_count 1
+		return $?
+	else
+		echo "mayhem_observed__close__connections: unexpected mayhem-induced error type"
+		return 1
+	fi
+}
+
+mayhem_observed__close () {
+	# Expected error codes for mayhem events:
+	#     close_read
+	#     close_write
+	#     close_no_write
+	#
+	#     CURLE_PARTIAL_FILE  18
+	#     CURLE_GOT_NOTHING   52
+	#     CURLE_SEND_ERROR    55
+	#     CURLE_RECV_ERROR    56
+	#
+	# I don't want to pin it down to an exact error for each because there
+	# may be races here because of network buffering.
+	#
+	# Also, it is unclear which of these network errors should be transient
+	# (with retry) and which should be a hard-fail (without retry).  I'm only
+	# going to verify the connection counts based upon what type of error
+	# gvfs-helper claimed it to be.
+	#
+	if grep -q "error: get: (curl:18)" OUT.stderr ||
+	   grep -q "error: get: (curl:52)" OUT.stderr ||
+	   grep -q "error: get: (curl:55)" OUT.stderr ||
+	   grep -q "error: get: (curl:56)" OUT.stderr
+	then
+		mayhem_observed__close__connections
+		return $?
+	else
+		echo "mayhem_observed__close: unexpected mayhem-induced error"
+		return 1
+	fi
+}
+
+test_expect_success 'curl-error: no server' '
+	test_when_finished "per_test_cleanup" &&
+
+	# Try to do a multi-get without a server.
+	#
+	# Use a small --max-retries value because of exponential backoff,
+	# but still exercise the retry logic a bit.
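+	# (With --max-retries=2 we expect at most three attempts in
+	# total: the initial request plus two retries.)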
+	#
+	test_must_fail \
+		git -C "$REPO_T1" gvfs-helper \
+			--cache-server=disable \
+			--remote=origin \
+			get \
+			--max-retries=2 \
+			<"$OIDS_FILE" >OUT.output 2>OUT.stderr &&
+
+	# CURLE_COULDNT_CONNECT 7
+	grep -q "error: get: (curl:7)" OUT.stderr
+'
+
+test_expect_success 'curl-error: close socket while reading request' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server_with_mayhem close_read &&
+
+	test_must_fail \
+		git -C "$REPO_T1" gvfs-helper \
+			--cache-server=disable \
+			--remote=origin \
+			get \
+			--max-retries=2 \
+			<"$OIDS_FILE" >OUT.output 2>OUT.stderr &&
+
+	stop_gvfs_protocol_server &&
+
+	mayhem_observed__close
+'
+
+test_expect_success 'curl-error: close socket while writing response' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server_with_mayhem close_write &&
+
+	test_must_fail \
+		git -C "$REPO_T1" gvfs-helper \
+			--cache-server=disable \
+			--remote=origin \
+			get \
+			--max-retries=2 \
+			<"$OIDS_FILE" >OUT.output 2>OUT.stderr &&
+
+	stop_gvfs_protocol_server &&
+
+	mayhem_observed__close
+'
+
+test_expect_success 'curl-error: close socket before writing response' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server_with_mayhem close_no_write &&
+
+	test_must_fail \
+		git -C "$REPO_T1" gvfs-helper \
+			--cache-server=disable \
+			--remote=origin \
+			get \
+			--max-retries=2 \
+			<"$OIDS_FILE" >OUT.output 2>OUT.stderr &&
+
+	stop_gvfs_protocol_server &&
+
+	mayhem_observed__close
+'
+
+#################################################################
+# Tests to confirm that gvfs-helper does silently recover when
+# a retry succeeds.
+#
+# Note: I'm only going to do this for one of the close_* mayhem events.
+#################################################################
+
+test_expect_success 'successful retry after curl-error: origin get' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server_with_mayhem close_read_1 &&
+
+	# Connect to the origin server (w/o auth).
+	# Make a single-object GET request.
+	# Confirm that it succeeds without error.
+	#
+	git -C "$REPO_T1" gvfs-helper \
+		--cache-server=disable \
+		--remote=origin \
+		get \
+		--max-retries=2 \
+		<"$OID_ONE_BLOB_FILE" >OUT.output &&
+
+	stop_gvfs_protocol_server &&
+
+	# gvfs-helper prints a "loose <oid>" message for each received object.
+	# Verify that gvfs-helper received each of the requested objects.
+	#
+	sed "s/loose //" <OUT.output | sort >OUT.actual &&
+	test_cmp "$OID_ONE_BLOB_FILE" OUT.actual &&
+
+	verify_objects_in_shared_cache "$OID_ONE_BLOB_FILE" &&
+	verify_connection_count 2
+'
+
+#################################################################
+# Tests to see how gvfs-helper responds to HTTP errors/problems.
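+#
+# The server's --mayhem=http_NNN arguments force the corresponding
+# HTTP status code instead of a normal response; the *_1 variants
+# only fail the first connection, so a retry can succeed.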
+#
+#################################################################
+
+# See "enum gh__error_code" in gvfs-helper.c
+#
+GH__ERROR_CODE__HTTP_404=4
+GH__ERROR_CODE__HTTP_429=5
+GH__ERROR_CODE__HTTP_503=6
+
+test_expect_success 'http-error: 503 Service Unavailable (with retry)' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server_with_mayhem http_503 &&
+
+	test_expect_code $GH__ERROR_CODE__HTTP_503 \
+		git -C "$REPO_T1" gvfs-helper \
+			--cache-server=disable \
+			--remote=origin \
+			get \
+			--max-retries=2 \
+			<"$OIDS_FILE" >OUT.output 2>OUT.stderr &&
+
+	stop_gvfs_protocol_server &&
+
+	grep -q "error: get: (http:503)" OUT.stderr &&
+	verify_connection_count 3
+'
+
+test_expect_success 'http-error: 429 Too Many Requests (with retry)' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server_with_mayhem http_429 &&
+
+	test_expect_code $GH__ERROR_CODE__HTTP_429 \
+		git -C "$REPO_T1" gvfs-helper \
+			--cache-server=disable \
+			--remote=origin \
+			get \
+			--max-retries=2 \
+			<"$OIDS_FILE" >OUT.output 2>OUT.stderr &&
+
+	stop_gvfs_protocol_server &&
+
+	grep -q "error: get: (http:429)" OUT.stderr &&
+	verify_connection_count 3
+'
+
+test_expect_success 'http-error: 404 Not Found (no retry)' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server_with_mayhem http_404 &&
+
+	test_expect_code $GH__ERROR_CODE__HTTP_404 \
+		git -C "$REPO_T1" gvfs-helper \
+			--cache-server=disable \
+			--remote=origin \
+			get \
+			--max-retries=2 \
+			<"$OID_ONE_BLOB_FILE" >OUT.output 2>OUT.stderr &&
+
+	stop_gvfs_protocol_server &&
+
+	grep -q "error: get: (http:404)" OUT.stderr &&
+	verify_connection_count 1
+'
+
+#################################################################
+# Tests to confirm that gvfs-helper does silently recover when an
+# HTTP request succeeds after a failure.
+#
+#################################################################
+
+test_expect_success 'successful retry after http-error: origin get' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server_with_mayhem http_429_1 &&
+
+	# Connect to the origin server (w/o auth).
+	# Make a single-object GET request.
+	# Confirm that it succeeds without error.
+	#
+	git -C "$REPO_T1" gvfs-helper \
+		--cache-server=disable \
+		--remote=origin \
+		get \
+		--max-retries=2 \
+		<"$OID_ONE_BLOB_FILE" >OUT.output &&
+
+	stop_gvfs_protocol_server &&
+
+	# gvfs-helper prints a "loose <oid>" message for each received object.
+	# Verify that gvfs-helper received each of the requested objects.
+	#
+	sed "s/loose //" <OUT.output | sort >OUT.actual &&
+	test_cmp "$OID_ONE_BLOB_FILE" OUT.actual &&
+
+	verify_objects_in_shared_cache "$OID_ONE_BLOB_FILE" &&
+	verify_connection_count 2
+'
+
+#################################################################
+# Test HTTP Auth
+#
+#################################################################
+
+test_expect_success 'HTTP GET Auth on Origin Server' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server_with_mayhem http_401 &&
+
+	# Force server to require auth.
+	# Connect to the origin server without auth.
+	# Make a single-object GET request.
+	# Confirm that it gets a 401 and then retries with auth.
+	#
+	GIT_CONFIG_NOSYSTEM=1 \
+		git -C "$REPO_T1" gvfs-helper \
+			--cache-server=disable \
+			--remote=origin \
+			get \
+			--max-retries=2 \
+			<"$OID_ONE_BLOB_FILE" >OUT.output &&
+
+	stop_gvfs_protocol_server &&
+
+	# gvfs-helper prints a "loose <oid>" message for each received object.
+	# Verify that gvfs-helper received each of the requested objects.
+	#
+	sed "s/loose //" <OUT.output | sort >OUT.actual &&
+	test_cmp "$OID_ONE_BLOB_FILE" OUT.actual &&
+
+	verify_objects_in_shared_cache "$OID_ONE_BLOB_FILE" &&
+	verify_connection_count 2
+'
+
+test_expect_success 'HTTP POST Auth on Origin Server' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server_with_mayhem http_401 &&
+
+	# Connect to the origin server and make a multi-object POST
+	# request and verify that it automatically handles the 401.
+	#
+	git -C "$REPO_T1" gvfs-helper \
+		--cache-server=disable \
+		--remote=origin \
+		--no-progress \
+		post \
+		<"$OIDS_BLOBS_FILE" >OUT.output &&
+
+	# Stop the server to prevent the verification steps from faulting-in
+	# any missing objects.
+	#
+	stop_gvfs_protocol_server &&
+
+	# gvfs-helper prints a "packfile <name>" message for each received
+	# packfile.  We verify the number of expected packfile(s) and we
+	# individually verify that each requested object is present in the
+	# shared cache (and index-pack already verified the integrity of
+	# the packfile), so we do not bother to run "git verify-pack -v"
+	# and do an exact matchup here.
+	#
+	verify_received_packfile_count 1 &&
+
+	verify_objects_in_shared_cache "$OIDS_BLOBS_FILE" &&
+	verify_connection_count 2
+'
+
+test_expect_success 'HTTP GET Auth on Cache Server' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server_with_mayhem http_401 &&
+
+	# Try auth to cache-server.  Note that gvfs-helper *ALWAYS* sends
+	# creds to cache-servers, so we will never see the "400 Bad Request"
+	# response.  And we are using "trust" mode, so we only expect 1
+	# connection to the server.
+	#
+	GIT_CONFIG_NOSYSTEM=1 \
+		git -C "$REPO_T1" gvfs-helper \
+			--cache-server=trust \
+			--remote=origin \
+			get \
+			--max-retries=2 \
+			<"$OID_ONE_BLOB_FILE" >OUT.output &&
+
+	stop_gvfs_protocol_server &&
+
+	# gvfs-helper prints a "loose <oid>" message for each received object.
+	# Verify that gvfs-helper received each of the requested objects.
+	#
+	sed "s/loose //" <OUT.output | sort >OUT.actual &&
+	test_cmp "$OID_ONE_BLOB_FILE" OUT.actual &&
+
+	verify_objects_in_shared_cache "$OID_ONE_BLOB_FILE" &&
+	verify_connection_count 1
+'
+
+#################################################################
+# Integration tests with Git.exe
+#
+# Now that we have confirmed that gvfs-helper works in isolation,
+# run a series of tests using random Git commands that fault-in
+# objects as needed.
+#
+# At this point, I'm going to stop verifying the shape of the ODB
+# (loose vs packfiles) and the number of connections required to
+# get them.  The tests from here on are to verify that objects are
+# magically fetched whenever required.
+#################################################################
+
+test_expect_success 'integration: explicit commit/trees, implicit blobs: log file' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server &&
+
+	# We have a very empty repo.  Seed it with all of the commits
+	# and trees.  The purpose of this test is to demand-load the
+	# needed blobs only, so we prefetch the commits and trees.
+	#
+	git -C "$REPO_T1" gvfs-helper \
+		--cache-server=disable \
+		--remote=origin \
+		get \
+		<"$OIDS_CT_FILE" >OUT.output &&
+
+	# Confirm that we do not have the blobs locally.
+	# With gvfs-helper turned off, we should fail.
+	#
+	test_must_fail \
+		git -C "$REPO_T1" -c core.usegvfshelper=false \
+			log $(cat m3.branch) -- file9.txt \
+			>OUT.output 2>OUT.stderr &&
+
+	# Turn on gvfs-helper and retry.  This should implicitly fetch
+	# any needed blobs.
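+	# (With core.usegvfshelper=true, git routes missing-object
+	# faults through gvfs-helper on demand.)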
+	#
+	git -C "$REPO_T1" -c core.usegvfshelper=true \
+		log $(cat m3.branch) -- file9.txt \
+		>OUT.output 2>OUT.stderr &&
+
+	# Verify that gvfs-helper wrote the fetched blobs to the
+	# local ODB, such that a second attempt with gvfs-helper
+	# turned off should succeed.
+	#
+	git -C "$REPO_T1" -c core.usegvfshelper=false \
+		log $(cat m3.branch) -- file9.txt \
+		>OUT.output 2>OUT.stderr
+'
+
+test_expect_success 'integration: explicit commit/trees, implicit blobs: diff 2 commits' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server &&
+
+	# We have a very empty repo.  Seed it with all of the commits
+	# and trees.  The purpose of this test is to demand-load the
+	# needed blobs only, so we prefetch the commits and trees.
+	#
+	git -C "$REPO_T1" gvfs-helper \
+		--cache-server=disable \
+		--remote=origin \
+		get \
+		<"$OIDS_CT_FILE" >OUT.output &&
+
+	# Confirm that we do not have the blobs locally.
+	# With gvfs-helper turned off, we should fail.
+	#
+	test_must_fail \
+		git -C "$REPO_T1" -c core.usegvfshelper=false \
+			diff $(cat m1.branch)..$(cat m3.branch) \
+			>OUT.output 2>OUT.stderr &&
+
+	# Turn on gvfs-helper and retry.  This should implicitly fetch
+	# any needed blobs.
+	#
+	git -C "$REPO_T1" -c core.usegvfshelper=true \
+		diff $(cat m1.branch)..$(cat m3.branch) \
+		>OUT.output 2>OUT.stderr &&
+
+	# Verify that gvfs-helper wrote the fetched blobs to the
+	# local ODB, such that a second attempt with gvfs-helper
+	# turned off should succeed.
+	#
+	git -C "$REPO_T1" -c core.usegvfshelper=false \
+		diff $(cat m1.branch)..$(cat m3.branch) \
+		>OUT.output 2>OUT.stderr
+'
+
+test_expect_success 'integration: fully implicit: diff 2 commits' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server &&
+
+	# Implicitly demand-load everything without any pre-seeding.
+	#
+	git -C "$REPO_T1" -c core.usegvfshelper=true \
+		diff $(cat m1.branch)..$(cat m3.branch) \
+		>OUT.output 2>OUT.stderr
+'
+
+test_done

From 8960222f7d347fe6aa48da4bffe27b7df1dc0d61 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler
Date: Thu, 25 Jul 2019 15:43:50 -0400
Subject: [PATCH 092/104] trace2:gvfs:experiment: increase default event depth
 for unpack-tree data

---
 trace2/tr2_tgt_event.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/trace2/tr2_tgt_event.c b/trace2/tr2_tgt_event.c
index 6353e8ad915610..e3193651f00697 100644
--- a/trace2/tr2_tgt_event.c
+++ b/trace2/tr2_tgt_event.c
@@ -33,7 +33,7 @@ static struct tr2_dst tr2dst_event = { TR2_SYSENV_EVENT, 0, 0, 0, 0 };
  * event target.  Use the TR2_SYSENV_EVENT_NESTING setting to increase
  * region details in the event target.
  */
-static int tr2env_event_max_nesting_levels = 2;
+static int tr2env_event_max_nesting_levels = 4;
 
 /*
  * Use the TR2_SYSENV_EVENT_BRIEF to omit the