From 19048fdc9ddae275b02cfb9b1de1c35bcfe94cb0 Mon Sep 17 00:00:00 2001 From: Nick Vatamaniuc Date: Thu, 20 Dec 2018 12:11:10 -0500 Subject: [PATCH 1/2] Move fabric streams to a fabric_streams module Streams functionality is fairly isolated from the rest of the utils module so move it to its own. This is mostly in preparation to add a streams workers cleaner process. --- .../src/couch_replicator_fabric.erl | 4 +- src/fabric/src/fabric_streams.erl | 119 ++++++++++++++++++ src/fabric/src/fabric_util.erl | 88 ------------- src/fabric/src/fabric_view_all_docs.erl | 4 +- src/fabric/src/fabric_view_changes.erl | 4 +- src/fabric/src/fabric_view_map.erl | 4 +- src/fabric/src/fabric_view_reduce.erl | 4 +- 7 files changed, 129 insertions(+), 98 deletions(-) create mode 100644 src/fabric/src/fabric_streams.erl diff --git a/src/couch_replicator/src/couch_replicator_fabric.erl b/src/couch_replicator/src/couch_replicator_fabric.erl index 6998b2803fe..1650105b5ad 100644 --- a/src/couch_replicator/src/couch_replicator_fabric.erl +++ b/src/couch_replicator/src/couch_replicator_fabric.erl @@ -27,12 +27,12 @@ docs(DbName, Options, QueryArgs, Callback, Acc) -> Shards, couch_replicator_fabric_rpc, docs, [Options, QueryArgs]), RexiMon = fabric_util:create_monitors(Workers0), try - case fabric_util:stream_start(Workers0, #shard.ref) of + case fabric_streams:start(Workers0, #shard.ref) of {ok, Workers} -> try docs_int(DbName, Workers, QueryArgs, Callback, Acc) after - fabric_util:cleanup(Workers) + fabric_streams:cleanup(Workers) end; {timeout, NewState} -> DefunctWorkers = fabric_util:remove_done_workers( diff --git a/src/fabric/src/fabric_streams.erl b/src/fabric/src/fabric_streams.erl new file mode 100644 index 00000000000..32217c3cfd5 --- /dev/null +++ b/src/fabric/src/fabric_streams.erl @@ -0,0 +1,119 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. 
You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(fabric_streams). + +-export([ + start/2, + start/4, + cleanup/1 +]). + +-include_lib("fabric/include/fabric.hrl"). +-include_lib("mem3/include/mem3.hrl"). + + +start(Workers, Keypos) -> + start(Workers, Keypos, undefined, undefined). + +start(Workers0, Keypos, StartFun, Replacements) -> + Fun = fun handle_stream_start/3, + Acc = #stream_acc{ + workers = fabric_dict:init(Workers0, waiting), + start_fun = StartFun, + replacements = Replacements + }, + Timeout = fabric_util:request_timeout(), + case rexi_utils:recv(Workers0, Keypos, Fun, Acc, Timeout, infinity) of + {ok, #stream_acc{workers=Workers}} -> + true = fabric_view:is_progress_possible(Workers), + AckedWorkers = fabric_dict:fold(fun(Worker, From, WorkerAcc) -> + rexi:stream_start(From), + [Worker | WorkerAcc] + end, [], Workers), + {ok, AckedWorkers}; + Else -> + Else + end. + + +cleanup(Workers) -> + fabric_util:cleanup(Workers). 
+
+
+handle_stream_start({rexi_DOWN, _, {_, NodeRef}, _}, _, St) ->
+    case fabric_util:remove_down_workers(St#stream_acc.workers, NodeRef) of
+        {ok, Workers} ->
+            {ok, St#stream_acc{workers=Workers}};
+        error ->
+            Reason = {nodedown, <<"progress not possible">>},
+            {error, Reason}
+    end;
+
+handle_stream_start({rexi_EXIT, Reason}, Worker, St) ->
+    Workers = fabric_dict:erase(Worker, St#stream_acc.workers),
+    Replacements = St#stream_acc.replacements,
+    case {fabric_view:is_progress_possible(Workers), Reason} of
+        {true, _} ->
+            {ok, St#stream_acc{workers=Workers}};
+        {false, {maintenance_mode, _Node}} when Replacements /= undefined ->
+            % Check if we have replacements for this range
+            % and start the new workers if so.
+            case lists:keytake(Worker#shard.range, 1, Replacements) of
+                {value, {_Range, WorkerReplacements}, NewReplacements} ->
+                    FinalWorkers = lists:foldl(fun(Repl, NewWorkers) ->
+                        NewWorker = (St#stream_acc.start_fun)(Repl),
+                        fabric_dict:store(NewWorker, waiting, NewWorkers)
+                    end, Workers, WorkerReplacements),
+                    % Assert that our replaced worker provides us
+                    % the opportunity to make progress.
+                    true = fabric_view:is_progress_possible(FinalWorkers),
+                    NewRefs = fabric_dict:fetch_keys(FinalWorkers),
+                    {new_refs, NewRefs, St#stream_acc{
+                        workers=FinalWorkers,
+                        replacements=NewReplacements
+                    }};
+                false ->
+                    % If progress isn't possible and we don't have any
+                    % replacements then we're dead in the water. 
+ Error = {nodedown, <<"progress not possible">>}, + {error, Error} + end; + {false, _} -> + {error, fabric_util:error_info(Reason)} + end; + +handle_stream_start(rexi_STREAM_INIT, {Worker, From}, St) -> + case fabric_dict:lookup_element(Worker, St#stream_acc.workers) of + undefined -> + % This worker lost the race with other partition copies, terminate + rexi:stream_cancel(From), + {ok, St}; + waiting -> + % Don't ack the worker yet so they don't start sending us + % rows until we're ready + Workers0 = fabric_dict:store(Worker, From, St#stream_acc.workers), + Workers1 = fabric_view:remove_overlapping_shards(Worker, Workers0), + case fabric_dict:any(waiting, Workers1) of + true -> + {ok, St#stream_acc{workers=Workers1}}; + false -> + {stop, St#stream_acc{workers=Workers1}} + end + end; + +handle_stream_start({ok, ddoc_updated}, _, St) -> + cleanup(St#stream_acc.workers), + {stop, ddoc_updated}; + +handle_stream_start(Else, _, _) -> + exit({invalid_stream_start, Else}). diff --git a/src/fabric/src/fabric_util.erl b/src/fabric/src/fabric_util.erl index e622c6aa056..cc1f1b62203 100644 --- a/src/fabric/src/fabric_util.erl +++ b/src/fabric/src/fabric_util.erl @@ -16,7 +16,6 @@ update_counter/3, remove_ancestors/2, create_monitors/1, kv/2, remove_down_workers/2, doc_id_and_rev/1]). -export([request_timeout/0, attachments_timeout/0, all_docs_timeout/0]). --export([stream_start/2, stream_start/4]). -export([log_timeout/2, remove_done_workers/2]). -export([is_users_db/1, is_replicator_db/1, fake_db/2]). -export([upgrade_mrargs/1]). @@ -51,93 +50,6 @@ submit_jobs(Shards, Module, EndPoint, ExtraArgs) -> cleanup(Workers) -> [rexi:kill(Node, Ref) || #shard{node=Node, ref=Ref} <- Workers]. -stream_start(Workers, Keypos) -> - stream_start(Workers, Keypos, undefined, undefined). 
- -stream_start(Workers0, Keypos, StartFun, Replacements) -> - Fun = fun handle_stream_start/3, - Acc = #stream_acc{ - workers = fabric_dict:init(Workers0, waiting), - start_fun = StartFun, - replacements = Replacements - }, - Timeout = request_timeout(), - case rexi_utils:recv(Workers0, Keypos, Fun, Acc, Timeout, infinity) of - {ok, #stream_acc{workers=Workers}} -> - true = fabric_view:is_progress_possible(Workers), - AckedWorkers = fabric_dict:fold(fun(Worker, From, WorkerAcc) -> - rexi:stream_start(From), - [Worker | WorkerAcc] - end, [], Workers), - {ok, AckedWorkers}; - Else -> - Else - end. - -handle_stream_start({rexi_DOWN, _, {_, NodeRef}, _}, _, St) -> - case fabric_util:remove_down_workers(St#stream_acc.workers, NodeRef) of - {ok, Workers} -> - {ok, St#stream_acc{workers=Workers}}; - error -> - Reason = {nodedown, <<"progress not possible">>}, - {error, Reason} - end; -handle_stream_start({rexi_EXIT, Reason}, Worker, St) -> - Workers = fabric_dict:erase(Worker, St#stream_acc.workers), - Replacements = St#stream_acc.replacements, - case {fabric_view:is_progress_possible(Workers), Reason} of - {true, _} -> - {ok, St#stream_acc{workers=Workers}}; - {false, {maintenance_mode, _Node}} when Replacements /= undefined -> - % Check if we have replacements for this range - % and start the new workers if so. - case lists:keytake(Worker#shard.range, 1, Replacements) of - {value, {_Range, WorkerReplacements}, NewReplacements} -> - FinalWorkers = lists:foldl(fun(Repl, NewWorkers) -> - NewWorker = (St#stream_acc.start_fun)(Repl), - fabric_dict:store(NewWorker, waiting, NewWorkers) - end, Workers, WorkerReplacements), - % Assert that our replaced worker provides us - % the oppurtunity to make progress. 
- true = fabric_view:is_progress_possible(FinalWorkers), - NewRefs = fabric_dict:fetch_keys(FinalWorkers), - {new_refs, NewRefs, St#stream_acc{ - workers=FinalWorkers, - replacements=NewReplacements - }}; - false -> - % If we progress isn't possible and we don't have any - % replacements then we're dead in the water. - Error = {nodedown, <<"progress not possible">>}, - {error, Error} - end; - {false, _} -> - {error, fabric_util:error_info(Reason)} - end; -handle_stream_start(rexi_STREAM_INIT, {Worker, From}, St) -> - case fabric_dict:lookup_element(Worker, St#stream_acc.workers) of - undefined -> - % This worker lost the race with other partition copies, terminate - rexi:stream_cancel(From), - {ok, St}; - waiting -> - % Don't ack the worker yet so they don't start sending us - % rows until we're ready - Workers0 = fabric_dict:store(Worker, From, St#stream_acc.workers), - Workers1 = fabric_view:remove_overlapping_shards(Worker, Workers0), - case fabric_dict:any(waiting, Workers1) of - true -> - {ok, St#stream_acc{workers=Workers1}}; - false -> - {stop, St#stream_acc{workers=Workers1}} - end - end; -handle_stream_start({ok, ddoc_updated}, _, St) -> - cleanup(St#stream_acc.workers), - {stop, ddoc_updated}; -handle_stream_start(Else, _, _) -> - exit({invalid_stream_start, Else}). - recv(Workers, Keypos, Fun, Acc0) -> rexi_utils:recv(Workers, Keypos, Fun, Acc0, request_timeout(), infinity). 
diff --git a/src/fabric/src/fabric_view_all_docs.erl b/src/fabric/src/fabric_view_all_docs.erl index 30c8e8d5190..a404125faeb 100644 --- a/src/fabric/src/fabric_view_all_docs.erl +++ b/src/fabric/src/fabric_view_all_docs.erl @@ -26,12 +26,12 @@ go(DbName, Options, #mrargs{keys=undefined} = QueryArgs, Callback, Acc) -> Shards, fabric_rpc, all_docs, [Options, QueryArgs]), RexiMon = fabric_util:create_monitors(Workers0), try - case fabric_util:stream_start(Workers0, #shard.ref) of + case fabric_streams:start(Workers0, #shard.ref) of {ok, Workers} -> try go(DbName, Options, Workers, QueryArgs, Callback, Acc) after - fabric_util:cleanup(Workers) + fabric_streams:cleanup(Workers) end; {timeout, NewState} -> DefunctWorkers = fabric_util:remove_done_workers( diff --git a/src/fabric/src/fabric_view_changes.erl b/src/fabric/src/fabric_view_changes.erl index 7288f1aa51e..f96bb058d57 100644 --- a/src/fabric/src/fabric_view_changes.erl +++ b/src/fabric/src/fabric_view_changes.erl @@ -166,7 +166,7 @@ send_changes(DbName, ChangesArgs, Callback, PackedSeqs, AccIn, Timeout) -> end, RexiMon = fabric_util:create_monitors(Workers0), try - case fabric_util:stream_start(Workers0, #shard.ref, StartFun, Repls) of + case fabric_streams:start(Workers0, #shard.ref, StartFun, Repls) of {ok, Workers} -> try LiveSeqs = lists:map(fun(W) -> @@ -178,7 +178,7 @@ send_changes(DbName, ChangesArgs, Callback, PackedSeqs, AccIn, Timeout) -> send_changes(DbName, Workers, LiveSeqs, ChangesArgs, Callback, AccIn, Timeout) after - fabric_util:cleanup(Workers) + fabric_streams:cleanup(Workers) end; {timeout, NewState} -> DefunctWorkers = fabric_util:remove_done_workers( diff --git a/src/fabric/src/fabric_view_map.erl b/src/fabric/src/fabric_view_map.erl index b6a3d6f8371..ee51bfe740d 100644 --- a/src/fabric/src/fabric_view_map.erl +++ b/src/fabric/src/fabric_view_map.erl @@ -36,14 +36,14 @@ go(DbName, Options, DDoc, View, Args, Callback, Acc, VInfo) -> Workers0 = fabric_util:submit_jobs(Shards, fabric_rpc, 
map_view, RPCArgs), RexiMon = fabric_util:create_monitors(Workers0), try - case fabric_util:stream_start(Workers0, #shard.ref, StartFun, Repls) of + case fabric_streams:start(Workers0, #shard.ref, StartFun, Repls) of {ok, ddoc_updated} -> Callback({error, ddoc_updated}, Acc); {ok, Workers} -> try go(DbName, Workers, VInfo, Args, Callback, Acc) after - fabric_util:cleanup(Workers) + fabric_streams:cleanup(Workers) end; {timeout, NewState} -> DefunctWorkers = fabric_util:remove_done_workers( diff --git a/src/fabric/src/fabric_view_reduce.erl b/src/fabric/src/fabric_view_reduce.erl index a74be107337..b2b8a05f020 100644 --- a/src/fabric/src/fabric_view_reduce.erl +++ b/src/fabric/src/fabric_view_reduce.erl @@ -35,14 +35,14 @@ go(DbName, DDoc, VName, Args, Callback, Acc, VInfo) -> Workers0 = fabric_util:submit_jobs(Shards,fabric_rpc,reduce_view,RPCArgs), RexiMon = fabric_util:create_monitors(Workers0), try - case fabric_util:stream_start(Workers0, #shard.ref, StartFun, Repls) of + case fabric_streams:start(Workers0, #shard.ref, StartFun, Repls) of {ok, ddoc_updated} -> Callback({error, ddoc_updated}, Acc); {ok, Workers} -> try go2(DbName, Workers, VInfo, Args, Callback, Acc) after - fabric_util:cleanup(Workers) + fabric_streams:cleanup(Workers) end; {timeout, NewState} -> DefunctWorkers = fabric_util:remove_done_workers( From 41757cd0f8ec48991c0c2fddcb6085b77e636e09 Mon Sep 17 00:00:00 2001 From: Nick Vatamaniuc Date: Thu, 20 Dec 2018 12:19:01 -0500 Subject: [PATCH 2/2] Clean rexi stream workers when coordinator process is killed Sometimes fabric coordinators end up getting brutally terminated [1], and in that case they might never process their `after` clause where their remote rexi workers are killed. Those workers are left lingering around keeping databases active for up to 5 minutes at a time. To prevent that from happening, let coordinators which use streams spawn an auxiliary cleaner process. 
This process will monitor the main coordinator and if it dies will ensure
remote workers are killed, freeing resources immediately.

In order not to send 2x the number of kill messages during the normal exit,
fabric_util:cleanup() will stop the auxiliary process before continuing.

[1] One instance is when the ddoc cache is refreshed:
https://github.com/apache/couchdb/blob/master/src/ddoc_cache/src/ddoc_cache_entry.erl#L236
---
 src/fabric/src/fabric_streams.erl | 132 ++++++++++++++++++++++++++++++
 1 file changed, 132 insertions(+)

diff --git a/src/fabric/src/fabric_streams.erl b/src/fabric/src/fabric_streams.erl
index 32217c3cfd5..ae0c2be55d4 100644
--- a/src/fabric/src/fabric_streams.erl
+++ b/src/fabric/src/fabric_streams.erl
@@ -22,6 +22,9 @@
 -include_lib("mem3/include/mem3.hrl").
 
 
+-define(WORKER_CLEANER, fabric_worker_cleaner).
+
+
 start(Workers, Keypos) ->
     start(Workers, Keypos, undefined, undefined).
 
@@ -32,6 +35,7 @@ start(Workers0, Keypos, StartFun, Replacements) ->
         start_fun = StartFun,
         replacements = Replacements
     },
+    spawn_worker_cleaner(self(), Workers0),
     Timeout = fabric_util:request_timeout(),
     case rexi_utils:recv(Workers0, Keypos, Fun, Acc, Timeout, infinity) of
         {ok, #stream_acc{workers=Workers}} ->
@@ -47,6 +51,16 @@ start(Workers0, Keypos, StartFun, Replacements) ->
 
 
 cleanup(Workers) ->
+    % Stop the auxiliary cleaner process as we got to the point where cleanup
+    % happens in the regular fashion so we don't want to send 2x the number of kill
+    % messages
+    case get(?WORKER_CLEANER) of
+        CleanerPid when is_pid(CleanerPid) ->
+            erase(?WORKER_CLEANER),
+            exit(CleanerPid, kill);
+        _ ->
+            ok
+    end,
     fabric_util:cleanup(Workers). 
@@ -72,6 +86,7 @@ handle_stream_start({rexi_EXIT, Reason}, Worker, St) ->
                 {value, {_Range, WorkerReplacements}, NewReplacements} ->
                     FinalWorkers = lists:foldl(fun(Repl, NewWorkers) ->
                         NewWorker = (St#stream_acc.start_fun)(Repl),
+                        add_worker_to_cleaner(self(), NewWorker),
                         fabric_dict:store(NewWorker, waiting, NewWorkers)
                     end, Workers, WorkerReplacements),
                     % Assert that our replaced worker provides us
@@ -117,3 +132,120 @@ handle_stream_start({ok, ddoc_updated}, _, St) ->
 
 handle_stream_start(Else, _, _) ->
     exit({invalid_stream_start, Else}).
+
+
+% Spawn an auxiliary rexi worker cleaner. This will be used in cases
+% when the coordinator (request) process is forcibly killed and doesn't
+% get a chance to process its `after` fabric:clean/1 clause.
+spawn_worker_cleaner(Coordinator, Workers) ->
+    case get(?WORKER_CLEANER) of
+        undefined ->
+            Pid = spawn(fun() ->
+                erlang:monitor(process, Coordinator),
+                cleaner_loop(Coordinator, Workers)
+            end),
+            put(?WORKER_CLEANER, Pid),
+            Pid;
+        ExistingCleaner ->
+            ExistingCleaner
+    end.
+
+
+cleaner_loop(Pid, Workers) ->
+    receive
+        {add_worker, Pid, Worker} ->
+            cleaner_loop(Pid, [Worker | Workers]);
+        {'DOWN', _, _, Pid, _} ->
+            fabric_util:cleanup(Workers)
+    end.
+
+
+add_worker_to_cleaner(CoordinatorPid, Worker) ->
+    case get(?WORKER_CLEANER) of
+        CleanerPid when is_pid(CleanerPid) ->
+            CleanerPid ! {add_worker, CoordinatorPid, Worker};
+        _ ->
+            ok
+    end.
+
+
+
+-ifdef(TEST).
+
+-include_lib("eunit/include/eunit.hrl").
+
+worker_cleaner_test_() ->
+    {
+        "Fabric spawn_worker_cleaner test", {
+            setup, fun setup/0, fun teardown/1,
+            fun(_) -> [
+                should_clean_workers(),
+                does_not_fire_if_cleanup_called(),
+                should_clean_additional_worker_too()
+            ] end
+        }
+    }. 
+ + +should_clean_workers() -> + ?_test(begin + meck:reset(rexi), + erase(?WORKER_CLEANER), + Workers = [ + #shard{node = 'n1', ref = make_ref()}, + #shard{node = 'n2', ref = make_ref()} + ], + {Coord, _} = spawn_monitor(fun() -> receive die -> ok end end), + Cleaner = spawn_worker_cleaner(Coord, Workers), + Ref = erlang:monitor(process, Cleaner), + Coord ! die, + receive {'DOWN', Ref, _, Cleaner, _} -> ok end, + ?assertEqual(2, meck:num_calls(rexi, kill, 2)) + end). + + +does_not_fire_if_cleanup_called() -> + ?_test(begin + meck:reset(rexi), + erase(?WORKER_CLEANER), + Workers = [ + #shard{node = 'n1', ref = make_ref()}, + #shard{node = 'n2', ref = make_ref()} + ], + {Coord, _} = spawn_monitor(fun() -> receive die -> ok end end), + Cleaner = spawn_worker_cleaner(Coord, Workers), + Ref = erlang:monitor(process, Cleaner), + cleanup(Workers), + Coord ! die, + receive {'DOWN', Ref, _, _, _} -> ok end, + % 2 calls would be from cleanup/1 function. If cleanup process fired + % too it would have been 4 calls total. + ?assertEqual(2, meck:num_calls(rexi, kill, 2)) + end). + + +should_clean_additional_worker_too() -> + ?_test(begin + meck:reset(rexi), + erase(?WORKER_CLEANER), + Workers = [ + #shard{node = 'n1', ref = make_ref()} + ], + {Coord, _} = spawn_monitor(fun() -> receive die -> ok end end), + Cleaner = spawn_worker_cleaner(Coord, Workers), + add_worker_to_cleaner(Coord, #shard{node = 'n2', ref = make_ref()}), + Ref = erlang:monitor(process, Cleaner), + Coord ! die, + receive {'DOWN', Ref, _, Cleaner, _} -> ok end, + ?assertEqual(2, meck:num_calls(rexi, kill, 2)) + end). + + +setup() -> + ok = meck:expect(rexi, kill, fun(_, _) -> ok end). + + +teardown(_) -> + meck:unload(). + +-endif.