From 087d2721f1a5fb6f650b759610bebd8315d508c3 Mon Sep 17 00:00:00 2001 From: Nick Vatamaniuc Date: Thu, 6 Dec 2018 13:47:49 -0500 Subject: [PATCH 01/26] Filter out empty missing_revs results in mem3_rep This avoids needlessly making cross-cluster fabric:update_docs(Db, [], Opts) calls. --- src/mem3/src/mem3_rep.erl | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/mem3/src/mem3_rep.erl b/src/mem3/src/mem3_rep.erl index b65fa7a8623..340bc0e7b1b 100644 --- a/src/mem3/src/mem3_rep.erl +++ b/src/mem3/src/mem3_rep.erl @@ -426,10 +426,14 @@ find_missing_revs(Acc) -> #doc_info{id=Id, revs=RevInfos} = couch_doc:to_doc_info(FDI), {Id, [R || #rev_info{rev=R} <- RevInfos]} end, Infos), - mem3_rpc:get_missing_revs(Node, Name, IdsRevs, [ + Missing = mem3_rpc:get_missing_revs(Node, Name, IdsRevs, [ {io_priority, {internal_repl, Name}}, ?ADMIN_CTX - ]). + ]), + lists:filter(fun + ({_Id, [], _Ancestors}) -> false; + ({_Id, _Revs, _Ancestors}) -> true + end, Missing). chunk_revs(Revs) -> From 53dca954d25d6a44ed4e0b9993ed0dd0ce4394b4 Mon Sep 17 00:00:00 2001 From: wenwl Date: Wed, 28 Nov 2018 21:43:22 +0800 Subject: [PATCH 02/26] Fix function_clause error - fix function_clause error on invalid DB security objects when the request body of PUT db/_security endpoint is not a correct json format Closes #1384 --- src/chttpd/test/chttpd_security_tests.erl | 118 +++++++++++++++++++++- src/couch/src/couch_db.erl | 14 ++- 2 files changed, 126 insertions(+), 6 deletions(-) diff --git a/src/chttpd/test/chttpd_security_tests.erl b/src/chttpd/test/chttpd_security_tests.erl index 737a32e11e6..12a53acf2c2 100644 --- a/src/chttpd/test/chttpd_security_tests.erl +++ b/src/chttpd/test/chttpd_security_tests.erl @@ -116,6 +116,27 @@ all_test_() -> } }. 
+security_object_validate_test_() -> + { + "chttpd security object validate tests", + { + setup, + fun chttpd_test_util:start_couch/0, fun chttpd_test_util:stop_couch/1, + { + foreach, + fun setup/0, fun teardown/1, + [ + fun should_return_ok_for_sec_obj_with_roles/1, + fun should_return_ok_for_sec_obj_with_names/1, + fun should_return_ok_for_sec_obj_with_roles_and_names/1, + fun should_return_error_for_sec_obj_with_incorrect_roles_and_names/1, + fun should_return_error_for_sec_obj_with_incorrect_roles/1, + fun should_return_error_for_sec_obj_with_incorrect_names/1 + ] + } + } + }. + should_allow_admin_db_compaction([Url,_UsersUrl]) -> ?_assertEqual(true, begin @@ -126,7 +147,6 @@ should_allow_admin_db_compaction([Url,_UsersUrl]) -> couch_util:get_value(<<"ok">>, InnerJson, undefined) end). - should_allow_valid_password_to_create_user([_Url, UsersUrl]) -> UserDoc = "{\"_id\": \"org.couchdb.user:foo\", \"name\": \"foo\", \"type\": \"user\", \"roles\": [], \"password\": \"bar\"}", @@ -207,3 +227,99 @@ should_disallow_anonymous_db_view_cleanup([Url,_UsersUrl]) -> {InnerJson} = ResultJson, ErrType = couch_util:get_value(<<"error">>, InnerJson), ?_assertEqual(<<"unauthorized">>, ErrType). + +should_return_ok_for_sec_obj_with_roles([Url,_UsersUrl]) -> + SecurityUrl = lists:concat([Url, "/_security"]), + SecurityProperties = [ + {<<"admins">>,{[{<<"roles">>,[<>]}]}}, + {<<"members">>,{[{<<"roles">>,[<>]}]}} + ], + + Body = jiffy:encode({SecurityProperties}), + {ok, Status, _, _} = test_request:put(SecurityUrl, + [?CONTENT_JSON, ?AUTH], Body), + ?_assertEqual(200, Status). + +should_return_ok_for_sec_obj_with_names([Url,_UsersUrl]) -> + SecurityUrl = lists:concat([Url, "/_security"]), + SecurityProperties = [ + {<<"admins">>,{[{<<"names">>,[<>]}]}}, + {<<"members">>,{[{<<"names">>,[<>]}]}} + ], + + Body = jiffy:encode({SecurityProperties}), + {ok, Status, _, _} = test_request:put(SecurityUrl, + [?CONTENT_JSON, ?AUTH], Body), + ?_assertEqual(200, Status). 
+ +should_return_ok_for_sec_obj_with_roles_and_names([Url,_UsersUrl]) -> + SecurityUrl = lists:concat([Url, "/_security"]), + SecurityProperties = [ + {<<"admins">>, {[{<<"names">>,[<>]}, + {<<"roles">>,[<>]}]}}, + {<<"members">>,{[{<<"names">>,[<>]}, + {<<"roles">>,[<>]}]}} + ], + + Body = jiffy:encode({SecurityProperties}), + {ok, Status, _, _} = test_request:put(SecurityUrl, + [?CONTENT_JSON, ?AUTH], Body), + ?_assertEqual(200, Status). + +should_return_error_for_sec_obj_with_incorrect_roles_and_names( + [Url,_UsersUrl]) -> + SecurityUrl = lists:concat([Url, "/_security"]), + SecurityProperties = [ + {<<"admins">>,{[{<<"names">>,[123]}]}}, + {<<"members">>,{[{<<"roles">>,["foo"]}]}} + ], + + Body = jiffy:encode({SecurityProperties}), + {ok, Status, _, RespBody} = test_request:put(SecurityUrl, + [?CONTENT_JSON, ?AUTH], Body), + ResultJson = ?JSON_DECODE(RespBody), + [ + ?_assertEqual(500, Status), + ?_assertEqual({[ + {<<"error">>,<<"error">>}, + {<<"reason">>,<<"no_majority">>} + ]}, ResultJson) + ]. + +should_return_error_for_sec_obj_with_incorrect_roles([Url,_UsersUrl]) -> + SecurityUrl = lists:concat([Url, "/_security"]), + SecurityProperties = [ + {<<"admins">>,{[{<<"roles">>,[?TEST_ADMIN]}]}}, + {<<"members">>,{[{<<"roles">>,[<>]}]}} + ], + + Body = jiffy:encode({SecurityProperties}), + {ok, Status, _, RespBody} = test_request:put(SecurityUrl, + [?CONTENT_JSON, ?AUTH], Body), + ResultJson = ?JSON_DECODE(RespBody), + [ + ?_assertEqual(500, Status), + ?_assertEqual({[ + {<<"error">>,<<"error">>}, + {<<"reason">>,<<"no_majority">>} + ]}, ResultJson) + ]. 
+ +should_return_error_for_sec_obj_with_incorrect_names([Url,_UsersUrl]) -> + SecurityUrl = lists:concat([Url, "/_security"]), + SecurityProperties = [ + {<<"admins">>,{[{<<"names">>,[<>]}]}}, + {<<"members">>,{[{<<"names">>,[?TEST_MEMBER]}]}} + ], + + Body = jiffy:encode({SecurityProperties}), + {ok, Status, _, RespBody} = test_request:put(SecurityUrl, + [?CONTENT_JSON, ?AUTH], Body), + ResultJson = ?JSON_DECODE(RespBody), + [ + ?_assertEqual(500, Status), + ?_assertEqual({[ + {<<"error">>,<<"error">>}, + {<<"reason">>,<<"no_majority">>} + ]}, ResultJson) + ]. diff --git a/src/couch/src/couch_db.erl b/src/couch/src/couch_db.erl index 0d435c2ffef..0df04db3a13 100644 --- a/src/couch/src/couch_db.erl +++ b/src/couch/src/couch_db.erl @@ -736,19 +736,23 @@ validate_security_object(SecProps) -> % validate user input validate_names_and_roles({Props}) when is_list(Props) -> - case couch_util:get_value(<<"names">>,Props,[]) of + case couch_util:get_value(<<"names">>, Props, []) of Ns when is_list(Ns) -> [throw("names must be a JSON list of strings") ||N <- Ns, not is_binary(N)], Ns; - _ -> throw("names must be a JSON list of strings") + _ -> + throw("names must be a JSON list of strings") end, - case couch_util:get_value(<<"roles">>,Props,[]) of + case couch_util:get_value(<<"roles">>, Props, []) of Rs when is_list(Rs) -> [throw("roles must be a JSON list of strings") ||R <- Rs, not is_binary(R)], Rs; - _ -> throw("roles must be a JSON list of strings") + _ -> + throw("roles must be a JSON list of strings") end, - ok. + ok; +validate_names_and_roles(_) -> + throw("admins or members must be a JSON list of strings"). get_revs_limit(#db{} = Db) -> couch_db_engine:get_revs_limit(Db). 
From 0c54ac9ae3a11315616acd528d281769b03eef4b Mon Sep 17 00:00:00 2001 From: Nick Vatamaniuc Date: Tue, 8 Jan 2019 15:17:06 -0500 Subject: [PATCH 03/26] Fix end_time field in /_replicate response Previously `end_time` was generated converting the start_time to universal, then passing that to `httpd_util:rfc1123_date/1`. However, `rfc1123_date/1` also translates its argument from local to UTC time, that is, it accepts input to be in local time format. Fixes #1841 --- src/couch_replicator/src/couch_replicator_scheduler_job.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/couch_replicator/src/couch_replicator_scheduler_job.erl b/src/couch_replicator/src/couch_replicator_scheduler_job.erl index f669d464db3..412ff7d05dd 100644 --- a/src/couch_replicator/src/couch_replicator_scheduler_job.erl +++ b/src/couch_replicator/src/couch_replicator_scheduler_job.erl @@ -735,8 +735,8 @@ do_checkpoint(State) -> {SrcInstanceStartTime, TgtInstanceStartTime} -> couch_log:notice("recording a checkpoint for `~s` -> `~s` at source update_seq ~p", [SourceName, TargetName, NewSeq]), - UniversalStartTime = calendar:now_to_universal_time(ReplicationStartTime), - StartTime = ?l2b(httpd_util:rfc1123_date(UniversalStartTime)), + LocalStartTime = calendar:now_to_local_time(ReplicationStartTime), + StartTime = ?l2b(httpd_util:rfc1123_date(LocalStartTime)), EndTime = ?l2b(httpd_util:rfc1123_date()), NewHistoryEntry = {[ {<<"session_id">>, SessionId}, From 047179af8d19ea69a6b8b1ef2da16c858f366109 Mon Sep 17 00:00:00 2001 From: "Paul J. Davis" Date: Mon, 14 Jan 2019 18:32:34 -0600 Subject: [PATCH 04/26] Fix fabric_open_doc_revs There was a subtle bug when opening specific revisions in fabric_doc_open_revs due to a race condition between updates being applied across a cluster. The underlying cause here was due to the stemming after a document had been updated more than revs_limit number of times along with concurrent reads to a node that had not yet made the update.
To illustrate, let's consider a document A which has a revision history from `{N, RevN}` to `{N+1000, RevN+1000}` (assuming revs_limit is the default 1000). If we consider a single node perspective, when an update comes in we add the new revision and stem the oldest revision. The revisions on the node would be `{N+1, RevN+1}` to `{N+1001, RevN+1001}`. The bug exists when we attempt to open revisions on a different node that has yet to apply the new update. In this case fabric_doc_open_revs could be called with `{N+1000, RevN+1000}`. This results in a response from fabric_doc_open_revs that includes two different `{ok, Doc}` results instead of the expected one instance. The reason for this is that one document has revisions `{N+1, RevN+1}` to `{N+1000, RevN+1000}` from the node that has applied the update, while the node without the update responds with revisions `{N, RevN}` to `{N+1000, RevN+1000}`. To rephrase that, a node that has applied an update can end up returning a revision path that contains `revs_limit - 1` revisions while a node without the update returns all `revs_limit` revisions. This slight change in the path prevented the responses from being properly combined into a single response. This bug has existed for many years. However, read repair effectively prevents it from being a significant issue by immediately fixing the revision history discrepancy. This was discovered due to the recent bug in read repair during a mixed cluster upgrade to a release including clustered purge. In this situation we end up crashing the design document cache which then leads to all of the design document requests being direct reads which can end up causing cluster nodes to OOM and die. The conditions require a significant number of design document edits coupled with already significant load to those modified design documents. The most direct example observed was a cluster that had a significant number of filtered replications in and out of the cluster.
--- src/fabric/src/fabric_doc_open_revs.erl | 116 +++++++++++++++++++----- 1 file changed, 91 insertions(+), 25 deletions(-) diff --git a/src/fabric/src/fabric_doc_open_revs.erl b/src/fabric/src/fabric_doc_open_revs.erl index 234b108efd6..dc03f3df69a 100644 --- a/src/fabric/src/fabric_doc_open_revs.erl +++ b/src/fabric/src/fabric_doc_open_revs.erl @@ -243,8 +243,7 @@ format_reply(true, Replies, _) -> tree_format_replies(Replies); format_reply(false, Replies, _) -> - Filtered = filter_reply(Replies), - dict_format_replies(Filtered). + dict_format_replies(Replies). tree_format_replies(RevTree) -> @@ -260,22 +259,59 @@ tree_format_replies(RevTree) -> dict_format_replies(Dict) -> - lists:sort([Reply || {_, {Reply, _}} <- Dict]). - -filter_reply(Replies) -> - AllFoundRevs = lists:foldl(fun - ({{{not_found, missing}, _}, _}, Acc) -> - Acc; - ({{_, {Pos, [Rev | _]}}, _}, Acc) -> - [{Pos, Rev} | Acc] - end, [], Replies), - %% keep not_found replies only for the revs that don't also have doc reply - lists:filter(fun - ({{{not_found, missing}, Rev}, _}) -> - not lists:member(Rev, AllFoundRevs); - (_) -> - true - end, Replies). + Replies0 = [Reply || {_, {Reply, _}} <- Dict], + + AllFoundRevs = lists:foldl(fun(Reply, Acc) -> + case Reply of + {ok, #doc{revs = {Pos, [RevId | _]}}} -> + [{Pos, RevId} | Acc]; + _ -> + Acc + end + end, [], Replies0), + + %% Drop any not_found replies for which we + %% found the revision on a different node. + Replies1 = lists:filter(fun(Reply) -> + case Reply of + {{not_found, missing}, Rev} -> + not lists:member(Rev, AllFoundRevs); + _ -> + true + end + end, Replies0), + + % Remove replies with shorter revision + % paths for a given revision. + collapse_duplicate_revs(Replies1). + + +collapse_duplicate_revs(Replies) -> + % The collapse logic requires that replies are + % sorted so that shorter rev paths are in + % the list just before longer lists. 
+ % + % This somewhat implicitly relies on Erlang's + % sorting of [A, B] < [A, B, C] for all values + % of C. + collapse_duplicate_revs_int(lists:sort(Replies)). + + +collapse_duplicate_revs_int([]) -> + []; + +collapse_duplicate_revs_int([{ok, Doc1}, {ok, Doc2} | Rest]) -> + {D1, R1} = Doc1#doc.revs, + {D2, R2} = Doc2#doc.revs, + Head = case D1 == D2 andalso lists:prefix(R1, R2) of + true -> []; + false -> [{ok, Doc1}] + end, + Head ++ collapse_duplicate_revs([{ok, Doc2} | Rest]); + +collapse_duplicate_revs_int([Reply | Rest]) -> + [Reply | collapse_duplicate_revs(Rest)]. + -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). @@ -313,7 +349,9 @@ revs() -> [{1,<<"foo">>}, {1,<<"bar">>}, {1,<<"baz">>}]. foo1() -> {ok, #doc{revs = {1, [<<"foo">>]}}}. foo2() -> {ok, #doc{revs = {2, [<<"foo2">>, <<"foo">>]}}}. +foo2stemmed() -> {ok, #doc{revs = {2, [<<"foo2">>]}}}. fooNF() -> {{not_found, missing}, {1,<<"foo">>}}. +foo2NF() -> {{not_found, missing}, {2, <<"foo2">>}}. bar1() -> {ok, #doc{revs = {1, [<<"bar">>]}}}. barNF() -> {{not_found, missing}, {1,<<"bar">>}}. bazNF() -> {{not_found, missing}, {1,<<"baz">>}}. @@ -351,7 +389,10 @@ open_doc_revs_test_() -> check_node_rev_unmodified_on_down_or_exit(), check_not_found_replies_are_removed_when_doc_found(), check_not_found_returned_when_one_of_docs_not_found(), - check_not_found_returned_when_doc_not_found() + check_not_found_returned_when_doc_not_found(), + check_longer_rev_list_returned(), + check_longer_rev_list_not_combined(), + check_not_found_removed_and_longer_rev_list() ] }. @@ -685,24 +726,49 @@ check_node_rev_unmodified_on_down_or_exit() -> check_not_found_replies_are_removed_when_doc_found() -> ?_test(begin Replies = replies_to_dict([foo1(), bar1(), fooNF()]), - Expect = replies_to_dict([foo1(), bar1()]), - ?assertEqual(Expect, filter_reply(Replies)) + Expect = [bar1(), foo1()], + ?assertEqual(Expect, dict_format_replies(Replies)) end). 
check_not_found_returned_when_one_of_docs_not_found() -> ?_test(begin Replies = replies_to_dict([foo1(), foo2(), barNF()]), - Expect = replies_to_dict([foo1(), foo2(), barNF()]), - ?assertEqual(Expect, filter_reply(Replies)) + Expect = [foo1(), foo2(), barNF()], + ?assertEqual(Expect, dict_format_replies(Replies)) end). check_not_found_returned_when_doc_not_found() -> ?_test(begin Replies = replies_to_dict([fooNF(), barNF(), bazNF()]), - Expect = replies_to_dict([fooNF(), barNF(), bazNF()]), - ?assertEqual(Expect, filter_reply(Replies)) + Expect = [barNF(), bazNF(), fooNF()], + ?assertEqual(Expect, dict_format_replies(Replies)) end). +check_longer_rev_list_returned() -> + ?_test(begin + Replies = replies_to_dict([foo2(), foo2stemmed()]), + Expect = [foo2()], + ?assertEqual(2, length(Replies)), + ?assertEqual(Expect, dict_format_replies(Replies)) + end). + +check_longer_rev_list_not_combined() -> + ?_test(begin + Replies = replies_to_dict([foo2(), foo2stemmed(), bar1()]), + Expect = [bar1(), foo2()], + ?assertEqual(3, length(Replies)), + ?assertEqual(Expect, dict_format_replies(Replies)) + end). + +check_not_found_removed_and_longer_rev_list() -> + ?_test(begin + Replies = replies_to_dict([foo2(), foo2stemmed(), foo2NF()]), + Expect = [foo2()], + ?assertEqual(3, length(Replies)), + ?assertEqual(Expect, dict_format_replies(Replies)) + end). + + replies_to_dict(Replies) -> [reply_to_element(R) || R <- Replies]. 
From c95a40d2af58d151a1505cf29650bbca74eee5b2 Mon Sep 17 00:00:00 2001 From: jiangph Date: Thu, 17 Jan 2019 17:50:30 +0800 Subject: [PATCH 05/26] Support one purge request with more than 100 docid COUCHDB-3226 --- src/chttpd/test/chttpd_purge_tests.erl | 21 +++++++++++++++++++++ src/fabric/src/fabric_doc_purge.erl | 2 +- 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/src/chttpd/test/chttpd_purge_tests.erl b/src/chttpd/test/chttpd_purge_tests.erl index b3acb0668db..50f7e18e5c4 100644 --- a/src/chttpd/test/chttpd_purge_tests.erl +++ b/src/chttpd/test/chttpd_purge_tests.erl @@ -70,6 +70,7 @@ purge_test_() -> [ fun test_empty_purge_request/1, fun test_ok_purge_request/1, + fun test_ok_purge_request_with_101_docid/1, fun test_accepted_purge_request/1, fun test_partial_purge_request/1, fun test_mixed_purge_request/1, @@ -137,6 +138,26 @@ test_ok_purge_request(Url) -> end). +test_ok_purge_request_with_101_docid(Url) -> + ?_test(begin + PurgedDocsNum = 101, + IdsRevsEJson = lists:foldl(fun(I, CIdRevs) -> + Id = list_to_binary(integer_to_list(I)), + {ok, _, _, Body} = create_doc(Url, Id), + {Json} = ?JSON_DECODE(Body), + Rev = couch_util:get_value(<<"rev">>, Json, undefined), + [{Id, [Rev]} | CIdRevs] + end, [], lists:seq(1, PurgedDocsNum)), + + IdsRevs = binary_to_list(?JSON_ENCODE({IdsRevsEJson})), + ok = config:set("purge", "max_document_id_number", "101"), + {ok, Status, _, _} = test_request:post(Url ++ "/_purge/", + [?CONTENT_JSON, ?AUTH], IdsRevs), + ok = config:delete("purge", "max_document_id_number"), + ?assert(Status =:= 201 orelse Status =:= 202) + end). 
+ + test_accepted_purge_request(Url) -> ?_test(begin {ok, _, _, Body} = create_doc(Url, "doc1"), diff --git a/src/fabric/src/fabric_doc_purge.erl b/src/fabric/src/fabric_doc_purge.erl index 2571d0d7f0b..7e447ff1bc4 100644 --- a/src/fabric/src/fabric_doc_purge.erl +++ b/src/fabric/src/fabric_doc_purge.erl @@ -191,7 +191,7 @@ format_resps(UUIDs, #acc{} = Acc) -> [{UUID, {Health, AllRevs}} | ReplyAcc] end end, - FinalReplies = dict:fold(FoldFun, {ok, []}, Resps), + FinalReplies = dict:fold(FoldFun, [], Resps), couch_util:reorder_results(UUIDs, FinalReplies); format_resps(_UUIDs, Else) -> From d01dbc4a8eabb8490bdda087cc11923e8b3fe9f2 Mon Sep 17 00:00:00 2001 From: "Paul J. Davis" Date: Fri, 18 Jan 2019 10:52:24 -0600 Subject: [PATCH 06/26] Fix timeout in chttpd_purge_tests --- src/chttpd/test/chttpd_purge_tests.erl | 36 ++++++++++++++++++-------- 1 file changed, 25 insertions(+), 11 deletions(-) diff --git a/src/chttpd/test/chttpd_purge_tests.erl b/src/chttpd/test/chttpd_purge_tests.erl index 50f7e18e5c4..dbd73de1fe4 100644 --- a/src/chttpd/test/chttpd_purge_tests.erl +++ b/src/chttpd/test/chttpd_purge_tests.erl @@ -52,6 +52,11 @@ create_doc(Url, Id, Content) -> [?CONTENT_JSON, ?AUTH], "{\"mr\": \"" ++ Content ++ "\"}"). +create_docs(Url, Docs) -> + test_request:post(Url ++ "/_bulk_docs", + [?CONTENT_JSON, ?AUTH], ?JSON_ENCODE({[{docs, Docs}]})). + + delete_db(Url) -> {ok, 200, _, _} = test_request:delete(Url, [?AUTH]). 
@@ -141,20 +146,29 @@ test_ok_purge_request(Url) -> test_ok_purge_request_with_101_docid(Url) -> ?_test(begin PurgedDocsNum = 101, - IdsRevsEJson = lists:foldl(fun(I, CIdRevs) -> + Docs = lists:foldl(fun(I, Acc) -> Id = list_to_binary(integer_to_list(I)), - {ok, _, _, Body} = create_doc(Url, Id), - {Json} = ?JSON_DECODE(Body), - Rev = couch_util:get_value(<<"rev">>, Json, undefined), - [{Id, [Rev]} | CIdRevs] - end, [], lists:seq(1, PurgedDocsNum)), + Doc = {[{<<"_id">>, Id}, {value, I}]}, + [Doc | Acc] + end, [], lists:seq(1, PurgedDocsNum)), + + {ok, _, _, Body} = create_docs(Url, Docs), + BodyJson = ?JSON_DECODE(Body), + + PurgeBody = lists:map(fun({DocResp}) -> + Id = couch_util:get_value(<<"id">>, DocResp, undefined), + Rev = couch_util:get_value(<<"rev">>, DocResp, undefined), + {Id, [Rev]} + end, BodyJson), - IdsRevs = binary_to_list(?JSON_ENCODE({IdsRevsEJson})), ok = config:set("purge", "max_document_id_number", "101"), - {ok, Status, _, _} = test_request:post(Url ++ "/_purge/", - [?CONTENT_JSON, ?AUTH], IdsRevs), - ok = config:delete("purge", "max_document_id_number"), - ?assert(Status =:= 201 orelse Status =:= 202) + try + {ok, Status, _, _} = test_request:post(Url ++ "/_purge/", + [?CONTENT_JSON, ?AUTH], ?JSON_ENCODE({PurgeBody})), + ?assert(Status =:= 201 orelse Status =:= 202) + after + ok = config:delete("purge", "max_document_id_number") + end end). From d167f70e821dd85af35ecea525838bf2f9e8f65a Mon Sep 17 00:00:00 2001 From: Joan Touzet Date: Fri, 18 Jan 2019 15:16:19 -0500 Subject: [PATCH 07/26] Add new /{db}/_sync_shards endpoint (admin-only) (#1811) This server admin-only endpoint forces an n-way sync of all shards across all nodes on which they are hosted. This can be useful for an administrator adding a new node to the cluster, after updating _dbs so that the new node hosts an existing db with content, to force the new node to sync all of that db's shards. 
Users may want to bump their `[mem3] sync_concurrency` value to a larger figure for the duration of the shards sync. Closes #1807 --- src/chttpd/src/chttpd_auth_request.erl | 2 ++ src/mem3/src/mem3_httpd.erl | 20 +++++++++++++++++++- src/mem3/src/mem3_httpd_handlers.erl | 1 + 3 files changed, 22 insertions(+), 1 deletion(-) diff --git a/src/chttpd/src/chttpd_auth_request.erl b/src/chttpd/src/chttpd_auth_request.erl index 9110ed6bc4a..f85eb9722aa 100644 --- a/src/chttpd/src/chttpd_auth_request.erl +++ b/src/chttpd/src/chttpd_auth_request.erl @@ -70,6 +70,8 @@ authorize_request_int(#httpd{path_parts=[_DbName, <<"_compact">>|_]}=Req) -> require_db_admin(Req); authorize_request_int(#httpd{path_parts=[_DbName, <<"_view_cleanup">>]}=Req) -> require_db_admin(Req); +authorize_request_int(#httpd{path_parts=[_DbName, <<"_sync_shards">>]}=Req) -> + require_admin(Req); authorize_request_int(#httpd{path_parts=[_DbName|_]}=Req) -> db_authorization_check(Req). diff --git a/src/mem3/src/mem3_httpd.erl b/src/mem3/src/mem3_httpd.erl index 571f0637025..c922141b1c7 100644 --- a/src/mem3/src/mem3_httpd.erl +++ b/src/mem3/src/mem3_httpd.erl @@ -12,7 +12,8 @@ -module(mem3_httpd). --export([handle_membership_req/1, handle_shards_req/2]). +-export([handle_membership_req/1, handle_shards_req/2, + handle_sync_req/2]). %% includes -include_lib("mem3/include/mem3.hrl"). @@ -52,6 +53,16 @@ handle_shards_req(#httpd{path_parts=[_DbName, <<"_shards">>]}=Req, _Db) -> handle_shards_req(#httpd{path_parts=[_DbName, <<"_shards">>, _DocId]}=Req, _Db) -> chttpd:send_method_not_allowed(Req, "GET"). +handle_sync_req(#httpd{method='POST', + path_parts=[_DbName, <<"_sync_shards">>]} = Req, Db) -> + DbName = mem3:dbname(couch_db:name(Db)), + ShardList = [S#shard.name || S <- mem3:ushards(DbName)], + [ sync_shard(S) || S <- ShardList ], + chttpd:send_json(Req, 202, {[{ok, true}]}); +handle_sync_req(Req, _) -> + chttpd:send_method_not_allowed(Req, "POST"). 
+ + %% %% internal %% @@ -64,3 +75,10 @@ json_shards([#shard{node=Node, range=[B,E]} | Rest], AccIn) -> HexEnd = couch_util:to_hex(<>), Range = list_to_binary(HexBeg ++ "-" ++ HexEnd), json_shards(Rest, dict:append(Range, Node, AccIn)). + +sync_shard(ShardName) -> + Shards = mem3_shards:for_shard_name(ShardName), + [rpc:call(S1#shard.node, mem3_sync, push, [S1, S2#shard.node]) || + S1 <- Shards, S2 <- Shards, S1 =/= S2], + ok. + diff --git a/src/mem3/src/mem3_httpd_handlers.erl b/src/mem3/src/mem3_httpd_handlers.erl index d8e138c15f7..7cbd9fe5f20 100644 --- a/src/mem3/src/mem3_httpd_handlers.erl +++ b/src/mem3/src/mem3_httpd_handlers.erl @@ -18,6 +18,7 @@ url_handler(<<"_membership">>) -> fun mem3_httpd:handle_membership_req/1; url_handler(_) -> no_match. db_handler(<<"_shards">>) -> fun mem3_httpd:handle_shards_req/2; +db_handler(<<"_sync_shards">>) -> fun mem3_httpd:handle_sync_req/2; db_handler(_) -> no_match. design_handler(_) -> no_match. From bec41ac7b9674b3e24d3e8dd1a5b7f12be4ad9f9 Mon Sep 17 00:00:00 2001 From: Garren Smith Date: Thu, 29 Nov 2018 08:57:50 +0200 Subject: [PATCH 08/26] add default fabric request timeouts --- rel/overlay/etc/default.ini | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/rel/overlay/etc/default.ini b/rel/overlay/etc/default.ini index a77add4bdc7..c517de5f8c3 100644 --- a/rel/overlay/etc/default.ini +++ b/rel/overlay/etc/default.ini @@ -189,6 +189,11 @@ port = 6984 ; changes_duration = ; shard_timeout_factor = 2 ; uuid_prefix_len = 7 +; request_timeout = 60000 +; all_docs_timeout = 10000 +; attachments_timeout = 60000 +; view_timeout = 3600000 +; partition_view_timeout = 3600000 ; [rexi] ; buffer_count = 2000 From 40a13504421a53c8b1028a11bbd5d6ab26ed437f Mon Sep 17 00:00:00 2001 From: Nick Vatamaniuc Date: Tue, 22 Jan 2019 11:02:34 -0500 Subject: [PATCH 09/26] Update to mochiweb 2.19.0 It has a fix to revert user socket buffer size to 8192 and also allow setting this buffer values directly (not necessarily via {recbuf, 
...}). Fixes #1810 Warning: 2.19.0 blacklists a series of OTP releases: 21.2, 21.2.1, 21.2.2 This is done via a runtime check of the ssl application version. The blacklist seems valid as there is a bug which prevents data from being delivered on TSL sockets. That could affect either CouchDB server side (chttpd) or replication client side (ibrowse). --- .travis.yml | 2 +- rebar.config.script | 2 +- .../test/chttpd_socket_buffer_size_test.erl | 163 ++++++++++++++++++ 3 files changed, 165 insertions(+), 2 deletions(-) create mode 100644 src/chttpd/test/chttpd_socket_buffer_size_test.erl diff --git a/.travis.yml b/.travis.yml index a8ed1d5a831..e7a44fb7eee 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,7 +3,7 @@ os: linux dist: trusty otp_release: - - 21.1 + - 21.2.3 - 20.3 - 19.3 - 18.3 diff --git a/rebar.config.script b/rebar.config.script index 5f17c29e0ca..a582c04cf0c 100644 --- a/rebar.config.script +++ b/rebar.config.script @@ -65,7 +65,7 @@ DepDescs = [ {hyper, "hyper", {tag, "CouchDB-2.2.0-4"}}, {ibrowse, "ibrowse", {tag, "CouchDB-4.0.1"}}, {jiffy, "jiffy", {tag, "CouchDB-0.14.11-2"}}, -{mochiweb, "mochiweb", {tag, "CouchDB-v2.18.0-1"}}, +{mochiweb, "mochiweb", {tag, "v2.19.0"}}, {meck, "meck", {tag, "0.8.8"}} ], diff --git a/src/chttpd/test/chttpd_socket_buffer_size_test.erl b/src/chttpd/test/chttpd_socket_buffer_size_test.erl new file mode 100644 index 00000000000..650bf9b0b8d --- /dev/null +++ b/src/chttpd/test/chttpd_socket_buffer_size_test.erl @@ -0,0 +1,163 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(chttpd_socket_buffer_size_test). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +-define(USER, "chttpd_db_socket_buffer_size_test_admin"). +-define(PASS, "pass"). +-define(AUTH, {basic_auth, {?USER, ?PASS}}). +-define(CONTENT_JSON, {"Content-Type", "application/json"}). + + +setup() -> + Hashed = couch_passwords:hash_admin_password(?PASS), + ok = config:set("admins", ?USER, ?b2l(Hashed), _Persist=false), + SocketOptions = config:get("chttpd", "socket_options"), + Db = ?tempdb(), + Addr = config:get("chttpd", "bind_address", "127.0.0.1"), + Port = integer_to_list(mochiweb_socket_server:get(chttpd, port)), + Url = "http://" ++ Addr ++ ":" ++ Port ++ "/" ++ ?b2l(Db), + create_db(Url), + {Db, SocketOptions}. + + +teardown({Db, SocketOptions}) -> + delete_db(url(Db)), + ok = config:delete("chttpd", "socket_options", _Persist=false), + ok = config:delete("admins", ?USER, _Persist=false), + case SocketOptions of + undefined -> + ok; + _ -> + ok = config:set("chttpd", "socket_options", SocketOptions) + end. + + +socket_buffer_size_test_() -> + { + "chttpd socket_buffer_size_test", + { + setup, + fun chttpd_test_util:start_couch/0, + fun chttpd_test_util:stop_couch/1, + { + foreach, + fun setup/0, fun teardown/1, + [ + fun buffer_too_small_url_fails/1, + fun buffer_too_small_header_fails/1, + fun recbuf_too_small_url_fails/1, + fun recbuf_too_small_header_fails/1, + fun default_buffer_settings_work/1 + ] + } + } + }. + + +buffer_too_small_url_fails({Db, _}) -> + ?_test(begin + restart_chttpd("[{buffer, 1024}]"), + Id = data(1500), + Status1 = put_req(url(Db) ++ "/" ++ Id, "{}"), + ?assertEqual(400, Status1), + restart_chttpd("[{buffer, 2048}]"), + Status2 = put_req(url(Db) ++ "/" ++ Id, "{}"), + ?assert(Status2 =:= 201 orelse Status2 =:= 202) + end). 
+ + +buffer_too_small_header_fails({Db, _}) -> + ?_test(begin + restart_chttpd("[{buffer, 1024}]"), + Headers = [{"Blah", data(1500)}], + Status1 = put_req(url(Db) ++ "/d", Headers, "{}"), + ?assertEqual(400, Status1), + restart_chttpd("[{buffer, 2048}]"), + Status2 = put_req(url(Db) ++ "/d", Headers, "{}"), + ?assert(Status2 =:= 201 orelse Status2 =:= 202) + end). + + +recbuf_too_small_url_fails({Db, _}) -> + ?_test(begin + restart_chttpd("[{recbuf, 1024}]"), + Id = data(1500), + Status1 = put_req(url(Db) ++ "/" ++ Id, "{}"), + ?assertEqual(400, Status1), + restart_chttpd("[{recbuf, 2048}]"), + Status2 = put_req(url(Db) ++ "/" ++ Id, "{}"), + ?assert(Status2 =:= 201 orelse Status2 =:= 202) + end). + + +recbuf_too_small_header_fails({Db, _}) -> + ?_test(begin + restart_chttpd("[{recbuf, 1024}]"), + Headers = [{"Blah", data(1500)}], + Status1 = put_req(url(Db) ++ "/d", Headers, "{}"), + ?assertEqual(400, Status1), + restart_chttpd("[{recbuf, 2048}]"), + Status2 = put_req(url(Db) ++ "/d", Headers, "{}"), + ?assert(Status2 =:= 201 orelse Status2 =:= 202) + end). + + +default_buffer_settings_work({Db, _}) -> + ?_test(begin + restart_chttpd("[{recbuf, undefined}]"), + Id = data(7000), + Status = put_req(url(Db) ++ "/" ++ Id, "{}"), + ?assert(Status =:= 201 orelse Status =:= 202) + end). + + +% Helper functions + +url(Db) -> + Addr = config:get("chttpd", "bind_address", "127.0.0.1"), + Port = integer_to_list(mochiweb_socket_server:get(chttpd, port)), + "http://" ++ Addr ++ ":" ++ Port ++ "/" ++ ?b2l(Db). + + +create_db(Url) -> + Status = put_req(Url ++ "?q=1&n=1", "{}"), + ?assert(Status =:= 201 orelse Status =:= 202). + + +delete_db(Url) -> + {ok, 200, _, _} = test_request:delete(Url, [?AUTH]). + + +put_req(Url, Body) -> + put_req(Url, [], Body). + + +put_req(Url, Headers, Body) -> + AllHeaders = Headers ++ [?CONTENT_JSON, ?AUTH], + {ok, Status, _, _} = test_request:put(Url, AllHeaders, Body), + Status. + + +data(Size) -> + string:copies("x", Size). 
+ + +restart_chttpd(ServerOptions) -> + ok = application:stop(chttpd), + ok = application:stop(mochiweb), + config:set("chttpd", "server_options", ServerOptions, _Persist=false), + ok = application:start(mochiweb), + ok = application:start(chttpd). From eb676ff6775965a7766021a1146d3f8b447a2021 Mon Sep 17 00:00:00 2001 From: jiangph Date: Mon, 28 Jan 2019 10:43:34 +0800 Subject: [PATCH 10/26] restrict _purge to server admin This restrict _purge and _purged_infos_limit to server admin in terms of the security level required to run them. Fixes #1799 --- src/chttpd/src/chttpd_auth_request.erl | 4 ++ src/chttpd/test/chttpd_security_tests.erl | 61 ++++++++++++++++++++++- 2 files changed, 64 insertions(+), 1 deletion(-) diff --git a/src/chttpd/src/chttpd_auth_request.erl b/src/chttpd/src/chttpd_auth_request.erl index f85eb9722aa..5b4ec84d55d 100644 --- a/src/chttpd/src/chttpd_auth_request.erl +++ b/src/chttpd/src/chttpd_auth_request.erl @@ -72,6 +72,10 @@ authorize_request_int(#httpd{path_parts=[_DbName, <<"_view_cleanup">>]}=Req) -> require_db_admin(Req); authorize_request_int(#httpd{path_parts=[_DbName, <<"_sync_shards">>]}=Req) -> require_admin(Req); +authorize_request_int(#httpd{path_parts=[_DbName, <<"_purge">>]}=Req) -> + require_admin(Req); +authorize_request_int(#httpd{path_parts=[_DbName, <<"_purged_infos_limit">>]}=Req) -> + require_admin(Req); authorize_request_int(#httpd{path_parts=[_DbName|_]}=Req) -> db_authorization_check(Req). 
diff --git a/src/chttpd/test/chttpd_security_tests.erl b/src/chttpd/test/chttpd_security_tests.erl index 12a53acf2c2..955b4ff01f4 100644 --- a/src/chttpd/test/chttpd_security_tests.erl +++ b/src/chttpd/test/chttpd_security_tests.erl @@ -110,7 +110,13 @@ all_test_() -> fun should_allow_admin_view_compaction/1, fun should_disallow_anonymous_view_compaction/1, fun should_allow_admin_db_view_cleanup/1, - fun should_disallow_anonymous_db_view_cleanup/1 + fun should_disallow_anonymous_db_view_cleanup/1, + fun should_allow_admin_purge/1, + fun should_disallow_anonymous_purge/1, + fun should_disallow_db_member_purge/1, + fun should_allow_admin_purged_infos_limit/1, + fun should_disallow_anonymous_purged_infos_limit/1, + fun should_disallow_db_member_purged_infos_limit/1 ] } } @@ -228,6 +234,59 @@ should_disallow_anonymous_db_view_cleanup([Url,_UsersUrl]) -> ErrType = couch_util:get_value(<<"error">>, InnerJson), ?_assertEqual(<<"unauthorized">>, ErrType). +should_allow_admin_purge([Url,_UsersUrl]) -> + ?_assertEqual(null, + begin + IdsRevs = "{}", + {ok, _, _, ResultBody} = test_request:post(Url ++ "/_purge", + [?CONTENT_JSON, ?AUTH], IdsRevs), + ResultJson = ?JSON_DECODE(ResultBody), + {InnerJson} = ResultJson, + couch_util:get_value(<<"purge_seq">>, InnerJson, undefined) + end). + +should_disallow_anonymous_purge([Url,_UsersUrl]) -> + {ok, _, _, ResultBody} = test_request:post(Url ++ "/_purge", + [?CONTENT_JSON], ""), + ResultJson = ?JSON_DECODE(ResultBody), + {InnerJson} = ResultJson, + ErrType = couch_util:get_value(<<"error">>, InnerJson), + ?_assertEqual(<<"unauthorized">>, ErrType). + +should_disallow_db_member_purge([Url,_UsersUrl]) -> + {ok, _, _, ResultBody} = test_request:post(Url ++ "/_purge", + [?CONTENT_JSON, ?TEST_MEMBER_AUTH], ""), + ResultJson = ?JSON_DECODE(ResultBody), + {InnerJson} = ResultJson, + ErrType = couch_util:get_value(<<"error">>, InnerJson), + ?_assertEqual(<<"unauthorized">>,ErrType). 
+ +should_allow_admin_purged_infos_limit([Url,_UsersUrl]) -> + ?_assertEqual(true, + begin + {ok, _, _, ResultBody} = test_request:put(Url + ++ "/_purged_infos_limit/", [?CONTENT_JSON, ?AUTH], "2"), + ResultJson = ?JSON_DECODE(ResultBody), + {InnerJson} = ResultJson, + couch_util:get_value(<<"ok">>, InnerJson, undefined) + end). + +should_disallow_anonymous_purged_infos_limit([Url,_UsersUrl]) -> + {ok, _, _, ResultBody} = test_request:put(Url ++ "/_purged_infos_limit/", + [?CONTENT_JSON, ?TEST_MEMBER_AUTH], "2"), + ResultJson = ?JSON_DECODE(ResultBody), + {InnerJson} = ResultJson, + ErrType = couch_util:get_value(<<"error">>, InnerJson), + ?_assertEqual(<<"unauthorized">>, ErrType). + +should_disallow_db_member_purged_infos_limit([Url,_UsersUrl]) -> + {ok, _, _, ResultBody} = test_request:put(Url ++ "/_purged_infos_limit/", + [?CONTENT_JSON, ?TEST_MEMBER_AUTH], "2"), + ResultJson = ?JSON_DECODE(ResultBody), + {InnerJson} = ResultJson, + ErrType = couch_util:get_value(<<"error">>, InnerJson), + ?_assertEqual(<<"unauthorized">>,ErrType). + should_return_ok_for_sec_obj_with_roles([Url,_UsersUrl]) -> SecurityUrl = lists:concat([Url, "/_security"]), SecurityProperties = [ From d681748c109d91ced7e89a9266cb80c6b22307e4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adrien=20Verg=C3=A9?= Date: Thu, 31 Jan 2019 01:06:21 +0100 Subject: [PATCH 11/26] Compaction: Add snooze_period_ms for finer tuning (#1880) This commit introduces a new option `snooze_period_ms` (measured in milliseconds), and deprecates `snooze_period` while still supporting it for obvious legacy reasons. 
--- rel/overlay/etc/default.ini | 7 +- src/couch/src/couch_compaction_daemon.erl | 71 +++++++++++++++++-- .../test/couchdb_compaction_daemon_tests.erl | 2 +- 3 files changed, 72 insertions(+), 8 deletions(-) diff --git a/rel/overlay/etc/default.ini b/rel/overlay/etc/default.ini index c517de5f8c3..a20bbd0f872 100644 --- a/rel/overlay/etc/default.ini +++ b/rel/overlay/etc/default.ini @@ -420,9 +420,10 @@ min_file_size = 131072 ; With lots of databases and/or with lots of design docs in one or more ; databases, the compaction_daemon can create significant CPU load when ; checking whether databases and view indexes need compacting. The -; snooze_period setting ensures a smoother CPU load. Defaults to -; 3 seconds wait. -; snooze_period = 3 +; snooze_period_ms setting ensures a smoother CPU load. Defaults to +; 3000 milliseconds wait. Note that this option was formerly called +; snooze_period, measured in seconds (it is currently still supported). +; snooze_period_ms = 3000 [compactions] ; List of compaction rules for the compaction daemon. diff --git a/src/couch/src/couch_compaction_daemon.erl b/src/couch/src/couch_compaction_daemon.erl index fea505e4231..d49cb6b116c 100644 --- a/src/couch/src/couch_compaction_daemon.erl +++ b/src/couch/src/couch_compaction_daemon.erl @@ -125,8 +125,16 @@ handle_config_terminate(_, stop, _) -> handle_config_terminate(_Server, _Reason, _State) -> erlang:send_after(?RELISTEN_DELAY, whereis(?MODULE), restart_config_listener). +get_snooze_period() -> + % The snooze_period_ms option should be used, but snooze_period is supported + % for legacy reasons. + Default = config:get_integer("compaction_daemon", "snooze_period", 3), + case config:get_integer("compaction_daemon", "snooze_period_ms", -1) of + -1 -> Default * 1000; + SnoozePeriod -> SnoozePeriod + end. 
+ compact_loop(Parent) -> - SnoozePeriod = config:get_integer("compaction_daemon", "snooze_period", 3), {ok, _} = couch_server:all_databases( fun(DbName, Acc) -> case ets:info(?CONFIG_ETS, size) =:= 0 of @@ -140,7 +148,7 @@ compact_loop(Parent) -> case check_period(Config) of true -> maybe_compact_db(Parent, DbName, Config), - ok = timer:sleep(SnoozePeriod * 1000); + ok = timer:sleep(get_snooze_period()); false -> ok end @@ -231,8 +239,7 @@ maybe_compact_views(DbName, [DDocName | Rest], Config) -> timeout -> ok end, - SnoozePeriod = config:get_integer("compaction_daemon", "snooze_period", 3), - ok = timer:sleep(SnoozePeriod * 1000); + ok = timer:sleep(get_snooze_period()); false -> ok end. @@ -597,4 +604,60 @@ abs_path2_test() -> ?assertEqual({ok, "/a/b/"}, abs_path2("/a/b")), ok. +get_snooze_period_test_() -> + { + foreach, + fun() -> + meck:new(config, [passthrough]) + end, + fun(_) -> + meck:unload() + end, + [ + {"should return default value without config attributes", + fun should_default_without_config/0}, + {"should respect old config attribute", + fun should_respect_old_config/0}, + {"should respect old config set to zero", + fun should_respect_old_config_zero/0}, + {"should respect new config attribute", + fun should_respect_new_config/0}, + {"should respect new config set to zero", + fun should_respect_new_config_zero/0} + ] + }. + +should_default_without_config() -> + ?assertEqual(3000, get_snooze_period()). + +should_respect_old_config() -> + meck:expect(config, get_integer, fun + ("compaction_daemon", "snooze_period", _) -> 1; + (_, _, Default) -> Default + end), + ?assertEqual(1000, get_snooze_period()). + +should_respect_old_config_zero() -> + meck:expect(config, get_integer, fun + ("compaction_daemon", "snooze_period", _) -> 0; + (_, _, Default) -> Default + end), + ?assertEqual(0, get_snooze_period()). 
+ +should_respect_new_config() -> + meck:expect(config, get_integer, fun + ("compaction_daemon", "snooze_period", _) -> 1; + ("compaction_daemon", "snooze_period_ms", _) -> 300; + (_, _, Default) -> Default + end), + ?assertEqual(300, get_snooze_period()). + +should_respect_new_config_zero() -> + meck:expect(config, get_integer, fun + ("compaction_daemon", "snooze_period", _) -> 1; + ("compaction_daemon", "snooze_period_ms", _) -> 0; + (_, _, Default) -> Default + end), + ?assertEqual(0, get_snooze_period()). + -endif. diff --git a/src/couch/test/couchdb_compaction_daemon_tests.erl b/src/couch/test/couchdb_compaction_daemon_tests.erl index c10ddee1253..0ef2a406414 100644 --- a/src/couch/test/couchdb_compaction_daemon_tests.erl +++ b/src/couch/test/couchdb_compaction_daemon_tests.erl @@ -24,7 +24,7 @@ start() -> Ctx = test_util:start_couch(), ok = config:set("compaction_daemon", "check_interval", "3", false), - ok = config:set("compaction_daemon", "snooze_period", "0", false), + ok = config:set("compaction_daemon", "snooze_period_ms", "0", false), ok = config:set("compaction_daemon", "min_file_size", "100000", false), ok = config:delete("compactions", "_default", false), ok = meck:new(?MODS_TO_MOCK, [passthrough]), From d06641b570f1390ef97596ff9ba9c2cadbe1ce16 Mon Sep 17 00:00:00 2001 From: Eric Avdey Date: Mon, 4 Feb 2019 22:51:18 -0400 Subject: [PATCH 12/26] Fix badarg crash on invalid rev for individual doc update --- src/couch/src/couch_doc.erl | 8 ++++---- src/couch/test/couch_db_doc_tests.erl | 21 ++++++++++++++++++++- 2 files changed, 24 insertions(+), 5 deletions(-) diff --git a/src/couch/src/couch_doc.erl b/src/couch/src/couch_doc.erl index e5ad9e9f198..6717c9b0f4b 100644 --- a/src/couch/src/couch_doc.erl +++ b/src/couch/src/couch_doc.erl @@ -179,12 +179,12 @@ parse_rev(Rev) when is_list(Rev) -> SplitRev = lists:splitwith(fun($-) -> false; (_) -> true end, Rev), case SplitRev of {Pos, [$- | RevId]} -> - IntPos = try list_to_integer(Pos) of - Val -> Val + 
try + IntPos = list_to_integer(Pos), + {IntPos, parse_revid(RevId)} catch error:badarg -> throw({bad_request, <<"Invalid rev format">>}) - end, - {IntPos, parse_revid(RevId)}; + end; _Else -> throw({bad_request, <<"Invalid rev format">>}) end; parse_rev(_BadRev) -> diff --git a/src/couch/test/couch_db_doc_tests.erl b/src/couch/test/couch_db_doc_tests.erl index aa9c6fd7172..cdcf81d1510 100644 --- a/src/couch/test/couch_db_doc_tests.erl +++ b/src/couch/test/couch_db_doc_tests.erl @@ -42,7 +42,8 @@ couch_db_doc_test_() -> foreach, fun setup/0, fun teardown/1, [ - fun should_truncate_number_of_revisions/1 + fun should_truncate_number_of_revisions/1, + fun should_raise_bad_request_on_invalid_rev/1 ] } } @@ -59,6 +60,24 @@ should_truncate_number_of_revisions(DbName) -> ?_assertEqual(5, length(Revs)). +should_raise_bad_request_on_invalid_rev(DbName) -> + DocId = <<"foo">>, + InvalidRev1 = <<"foo">>, + InvalidRev2 = <<"a-foo">>, + InvalidRev3 = <<"1-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx">>, + Expect = {bad_request, <<"Invalid rev format">>}, + Db = open_db(DbName), + create_doc(Db, DocId), + [ + {InvalidRev1, + ?_assertThrow(Expect, add_revisions(Db, DocId, InvalidRev1, 1))}, + {InvalidRev2, + ?_assertThrow(Expect, add_revisions(Db, DocId, InvalidRev2, 1))}, + {InvalidRev3, + ?_assertThrow(Expect, add_revisions(Db, DocId, InvalidRev3, 1))} + ]. + + open_db(DbName) -> {ok, Db} = couch_db:open_int(DbName, [?ADMIN_CTX]), Db. 
From bc31155e82bb33435c501076954c6b5d97e5738d Mon Sep 17 00:00:00 2001 From: Eric Avdey Date: Fri, 1 Feb 2019 12:41:54 -0400 Subject: [PATCH 13/26] Fix from_json_obj_validate crash when provided rev isn't a valid hex --- src/couch/src/couch_doc.erl | 13 ++++++++++--- src/couch/test/couch_doc_json_tests.erl | 6 ++++++ 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/src/couch/src/couch_doc.erl b/src/couch/src/couch_doc.erl index 6717c9b0f4b..4a49372c756 100644 --- a/src/couch/src/couch_doc.erl +++ b/src/couch/src/couch_doc.erl @@ -275,9 +275,16 @@ transfer_fields([{<<"_revisions">>, {Props}} | Rest], Doc, DbName) -> true -> ok end, - [throw({doc_validation, "RevId isn't a string"}) || - RevId <- RevIds, not is_binary(RevId)], - RevIds2 = [parse_revid(RevId) || RevId <- RevIds], + RevIds2 = lists:map(fun(RevId) -> + try + parse_revid(RevId) + catch + error:function_clause -> + throw({doc_validation, "RevId isn't a string"}); + error:badarg -> + throw({doc_validation, "RevId isn't a valid hexadecimal"}) + end + end, RevIds), transfer_fields(Rest, Doc#doc{revs={Start, RevIds2}}, DbName); transfer_fields([{<<"_deleted">>, B} | Rest], Doc, DbName) when is_boolean(B) -> diff --git a/src/couch/test/couch_doc_json_tests.erl b/src/couch/test/couch_doc_json_tests.erl index bcff0646a57..b9e3d01e9fe 100644 --- a/src/couch/test/couch_doc_json_tests.erl +++ b/src/couch/test/couch_doc_json_tests.erl @@ -267,6 +267,12 @@ from_json_error_cases() -> {doc_validation, "RevId isn't a string"}, "Revision ids must be strings." }, + { + {[{<<"_revisions">>, {[{<<"start">>, 0}, + {<<"ids">>, [<<"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx">>]}]}}]}, + {doc_validation, "RevId isn't a valid hexadecimal"}, + "Revision ids must be a valid hex." 
+ }, { {[{<<"_something">>, 5}]}, {doc_validation, <<"Bad special document member: _something">>}, From aeb7772a802ff59c9ba5828951321b10f3f09c58 Mon Sep 17 00:00:00 2001 From: Joan Touzet Date: Fri, 30 Nov 2018 16:07:14 -0500 Subject: [PATCH 14/26] dev/run: do not create needless dev/data/ dir --- dev/run | 1 - 1 file changed, 1 deletion(-) diff --git a/dev/run b/dev/run index a4fbfbf8d34..1fb8575f4a4 100755 --- a/dev/run +++ b/dev/run @@ -163,7 +163,6 @@ def setup_context(opts, args): @log('Setup environment') def setup_dirs(ctx): - ensure_dir_exists(ctx['devdir'], 'data') ensure_dir_exists(ctx['devdir'], 'logs') From 7c1ed3eab8eccc6da35a9818f6c1469b377582e7 Mon Sep 17 00:00:00 2001 From: Joan Touzet Date: Thu, 6 Dec 2018 18:03:57 -0500 Subject: [PATCH 15/26] Format and check all code using python black (#1776) The Makefile target builds a python3 venv at .venv and installs black if possible. Since black is Python 3.6 and up only, we skip the check on systems with an older Python 3.x. --- .gitignore | 1 + .travis.yml | 8 +- Makefile | 29 +- Makefile.win | 28 +- build-aux/logfile-uploader.py | 104 +-- dev/pbkdf2.py | 103 +-- dev/run | 481 ++++++++------ rel/overlay/bin/couchup | 593 ++++++++++-------- src/mango/test/01-index-crud-test.py | 83 ++- src/mango/test/02-basic-find-test.py | 217 +++---- src/mango/test/03-operator-test.py | 216 ++----- src/mango/test/04-key-tests.py | 73 ++- src/mango/test/05-index-selection-test.py | 230 ++++--- src/mango/test/06-basic-text-test.py | 171 ++--- src/mango/test/06-text-default-field-test.py | 10 +- .../test/07-text-custom-field-list-test.py | 51 +- src/mango/test/08-text-limit-test.py | 28 +- src/mango/test/09-text-sort-test.py | 40 +- .../10-disable-array-length-field-test.py | 28 +- src/mango/test/11-ignore-design-docs-test.py | 20 +- src/mango/test/12-use-correct-index-test.py | 54 +- src/mango/test/13-stable-update-test.py | 5 +- src/mango/test/13-users-db-find-test.py | 15 +- src/mango/test/14-json-pagination-test.py | 
271 ++++---- src/mango/test/15-execution-stats-test.py | 25 +- src/mango/test/16-index-selectors-test.py | 229 +++---- src/mango/test/17-multi-type-value-test.py | 33 +- src/mango/test/18-json-sort.py | 140 +---- src/mango/test/19-find-conflicts.py | 24 +- src/mango/test/20-no-timeout-test.py | 16 +- src/mango/test/friend_docs.py | 520 +++------------ src/mango/test/limit_docs.py | 455 +++----------- src/mango/test/mango.py | 126 ++-- src/mango/test/user_docs.py | 297 +++------ test/javascript/run | 111 ++-- 35 files changed, 2022 insertions(+), 2813 deletions(-) diff --git a/.gitignore b/.gitignore index d6a766b4768..8e9ca151e8d 100644 --- a/.gitignore +++ b/.gitignore @@ -6,6 +6,7 @@ *.pyc *.swp *~ +.venv .DS_Store .rebar/ .eunit/ diff --git a/.travis.yml b/.travis.yml index e7a44fb7eee..b9c75c0ac80 100644 --- a/.travis.yml +++ b/.travis.yml @@ -11,6 +11,8 @@ otp_release: addons: apt: + sources: + - deadsnakes packages: - build-essential - curl @@ -18,10 +20,10 @@ addons: - libicu-dev - libmozjs185-dev - pkg-config - - python3 + - python3.6 + - python3.6-venv - python3-requests - python3-sphinx - - python3.4-venv # - sphinx-rtd-theme - help2man - shunit2 @@ -50,6 +52,8 @@ before_script: - rm -rf /tmp/couchjslogs - mkdir -p /tmp/couchjslogs - ./configure -c --disable-docs --disable-fauxton + - python3.6 -m venv /tmp/.venv + - source /tmp/.venv/bin/activate script: - make check diff --git a/Makefile b/Makefile index c258b16f0d3..b2f3f04aa48 100644 --- a/Makefile +++ b/Makefile @@ -140,6 +140,7 @@ check: all @$(MAKE) test-cluster-without-quorum @$(MAKE) eunit @$(MAKE) javascript + @$(MAKE) python-black @$(MAKE) mango-test # @$(MAKE) build-test @@ -171,6 +172,27 @@ soak-eunit: couch @$(REBAR) setup_eunit 2> /dev/null while [ $$? 
-eq 0 ] ; do $(REBAR) -r eunit $(EUNIT_OPTS) ; done +.venv/bin/black: + @python3 -m venv .venv + @.venv/bin/pip3 install black || touch .venv/bin/black + +# Python code formatter - only runs if we're on Python 3.6 or greater +python-black: .venv/bin/black + @python3 -c "import sys; exit(1 if sys.version_info < (3,6) else 0)" || \ + echo "Python formatter not supported on Python < 3.6; check results on a newer platform" + @python3 -c "import sys; exit(1 if sys.version_info >= (3,6) else 0)" || \ + LC_ALL=C.UTF-8 LANG=C.UTF-8 .venv/bin/black --check \ + --exclude="build/|buck-out/|dist/|_build/|\.git/|\.hg/|\.mypy_cache/|\.nox/|\.tox/|\.venv/|src/rebar/pr2relnotes.py|src/fauxton" \ + . dev/run rel/overlay/bin/couchup test/javascript/run + +python-black-update: .venv/bin/black + @python3 -c "import sys; exit(1 if sys.version_info < (3,6) else 0)" || \ + echo "Python formatter not supported on Python < 3.6; check results on a newer platform" + @python3 -c "import sys; exit(1 if sys.version_info >= (3,6) else 0)" || \ + LC_ALL=C.UTF-8 LANG=C.UTF-8 .venv/bin/black \ + --exclude="build/|buck-out/|dist/|_build/|\.git/|\.hg/|\.mypy_cache/|\.nox/|\.tox/|\.venv/|src/rebar/pr2relnotes.py|src/fauxton" \ + . 
dev/run rel/overlay/bin/couchup test/javascript/run + .PHONY: elixir elixir: elixir-check-formatted @rm -rf dev/lib @@ -285,9 +307,9 @@ build-test: # target: mango-test - Run Mango tests mango-test: devclean all @cd src/mango && \ - python3 -m venv venv && \ - venv/bin/pip3 install -r requirements.txt - @cd src/mango && ../../dev/run -n 1 --admin=testuser:testpass venv/bin/nosetests + python3 -m venv .venv && \ + .venv/bin/pip3 install -r requirements.txt + @cd src/mango && ../../dev/run -n 1 --admin=testuser:testpass .venv/bin/nosetests ################################################################################ # Developing @@ -400,6 +422,7 @@ clean: @rm -rf src/couch/priv/{couchspawnkillable,couchjs} @rm -rf share/server/main.js share/server/main-coffee.js @rm -rf tmp dev/data dev/lib dev/logs + @rm -rf src/mango/.venv @rm -f src/couch/priv/couchspawnkillable @rm -f src/couch/priv/couch_js/config.h @rm -f dev/boot_node.beam dev/pbkdf2.pyc log/crash.log diff --git a/Makefile.win b/Makefile.win index e57763b720d..fc552500c48 100644 --- a/Makefile.win +++ b/Makefile.win @@ -119,6 +119,7 @@ check: all @$(MAKE) -f Makefile.win test-cluster-without-quorum @$(MAKE) -f Makefile.win eunit @$(MAKE) -f Makefile.win javascript + @$(MAKE) -f Makefile.win python-black @$(MAKE) -f Makefile.win mango-test # @$(MAKE) -f Makefile.win elixir @@ -142,6 +143,26 @@ just-eunit: export ERL_AFLAGS = $(shell echo "-config rel/files/eunit.config") just-eunit: @$(REBAR) -r eunit $(EUNIT_OPTS) +.venv/bin/black: + @python.exe -m venv .venv + @.venv\Scripts\pip3.exe install black || copy /b .venv\Scripts\black.exe +,, + +# Python code formatter - only runs if we're on Python 3.6 or greater +python-black: .venv/bin/black + @python.exe -c "import sys; exit(1 if sys.version_info < (3,6) else 0)" || \ + echo "Python formatter not supported on Python < 3.6; check results on a newer platform" + @python.exe -c "import sys; exit(1 if sys.version_info >= (3,6) else 0)" || \ + 
.venv\Scripts\black.exe --check \ + --exclude="build/|buck-out/|dist/|_build/|\.git/|\.hg/|\.mypy_cache/|\.nox/|\.tox/|\.venv/|src/rebar/pr2relnotes.py|src/fauxton" \ + . dev\run rel\overlay\bin\couchup test\javascript\run + +python-black-update: .venv/bin/black + @python.exe -c "import sys; exit(1 if sys.version_info < (3,6) else 0)" || \ + echo "Python formatter not supported on Python < 3.6; check results on a newer platform" + @python.exe -c "import sys; exit(1 if sys.version_info >= (3,6) else 0)" || \ + .venv\Scripts\black.exe \ + --exclude="build/|buck-out/|dist/|_build/|\.git/|\.hg/|\.mypy_cache/|\.nox/|\.tox/|\.venv/|src/rebar/pr2relnotes.py|src/fauxton" \ + . dev\run rel\overlay\bin\couchup test\javascript\run .PHONY: elixir elixir: elixir-check-formatted @@ -205,9 +226,9 @@ endif .PHONY: mango-test mango-test: devclean all @cd src\mango && \ - python.exe -m venv venv && \ - venv\Scripts\pip.exe install -r requirements.txt - @cd src\mango && venv\Scripts\python.exe ..\..\dev\run -n 1 --admin=testuser:testpass venv\Scripts\nosetests + python.exe -m venv .venv && \ + .venv\Scripts\pip.exe install -r requirements.txt + @cd src\mango && .venv\Scripts\python.exe ..\..\dev\run -n 1 --admin=testuser:testpass .venv\Scripts\nosetests .PHONY: check-qs @@ -330,6 +351,7 @@ clean: -@rmdir /s/q dev\data -@rmdir /s/q dev\lib -@rmdir /s/q dev\logs + -@rmdir /s/q src\mango\.venv -@del /f/q src\couch\priv\couch_js\config.h -@del /f/q dev\boot_node.beam dev\pbkdf2.pyc log\crash.log diff --git a/build-aux/logfile-uploader.py b/build-aux/logfile-uploader.py index c95eab53229..2d90fa4ae0d 100755 --- a/build-aux/logfile-uploader.py +++ b/build-aux/logfile-uploader.py @@ -13,7 +13,6 @@ # the License. 
- import datetime import glob import json @@ -26,96 +25,111 @@ COUCH_URL = "https://couchdb-vm2.apache.org/ci_errorlogs" TARFILE = "couchlog.tar.gz" + def _tojson(req): """Support requests v0.x as well as 1.x+""" - if requests.__version__[0] == '0': + if requests.__version__[0] == "0": return json.loads(req.content) return req.json() + def collect_logfiles(): """ Find and tarball all logfiles """ - tb = tarfile.open(name=TARFILE, mode='w:gz') + tb = tarfile.open(name=TARFILE, mode="w:gz") # EUnit - for log in glob.glob('src/*/.eunit/couch.log'): + for log in glob.glob("src/*/.eunit/couch.log"): tb.add(log) # JS harness - for log in glob.glob('dev/logs/node1.log'): + for log in glob.glob("dev/logs/node1.log"): tb.add(log) # couchjs OS process IO logs - for log in glob.glob('/tmp/couchjslogs/*'): + for log in glob.glob("/tmp/couchjslogs/*"): tb.add(log) tb.close() + def build_ci_doc(): """ Build a metadata document with relevant detail from CI env """ doc = {} - if 'TRAVIS' in os.environ: - doc['builder'] = 'travis' - doc['build_id'] = os.environ['TRAVIS_JOB_ID'] - doc['erlang'] = os.environ['TRAVIS_OTP_RELEASE'] - doc['url'] = 'https://travis-ci.org/apache/couchdb/jobs/' + \ - os.environ['TRAVIS_JOB_ID'] - doc['branch'] = os.environ['TRAVIS_BRANCH'] - doc['commit'] = os.environ['TRAVIS_COMMIT'] - doc['repo'] = 'https://github.com/' + os.environ['TRAVIS_REPO_SLUG'] - elif 'JENKINS_URL' in os.environ: - doc['builder'] = 'jenkins' - doc['build_id'] = os.environ['BUILD_NUMBER'] - doc['url'] = os.environ['BUILD_URL'] - doc['branch'] = os.environ['BRANCH_NAME'] - doc['repo'] = 'https://github.com/apache/couchdb' + if "TRAVIS" in os.environ: + doc["builder"] = "travis" + doc["build_id"] = os.environ["TRAVIS_JOB_ID"] + doc["erlang"] = os.environ["TRAVIS_OTP_RELEASE"] + doc["url"] = ( + "https://travis-ci.org/apache/couchdb/jobs/" + os.environ["TRAVIS_JOB_ID"] + ) + doc["branch"] = os.environ["TRAVIS_BRANCH"] + doc["commit"] = os.environ["TRAVIS_COMMIT"] + doc["repo"] = 
"https://github.com/" + os.environ["TRAVIS_REPO_SLUG"] + elif "JENKINS_URL" in os.environ: + doc["builder"] = "jenkins" + doc["build_id"] = os.environ["BUILD_NUMBER"] + doc["url"] = os.environ["BUILD_URL"] + doc["branch"] = os.environ["BRANCH_NAME"] + doc["repo"] = "https://github.com/apache/couchdb" else: - doc['builder'] = 'manual' + doc["builder"] = "manual" # TODO: shell out to get correct repo, commit, branch info? - doc['repo'] = 'https://github.com/apache/couchdb' - doc['build_id'] = str(time.time()) + doc["repo"] = "https://github.com/apache/couchdb" + doc["build_id"] = str(time.time()) # shorten doc id - repo = doc['repo'].split('/')[-1] - repo = repo.replace('.git', '') - - doc['_id'] = doc['builder'] + '-' + repo + '-' + \ - doc['build_id'] + \ - '-' + datetime.datetime.utcnow().isoformat() + repo = doc["repo"].split("/")[-1] + repo = repo.replace(".git", "") + + doc["_id"] = ( + doc["builder"] + + "-" + + repo + + "-" + + doc["build_id"] + + "-" + + datetime.datetime.utcnow().isoformat() + ) return doc + def upload_logs(): try: - lp = os.environ['COUCHAUTH'].split(':') + lp = os.environ["COUCHAUTH"].split(":") except KeyError as e: - print ("ERROR: COUCHAUTH credentials unavailable! " - "Unable to upload logfiles.") + print("ERROR: COUCHAUTH credentials unavailable! 
" "Unable to upload logfiles.") exit(1) creds = (lp[0], lp[1]) doc = build_ci_doc() - req = requests.post(COUCH_URL, + req = requests.post( + COUCH_URL, data=json.dumps(doc), auth=creds, - headers={'Content-type': 'application/json'}) + headers={"Content-type": "application/json"}, + ) req.raise_for_status() req = _tojson(req) - with open(TARFILE, 'rb') as f: + with open(TARFILE, "rb") as f: # ancient versions of requests break if data is iterable fdata = f.read() - req2 = requests.put(COUCH_URL + '/' + doc['_id'] + '/' + TARFILE, - headers={'Content-type': 'application/x-gtar'}, + req2 = requests.put( + COUCH_URL + "/" + doc["_id"] + "/" + TARFILE, + headers={"Content-type": "application/x-gtar"}, auth=creds, - params={'rev': req['rev']}, - data=fdata) + params={"rev": req["rev"]}, + data=fdata, + ) req2.raise_for_status() return req2 def main(): """ Find latest logfile and upload to Couch logfile db. """ - print ("Uploading logfiles...") + print("Uploading logfiles...") collect_logfiles() req = upload_logs() - print (req.url.split('?')[0]) - print (req.content) - print ("Upload complete!") + print(req.url.split("?")[0]) + print(req.content) + print("Upload complete!") + -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/dev/pbkdf2.py b/dev/pbkdf2.py index 6a297ef85a7..4416f8632a1 100644 --- a/dev/pbkdf2.py +++ b/dev/pbkdf2.py @@ -59,10 +59,10 @@ def safe_str_cmp(a, b): text_type = unicode -_pack_int = Struct('>I').pack +_pack_int = Struct(">I").pack -def bytes_(s, encoding='utf8', errors='strict'): +def bytes_(s, encoding="utf8", errors="strict"): if isinstance(s, text_type): return s.encode(encoding, errors) return s @@ -72,7 +72,7 @@ def hexlify_(s): if PY3: return str(hexlify(s), encoding="utf8") else: - return s.encode('hex') + return s.encode("hex") def range_(*args): @@ -103,6 +103,7 @@ def _pseudorandom(x, mac=mac): return [x for x in h.digest()] else: return map(ord, h.digest()) + buf = [] for block in range_(1, -(-keylen // 
mac.digest_size) + 1): rv = u = _pseudorandom(bytes_(salt) + _pack_int(block)) @@ -110,13 +111,13 @@ def _pseudorandom(x, mac=mac): if PY3: u = _pseudorandom(bytes(u)) else: - u = _pseudorandom(''.join(map(chr, u))) + u = _pseudorandom("".join(map(chr, u))) rv = starmap(xor, zip(rv, u)) buf.extend(rv) if PY3: return bytes(buf)[:keylen] else: - return ''.join(map(chr, buf))[:keylen] + return "".join(map(chr, buf))[:keylen] def test(): @@ -125,48 +126,76 @@ def test(): def check(data, salt, iterations, keylen, expected): rv = pbkdf2_hex(data, salt, iterations, keylen) if rv != expected: - print('Test failed:') - print(' Expected: %s' % expected) - print(' Got: %s' % rv) - print(' Parameters:') - print(' data=%s' % data) - print(' salt=%s' % salt) - print(' iterations=%d' % iterations) + print("Test failed:") + print(" Expected: %s" % expected) + print(" Got: %s" % rv) + print(" Parameters:") + print(" data=%s" % data) + print(" salt=%s" % salt) + print(" iterations=%d" % iterations) failed.append(1) # From RFC 6070 - check('password', 'salt', 1, 20, - '0c60c80f961f0e71f3a9b524af6012062fe037a6') - check('password', 'salt', 2, 20, - 'ea6c014dc72d6f8ccd1ed92ace1d41f0d8de8957') - check('password', 'salt', 4096, 20, - '4b007901b765489abead49d926f721d065a429c1') - check('passwordPASSWORDpassword', 'saltSALTsaltSALTsaltSALTsaltSALTsalt', - 4096, 25, '3d2eec4fe41c849b80c8d83662c0e44a8b291a964cf2f07038') - check('pass\x00word', 'sa\x00lt', 4096, 16, - '56fa6aa75548099dcc37d7f03425e0c3') + check("password", "salt", 1, 20, "0c60c80f961f0e71f3a9b524af6012062fe037a6") + check("password", "salt", 2, 20, "ea6c014dc72d6f8ccd1ed92ace1d41f0d8de8957") + check("password", "salt", 4096, 20, "4b007901b765489abead49d926f721d065a429c1") + check( + "passwordPASSWORDpassword", + "saltSALTsaltSALTsaltSALTsaltSALTsalt", + 4096, + 25, + "3d2eec4fe41c849b80c8d83662c0e44a8b291a964cf2f07038", + ) + check("pass\x00word", "sa\x00lt", 4096, 16, "56fa6aa75548099dcc37d7f03425e0c3") # This one is from 
the RFC but it just takes for ages ##check('password', 'salt', 16777216, 20, ## 'eefe3d61cd4da4e4e9945b3d6ba2158c2634e984') # From Crypt-PBKDF2 - check('password', 'ATHENA.MIT.EDUraeburn', 1, 16, - 'cdedb5281bb2f801565a1122b2563515') - check('password', 'ATHENA.MIT.EDUraeburn', 1, 32, - 'cdedb5281bb2f801565a1122b25635150ad1f7a04bb9f3a333ecc0e2e1f70837') - check('password', 'ATHENA.MIT.EDUraeburn', 2, 16, - '01dbee7f4a9e243e988b62c73cda935d') - check('password', 'ATHENA.MIT.EDUraeburn', 2, 32, - '01dbee7f4a9e243e988b62c73cda935da05378b93244ec8f48a99e61ad799d86') - check('password', 'ATHENA.MIT.EDUraeburn', 1200, 32, - '5c08eb61fdf71e4e4ec3cf6ba1f5512ba7e52ddbc5e5142f708a31e2e62b1e13') - check('X' * 64, 'pass phrase equals block size', 1200, 32, - '139c30c0966bc32ba55fdbf212530ac9c5ec59f1a452f5cc9ad940fea0598ed1') - check('X' * 65, 'pass phrase exceeds block size', 1200, 32, - '9ccad6d468770cd51b10e6a68721be611a8b4d282601db3b36be9246915ec82a') + check( + "password", "ATHENA.MIT.EDUraeburn", 1, 16, "cdedb5281bb2f801565a1122b2563515" + ) + check( + "password", + "ATHENA.MIT.EDUraeburn", + 1, + 32, + "cdedb5281bb2f801565a1122b25635150ad1f7a04bb9f3a333ecc0e2e1f70837", + ) + check( + "password", "ATHENA.MIT.EDUraeburn", 2, 16, "01dbee7f4a9e243e988b62c73cda935d" + ) + check( + "password", + "ATHENA.MIT.EDUraeburn", + 2, + 32, + "01dbee7f4a9e243e988b62c73cda935da05378b93244ec8f48a99e61ad799d86", + ) + check( + "password", + "ATHENA.MIT.EDUraeburn", + 1200, + 32, + "5c08eb61fdf71e4e4ec3cf6ba1f5512ba7e52ddbc5e5142f708a31e2e62b1e13", + ) + check( + "X" * 64, + "pass phrase equals block size", + 1200, + 32, + "139c30c0966bc32ba55fdbf212530ac9c5ec59f1a452f5cc9ad940fea0598ed1", + ) + check( + "X" * 65, + "pass phrase exceeds block size", + 1200, + 32, + "9ccad6d468770cd51b10e6a68721be611a8b4d282601db3b36be9246915ec82a", + ) raise SystemExit(bool(failed)) -if __name__ == '__main__': +if __name__ == "__main__": test() diff --git a/dev/run b/dev/run index 1fb8575f4a4..94f6e5d2f96 
100755 --- a/dev/run +++ b/dev/run @@ -50,6 +50,7 @@ def toposixpath(path): else: return path + def log(msg): def decorator(func): @functools.wraps(func) @@ -58,32 +59,37 @@ def log(msg): if log.verbose: sys.stdout.write(chars) sys.stdout.flush() + argnames = list(inspect.signature(func).parameters.keys()) callargs = dict(list(zip(argnames, args))) callargs.update(kwargs) - print_('[ * ] ' + msg.format(**callargs) + ' ... ') + print_("[ * ] " + msg.format(**callargs) + " ... ") try: res = func(*args, **kwargs) except KeyboardInterrupt: - print_('ok\n') + print_("ok\n") except Exception as err: - print_('failed: %s\n' % err) + print_("failed: %s\n" % err) raise else: - print_('ok\n') + print_("ok\n") return res + return wrapper + return decorator + + log.verbose = True def main(): ctx = setup() startup(ctx) - if ctx['cmd']: - run_command(ctx, ctx['cmd']) + if ctx["cmd"]: + run_command(ctx, ctx["cmd"]) else: - join(ctx, 15984, *ctx['admin']) + join(ctx, 15984, *ctx["admin"]) def setup(): @@ -97,73 +103,124 @@ def setup(): def setup_logging(ctx): - log.verbose = ctx['verbose'] + log.verbose = ctx["verbose"] def setup_argparse(): - parser = optparse.OptionParser(description='Runs CouchDB 2.0 dev cluster') - parser.add_option('-a', '--admin', metavar='USER:PASS', default=None, - help="Add an admin account to the development cluster") - parser.add_option("-n", "--nodes", metavar="nodes", default=3, - type=int, - help="Number of development nodes to be spun up") - parser.add_option("-q", "--quiet", - action="store_false", dest="verbose", default=True, - help="Don't print anything to STDOUT") - parser.add_option('--with-admin-party-please', - dest='with_admin_party', default=False, - action='store_true', - help='Runs a dev cluster with admin party mode on') - parser.add_option('--enable-erlang-views', - action='store_true', - help='Enables the Erlang view server') - parser.add_option('--no-join', - dest='no_join', default=False, - action='store_true', - help='Do not join 
nodes on boot') - parser.add_option('--with-haproxy', dest='with_haproxy', default=False, - action='store_true', help='Use HAProxy') - parser.add_option('--haproxy', dest='haproxy', default='haproxy', - help='HAProxy executable path') - parser.add_option('--haproxy-port', dest='haproxy_port', default='5984', - help='HAProxy port') - parser.add_option('--node-number', dest="node_number", type=int, default=1, - help='The node number to seed them when creating the node(s)') - parser.add_option('-c', '--config-overrides', action="append", default=[], - help='Optional key=val config overrides. Can be repeated') - parser.add_option('--degrade-cluster', dest="degrade_cluster",type=int, default=0, - help='The number of nodes that should be stopped after cluster config') - parser.add_option('--no-eval', action='store_true', default=False, - help='Do not eval subcommand output') + parser = optparse.OptionParser(description="Runs CouchDB 2.0 dev cluster") + parser.add_option( + "-a", + "--admin", + metavar="USER:PASS", + default=None, + help="Add an admin account to the development cluster", + ) + parser.add_option( + "-n", + "--nodes", + metavar="nodes", + default=3, + type=int, + help="Number of development nodes to be spun up", + ) + parser.add_option( + "-q", + "--quiet", + action="store_false", + dest="verbose", + default=True, + help="Don't print anything to STDOUT", + ) + parser.add_option( + "--with-admin-party-please", + dest="with_admin_party", + default=False, + action="store_true", + help="Runs a dev cluster with admin party mode on", + ) + parser.add_option( + "--enable-erlang-views", + action="store_true", + help="Enables the Erlang view server", + ) + parser.add_option( + "--no-join", + dest="no_join", + default=False, + action="store_true", + help="Do not join nodes on boot", + ) + parser.add_option( + "--with-haproxy", + dest="with_haproxy", + default=False, + action="store_true", + help="Use HAProxy", + ) + parser.add_option( + "--haproxy", dest="haproxy", 
default="haproxy", help="HAProxy executable path" + ) + parser.add_option( + "--haproxy-port", dest="haproxy_port", default="5984", help="HAProxy port" + ) + parser.add_option( + "--node-number", + dest="node_number", + type=int, + default=1, + help="The node number to seed them when creating the node(s)", + ) + parser.add_option( + "-c", + "--config-overrides", + action="append", + default=[], + help="Optional key=val config overrides. Can be repeated", + ) + parser.add_option( + "--degrade-cluster", + dest="degrade_cluster", + type=int, + default=0, + help="The number of nodes that should be stopped after cluster config", + ) + parser.add_option( + "--no-eval", + action="store_true", + default=False, + help="Do not eval subcommand output", + ) return parser.parse_args() def setup_context(opts, args): fpath = os.path.abspath(__file__) - return {'N': opts.nodes, - 'no_join': opts.no_join, - 'with_admin_party': opts.with_admin_party, - 'enable_erlang_views': opts.enable_erlang_views, - 'admin': opts.admin.split(':', 1) if opts.admin else None, - 'nodes': ['node%d' % (i + opts.node_number) for i in range(opts.nodes)], - 'node_number': opts.node_number, - 'degrade_cluster': opts.degrade_cluster, - 'devdir': os.path.dirname(fpath), - 'rootdir': os.path.dirname(os.path.dirname(fpath)), - 'cmd': ' '.join(args), - 'verbose': opts.verbose, - 'with_haproxy': opts.with_haproxy, - 'haproxy': opts.haproxy, - 'haproxy_port': opts.haproxy_port, - 'config_overrides': opts.config_overrides, - 'no_eval': opts.no_eval, - 'reset_logs': True, - 'procs': []} - - -@log('Setup environment') + return { + "N": opts.nodes, + "no_join": opts.no_join, + "with_admin_party": opts.with_admin_party, + "enable_erlang_views": opts.enable_erlang_views, + "admin": opts.admin.split(":", 1) if opts.admin else None, + "nodes": ["node%d" % (i + opts.node_number) for i in range(opts.nodes)], + "node_number": opts.node_number, + "degrade_cluster": opts.degrade_cluster, + "devdir": os.path.dirname(fpath), + 
"rootdir": os.path.dirname(os.path.dirname(fpath)), + "cmd": " ".join(args), + "verbose": opts.verbose, + "with_haproxy": opts.with_haproxy, + "haproxy": opts.haproxy, + "haproxy_port": opts.haproxy_port, + "config_overrides": opts.config_overrides, + "no_eval": opts.no_eval, + "reset_logs": True, + "procs": [], + } + + +@log("Setup environment") def setup_dirs(ctx): - ensure_dir_exists(ctx['devdir'], 'logs') + ensure_dir_exists(ctx["devdir"], "logs") def ensure_dir_exists(root, *segments): @@ -173,42 +230,44 @@ def ensure_dir_exists(root, *segments): return path -@log('Ensure CouchDB is built') +@log("Ensure CouchDB is built") def check_beams(ctx): - for fname in glob.glob(os.path.join(ctx['devdir'], "*.erl")): - sp.check_call(["erlc", "-o", ctx['devdir'] + os.sep, fname]) + for fname in glob.glob(os.path.join(ctx["devdir"], "*.erl")): + sp.check_call(["erlc", "-o", ctx["devdir"] + os.sep, fname]) -@log('Prepare configuration files') +@log("Prepare configuration files") def setup_configs(ctx): if os.path.exists("src/fauxton/dist/release"): fauxton_root = "src/fauxton/dist/release" else: fauxton_root = "share/www" - for idx, node in enumerate(ctx['nodes']): - cluster_port, backend_port = get_ports(idx + ctx['node_number']) + for idx, node in enumerate(ctx["nodes"]): + cluster_port, backend_port = get_ports(idx + ctx["node_number"]) env = { - "prefix": toposixpath(ctx['rootdir']), + "prefix": toposixpath(ctx["rootdir"]), "package_author_name": "The Apache Software Foundation", - "data_dir": toposixpath(ensure_dir_exists(ctx['devdir'], - "lib", node, "data")), - "view_index_dir": toposixpath(ensure_dir_exists(ctx['devdir'], - "lib", node, "data")), + "data_dir": toposixpath( + ensure_dir_exists(ctx["devdir"], "lib", node, "data") + ), + "view_index_dir": toposixpath( + ensure_dir_exists(ctx["devdir"], "lib", node, "data") + ), "node_name": "-name %s@127.0.0.1" % node, "cluster_port": cluster_port, "backend_port": backend_port, "fauxton_root": fauxton_root, "uuid": 
"fake_uuid_for_dev", "_default": "", - "compaction_daemon": "{}" + "compaction_daemon": "{}", } write_config(ctx, node, env) def apply_config_overrides(ctx, content): - for kv_str in ctx['config_overrides']: - key, val = kv_str.split('=') + for kv_str in ctx["config_overrides"]: + key, val = kv_str.split("=") key, val = key.strip(), val.strip() match = "[;=]{0,2}%s.*" % key repl = "%s = %s" % (key, val) @@ -222,8 +281,8 @@ def get_ports(idnode): def write_config(ctx, node, env): - etc_src = os.path.join(ctx['rootdir'], "rel", "overlay", "etc") - etc_tgt = ensure_dir_exists(ctx['devdir'], "lib", node, "etc") + etc_src = os.path.join(ctx["rootdir"], "rel", "overlay", "etc") + etc_tgt = ensure_dir_exists(ctx["devdir"], "lib", node, "etc") for fname in glob.glob(os.path.join(etc_src, "*")): base = os.path.basename(fname) @@ -249,37 +308,29 @@ def write_config(ctx, node, env): def boot_haproxy(ctx): - if not ctx['with_haproxy']: + if not ctx["with_haproxy"]: return - config = os.path.join(ctx['rootdir'], "rel", "haproxy.cfg") - cmd = [ - ctx['haproxy'], - "-f", - config - ] - logfname = os.path.join(ctx['devdir'], "logs", "haproxy.log") + config = os.path.join(ctx["rootdir"], "rel", "haproxy.cfg") + cmd = [ctx["haproxy"], "-f", config] + logfname = os.path.join(ctx["devdir"], "logs", "haproxy.log") log = open(logfname, "w") env = os.environ.copy() if "HAPROXY_PORT" not in env: - env["HAPROXY_PORT"] = ctx['haproxy_port'] + env["HAPROXY_PORT"] = ctx["haproxy_port"] return sp.Popen( - " ".join(cmd), - shell=True, - stdin=sp.PIPE, - stdout=log, - stderr=sp.STDOUT, - env=env - ) + " ".join(cmd), shell=True, stdin=sp.PIPE, stdout=log, stderr=sp.STDOUT, env=env + ) def hack_default_ini(ctx, node, contents): - if ctx['enable_erlang_views']: + if ctx["enable_erlang_views"]: contents = re.sub( "^\[native_query_servers\]$", "[native_query_servers]\nerlang = {couch_native_process, start_link, []}", contents, - flags=re.MULTILINE) + flags=re.MULTILINE, + ) return contents @@ -290,15 
+341,15 @@ def hack_local_ini(ctx, contents): previous_line = "; require_valid_user = false\n" contents = contents.replace(previous_line, previous_line + secret_line) - if ctx['with_admin_party']: - ctx['admin'] = ('Admin Party!', 'You do not need any password.') + if ctx["with_admin_party"]: + ctx["admin"] = ("Admin Party!", "You do not need any password.") return contents # handle admin credentials passed from cli or generate own one - if ctx['admin'] is None: - ctx['admin'] = user, pswd = 'root', gen_password() + if ctx["admin"] is None: + ctx["admin"] = user, pswd = "root", gen_password() else: - user, pswd = ctx['admin'] + user, pswd = ctx["admin"] return contents + "\n%s = %s" % (user, hashify(pswd)) @@ -327,50 +378,54 @@ def startup(ctx): atexit.register(kill_processes, ctx) boot_nodes(ctx) ensure_all_nodes_alive(ctx) - if ctx['no_join']: + if ctx["no_join"]: return - if ctx['with_admin_party']: + if ctx["with_admin_party"]: cluster_setup_with_admin_party(ctx) else: cluster_setup(ctx) - if ctx['degrade_cluster'] > 0: + if ctx["degrade_cluster"] > 0: degrade_cluster(ctx) + def kill_processes(ctx): - for proc in ctx['procs']: + for proc in ctx["procs"]: if proc and proc.returncode is None: proc.kill() + def degrade_cluster(ctx): - if ctx['with_haproxy']: - haproxy_proc = ctx['procs'].pop() - for i in range(0,ctx['degrade_cluster']): - proc = ctx['procs'].pop() + if ctx["with_haproxy"]: + haproxy_proc = ctx["procs"].pop() + for i in range(0, ctx["degrade_cluster"]): + proc = ctx["procs"].pop() if proc is not None: kill_process(proc) - if ctx['with_haproxy']: - ctx['procs'].append(haproxy_proc) + if ctx["with_haproxy"]: + ctx["procs"].append(haproxy_proc) + -@log('Stoping proc {proc.pid}') +@log("Stoping proc {proc.pid}") def kill_process(proc): if proc and proc.returncode is None: proc.kill() + def boot_nodes(ctx): - for node in ctx['nodes']: - ctx['procs'].append(boot_node(ctx, node)) + for node in ctx["nodes"]: + ctx["procs"].append(boot_node(ctx, node)) 
haproxy_proc = boot_haproxy(ctx) if haproxy_proc is not None: - ctx['procs'].append(haproxy_proc) + ctx["procs"].append(haproxy_proc) def ensure_all_nodes_alive(ctx): - status = dict((num, False) for num in list(range(ctx['N']))) + status = dict((num, False) for num in list(range(ctx["N"]))) for _ in range(10): - for num in range(ctx['N']): + for num in range(ctx["N"]): if status[num]: continue - local_port, _ = get_ports(num + ctx['node_number']) + local_port, _ = get_ports(num + ctx["node_number"]) url = "http://127.0.0.1:{0}/".format(local_port) try: check_node_alive(url) @@ -382,12 +437,11 @@ def ensure_all_nodes_alive(ctx): return time.sleep(1) if not all(status.values()): - print('Failed to start all the nodes.' - ' Check the dev/logs/*.log for errors.') + print("Failed to start all the nodes." " Check the dev/logs/*.log for errors.") sys.exit(1) -@log('Check node at {url}') +@log("Check node at {url}") def check_node_alive(url): error = None for _ in range(10): @@ -403,74 +457,90 @@ def check_node_alive(url): if error is not None: raise error + def set_boot_env(ctx): # fudge default query server paths - couchjs = os.path.join(ctx['rootdir'], "src", "couch", "priv", "couchjs") - mainjs = os.path.join(ctx['rootdir'], "share", "server", "main.js") - coffeejs = os.path.join(ctx['rootdir'], "share", "server", "main-coffee.js") + couchjs = os.path.join(ctx["rootdir"], "src", "couch", "priv", "couchjs") + mainjs = os.path.join(ctx["rootdir"], "share", "server", "main.js") + coffeejs = os.path.join(ctx["rootdir"], "share", "server", "main-coffee.js") qs_javascript = toposixpath("%s %s" % (couchjs, mainjs)) qs_coffescript = toposixpath("%s %s" % (couchjs, coffeejs)) - os.environ['COUCHDB_QUERY_SERVER_JAVASCRIPT'] = qs_javascript - os.environ['COUCHDB_QUERY_SERVER_COFFEESCRIPT'] = qs_coffescript + os.environ["COUCHDB_QUERY_SERVER_JAVASCRIPT"] = qs_javascript + os.environ["COUCHDB_QUERY_SERVER_COFFEESCRIPT"] = qs_coffescript -@log('Start node {node}') + +@log("Start 
node {node}") def boot_node(ctx, node): - erl_libs = os.path.join(ctx['rootdir'], "src") + erl_libs = os.path.join(ctx["rootdir"], "src") set_boot_env(ctx) env = os.environ.copy() env["ERL_LIBS"] = os.pathsep.join([erl_libs]) - node_etcdir = os.path.join(ctx['devdir'], "lib", node, "etc") - reldir = os.path.join(ctx['rootdir'], "rel") + node_etcdir = os.path.join(ctx["devdir"], "lib", node, "etc") + reldir = os.path.join(ctx["rootdir"], "rel") cmd = [ "erl", - "-args_file", os.path.join(node_etcdir, "vm.args"), - "-config", os.path.join(reldir, "files", "sys"), + "-args_file", + os.path.join(node_etcdir, "vm.args"), + "-config", + os.path.join(reldir, "files", "sys"), "-couch_ini", os.path.join(node_etcdir, "default.ini"), os.path.join(node_etcdir, "local.ini"), - "-reltool_config", os.path.join(reldir, "reltool.config"), - "-parent_pid", str(os.getpid()), - "-pa", ctx['devdir'] + "-reltool_config", + os.path.join(reldir, "reltool.config"), + "-parent_pid", + str(os.getpid()), + "-pa", + ctx["devdir"], ] - cmd += [ p[:-1] for p in glob.glob(erl_libs + "/*/") ] - cmd += [ "-s", "boot_node" ] - if ctx['reset_logs']: + cmd += [p[:-1] for p in glob.glob(erl_libs + "/*/")] + cmd += ["-s", "boot_node"] + if ctx["reset_logs"]: mode = "wb" else: mode = "r+b" - logfname = os.path.join(ctx['devdir'], "logs", "%s.log" % node) + logfname = os.path.join(ctx["devdir"], "logs", "%s.log" % node) log = open(logfname, mode) cmd = [toposixpath(x) for x in cmd] return sp.Popen(cmd, stdin=sp.PIPE, stdout=log, stderr=sp.STDOUT, env=env) -@log('Running cluster setup') +@log("Running cluster setup") def cluster_setup(ctx): lead_port, _ = get_ports(1) - if enable_cluster(ctx['N'], lead_port, *ctx['admin']): - for num in range(1, ctx['N']): + if enable_cluster(ctx["N"], lead_port, *ctx["admin"]): + for num in range(1, ctx["N"]): node_port, _ = get_ports(num + 1) - enable_cluster(ctx['N'], node_port, *ctx['admin']) - add_node(lead_port, node_port, *ctx['admin']) - finish_cluster(lead_port, 
*ctx['admin']) + enable_cluster(ctx["N"], node_port, *ctx["admin"]) + add_node(lead_port, node_port, *ctx["admin"]) + finish_cluster(lead_port, *ctx["admin"]) return lead_port def enable_cluster(node_count, port, user, pswd): - conn = httpclient.HTTPConnection('127.0.0.1', port) - conn.request('POST', '/_cluster_setup', - json.dumps({'action': 'enable_cluster', - 'bind_address': '0.0.0.0', - 'username': user, - 'password': pswd, - 'node_count': node_count}), - {'Authorization': basic_auth_header(user, pswd), - 'Content-Type': 'application/json'}) + conn = httpclient.HTTPConnection("127.0.0.1", port) + conn.request( + "POST", + "/_cluster_setup", + json.dumps( + { + "action": "enable_cluster", + "bind_address": "0.0.0.0", + "username": user, + "password": pswd, + "node_count": node_count, + } + ), + { + "Authorization": basic_auth_header(user, pswd), + "Content-Type": "application/json", + }, + ) resp = conn.getresponse() if resp.status == 400: resp.close() @@ -481,38 +551,56 @@ def enable_cluster(node_count, port, user, pswd): def add_node(lead_port, node_port, user, pswd): - conn = httpclient.HTTPConnection('127.0.0.1', lead_port) - conn.request('POST', '/_cluster_setup', - json.dumps({'action': 'add_node', - 'host': '127.0.0.1', - 'port': node_port, - 'username': user, - 'password': pswd}), - {'Authorization': basic_auth_header(user, pswd), - 'Content-Type': 'application/json'}) + conn = httpclient.HTTPConnection("127.0.0.1", lead_port) + conn.request( + "POST", + "/_cluster_setup", + json.dumps( + { + "action": "add_node", + "host": "127.0.0.1", + "port": node_port, + "username": user, + "password": pswd, + } + ), + { + "Authorization": basic_auth_header(user, pswd), + "Content-Type": "application/json", + }, + ) resp = conn.getresponse() assert resp.status in (201, 409), resp.read() resp.close() def set_cookie(port, user, pswd): - conn = httpclient.HTTPConnection('127.0.0.1', port) - conn.request('POST', '/_cluster_setup', - json.dumps({'action': 
'receive_cookie', - 'cookie': generate_cookie()}), - {'Authorization': basic_auth_header(user, pswd), - 'Content-Type': 'application/json'}) + conn = httpclient.HTTPConnection("127.0.0.1", port) + conn.request( + "POST", + "/_cluster_setup", + json.dumps({"action": "receive_cookie", "cookie": generate_cookie()}), + { + "Authorization": basic_auth_header(user, pswd), + "Content-Type": "application/json", + }, + ) resp = conn.getresponse() assert resp.status == 201, resp.read() resp.close() def finish_cluster(port, user, pswd): - conn = httpclient.HTTPConnection('127.0.0.1', port) - conn.request('POST', '/_cluster_setup', - json.dumps({'action': 'finish_cluster'}), - {'Authorization': basic_auth_header(user, pswd), - 'Content-Type': 'application/json'}) + conn = httpclient.HTTPConnection("127.0.0.1", port) + conn.request( + "POST", + "/_cluster_setup", + json.dumps({"action": "finish_cluster"}), + { + "Authorization": basic_auth_header(user, pswd), + "Content-Type": "application/json", + }, + ) resp = conn.getresponse() # 400 for already set up'ed cluster assert resp.status in (201, 400), resp.read() @@ -520,7 +608,7 @@ def finish_cluster(port, user, pswd): def basic_auth_header(user, pswd): - return 'Basic ' + base64.b64encode((user + ':' + pswd).encode()).decode() + return "Basic " + base64.b64encode((user + ":" + pswd).encode()).decode() def generate_cookie(): @@ -528,14 +616,14 @@ def generate_cookie(): def cluster_setup_with_admin_party(ctx): - host, port = '127.0.0.1', 15986 - for node in ctx['nodes']: - body = '{}' + host, port = "127.0.0.1", 15986 + for node in ctx["nodes"]: + body = "{}" conn = httpclient.HTTPConnection(host, port) - conn.request('PUT', "/_nodes/%s@127.0.0.1" % node, body) + conn.request("PUT", "/_nodes/%s@127.0.0.1" % node, body) resp = conn.getresponse() if resp.status not in (200, 201, 202, 409): - print(('Failed to join %s into cluster: %s' % (node, resp.read()))) + print(("Failed to join %s into cluster: %s" % (node, resp.read()))) 
sys.exit(1) create_system_databases(host, 15984) @@ -554,29 +642,31 @@ def try_request(host, port, meth, path, success_codes, retries=10, retry_dt=1): def create_system_databases(host, port): - for dbname in ['_users', '_replicator', '_global_changes']: + for dbname in ["_users", "_replicator", "_global_changes"]: conn = httpclient.HTTPConnection(host, port) - conn.request('HEAD', '/' + dbname) + conn.request("HEAD", "/" + dbname) resp = conn.getresponse() if resp.status == 404: - try_request(host, port, 'PUT', '/' + dbname, (201, 202, 412)) + try_request(host, port, "PUT", "/" + dbname, (201, 202, 412)) -@log('Developers cluster is set up at http://127.0.0.1:{lead_port}.\n' - 'Admin username: {user}\n' - 'Password: {password}\n' - 'Time to hack!') +@log( + "Developers cluster is set up at http://127.0.0.1:{lead_port}.\n" + "Admin username: {user}\n" + "Password: {password}\n" + "Time to hack!" +) def join(ctx, lead_port, user, password): while True: - for proc in ctx['procs']: + for proc in ctx["procs"]: if proc is not None and proc.returncode is not None: exit(1) time.sleep(2) -@log('Exec command {cmd}') +@log("Exec command {cmd}") def run_command(ctx, cmd): - if ctx['no_eval']: + if ctx["no_eval"]: p = sp.Popen(cmd, shell=True) p.wait() exit(p.returncode) @@ -590,9 +680,10 @@ def run_command(ctx, cmd): p.wait() exit(p.returncode) -@log('Restart all nodes') + +@log("Restart all nodes") def reboot_nodes(ctx): - ctx['reset_logs'] = False + ctx["reset_logs"] = False kill_processes(ctx) boot_nodes(ctx) ensure_all_nodes_alive(ctx) diff --git a/rel/overlay/bin/couchup b/rel/overlay/bin/couchup index 41ac4b857c4..b5ac8066f69 100755 --- a/rel/overlay/bin/couchup +++ b/rel/overlay/bin/couchup @@ -18,64 +18,72 @@ import textwrap import threading import time import sys + try: from urllib.parse import quote except ImportError: from urllib.parse import quote import requests + try: import progressbar + HAVE_BAR = True except ImportError: HAVE_BAR = False + def _tojson(req): 
"""Support requests v0.x as well as 1.x+""" - if requests.__version__[0] == '0': + if requests.__version__[0] == "0": return json.loads(req.content) return req.json() + def _args(args): args = vars(args) - if args['password']: - args['creds'] = (args['login'], args['password']) + if args["password"]: + args["creds"] = (args["login"], args["password"]) else: - args['creds'] = None + args["creds"] = None return args + def _do_list(args): - port = str(args['local_port']) - req = requests.get('http://127.0.0.1:' + port + '/_all_dbs', - auth=args['creds']) + port = str(args["local_port"]) + req = requests.get("http://127.0.0.1:" + port + "/_all_dbs", auth=args["creds"]) req.raise_for_status() dbs = _tojson(req) - local_dbs = [x for x in dbs if "shards" not in x - and x not in ['_dbs', '_nodes']] - clustered_dbs = list(set( - [x.split('/')[2].split('.')[0] for x in dbs if "shards" in x] - )) - if not args['include_system_dbs']: + local_dbs = [x for x in dbs if "shards" not in x and x not in ["_dbs", "_nodes"]] + clustered_dbs = list( + set([x.split("/")[2].split(".")[0] for x in dbs if "shards" in x]) + ) + if not args["include_system_dbs"]: # list comprehension to eliminate dbs starting with underscore - local_dbs = [x for x in local_dbs if x[0] != '_'] - clustered_dbs = [x for x in clustered_dbs if x[0] != '_'] + local_dbs = [x for x in local_dbs if x[0] != "_"] + clustered_dbs = [x for x in clustered_dbs if x[0] != "_"] local_dbs.sort() clustered_dbs.sort() - if args.get('clustered'): + if args.get("clustered"): return clustered_dbs return local_dbs + def _list(args): args = _args(args) ret = _do_list(args) print(", ".join(ret)) -def _watch_replication(db, - local_port=5986, - clustered_port=5984, - creds=None, - hide_progress_bar=False, - quiet=False, - timeout=30): + +def _watch_replication( + db, + local_port=5986, + clustered_port=5984, + creds=None, + hide_progress_bar=False, + quiet=False, + timeout=30, +): """Watches replication, optionally with a 
progressbar.""" time.sleep(1) if not quiet: @@ -86,22 +94,25 @@ def _watch_replication(db, req.raise_for_status() req = _tojson(req) # here, local means node-local, i.e. source (1.x) database - local_docs = req['doc_count'] - local_size = req['data_size'] + local_docs = req["doc_count"] + local_size = req["data_size"] except requests.exceptions.HTTPError: - raise Exception('Cannot retrieve {} doc_count!'.format(db)) + raise Exception("Cannot retrieve {} doc_count!".format(db)) if local_size == 0: return if HAVE_BAR and not hide_progress_bar and not quiet: widgets = [ db, - ' ', progressbar.Percentage(), - ' ', progressbar.Bar(marker=progressbar.RotatingMarker()), - ' ', progressbar.ETA(), - ' ', progressbar.FileTransferSpeed(), + " ", + progressbar.Percentage(), + " ", + progressbar.Bar(marker=progressbar.RotatingMarker()), + " ", + progressbar.ETA(), + " ", + progressbar.FileTransferSpeed(), ] - progbar = progressbar.ProgressBar(widgets=widgets, - maxval=local_size).start() + progbar = progressbar.ProgressBar(widgets=widgets, maxval=local_size).start() count = 0 stall_count = 0 url = "http://127.0.0.1:{}/{}".format(clustered_port, db) @@ -111,22 +122,21 @@ def _watch_replication(db, req.raise_for_status() req = _tojson(req) # here, cluster means clustered port, i.e. port 5984 - clus_count = req['doc_count'] - clus_size = req['data_size'] + clus_count = req["doc_count"] + clus_size = req["data_size"] except requests.exceptions.HTTPError as exc: if exc.response.status_code == 404: clus_count = 0 clus_size = 0 else: - raise Exception('Cannot retrieve {} doc_count!'.format(db)) + raise Exception("Cannot retrieve {} doc_count!".format(db)) if count == clus_count: stall_count += 1 else: stall_count = 0 if stall_count == timeout: if not quiet: - print( - "Replication is stalled. Increase timeout or reduce load.") + print("Replication is stalled. 
Increase timeout or reduce load.") exit(1) if HAVE_BAR and not hide_progress_bar and not quiet: if clus_size > local_size: @@ -138,264 +148,302 @@ def _watch_replication(db, progbar.finish() return 0 + def _put_filter(args, db=None): """Adds _design/repl_filters tombstone replication filter to DB.""" ddoc = { - '_id': '_design/repl_filters', - 'filters': { - 'no_deleted': 'function(doc,req){return !doc._deleted;};' - } + "_id": "_design/repl_filters", + "filters": {"no_deleted": "function(doc,req){return !doc._deleted;};"}, } try: req = requests.get( - 'http://127.0.0.1:{}/{}/_design/repl_filters'.format( - args['local_port'], db), - auth=args['creds']) + "http://127.0.0.1:{}/{}/_design/repl_filters".format( + args["local_port"], db + ), + auth=args["creds"], + ) req.raise_for_status() doc = _tojson(req) - del doc['_rev'] + del doc["_rev"] if doc != ddoc: - if not args['quiet']: - print('Source replication filter does not match! Aborting.') + if not args["quiet"]: + print("Source replication filter does not match! 
Aborting.") exit(1) except requests.exceptions.HTTPError as exc: if exc.response.status_code == 404: - if not args['quiet']: - print('Adding replication filter to source database...') + if not args["quiet"]: + print("Adding replication filter to source database...") req = requests.put( - 'http://127.0.0.1:{}/{}/_design/repl_filters'.format( - args['local_port'], db), + "http://127.0.0.1:{}/{}/_design/repl_filters".format( + args["local_port"], db + ), data=json.dumps(ddoc), - auth=args['creds']) + auth=args["creds"], + ) req.raise_for_status() - elif not args['quiet']: + elif not args["quiet"]: print(exc.response.text) exit(1) + def _do_security(args, db=None): """Copies the _security object from source to target DB.""" try: req = requests.get( - 'http://127.0.0.1:{}/{}/_security'.format( - args['local_port'], db), - auth=args['creds']) + "http://127.0.0.1:{}/{}/_security".format(args["local_port"], db), + auth=args["creds"], + ) req.raise_for_status() security_doc = _tojson(req) req = requests.put( - 'http://127.0.0.1:{}/{}/_security'.format( - args['clustered_port'], db), - data=json.dumps(security_doc), - auth=args['creds']) + "http://127.0.0.1:{}/{}/_security".format(args["clustered_port"], db), + data=json.dumps(security_doc), + auth=args["creds"], + ) req.raise_for_status() except requests.exceptions.HTTPError as exc: print(exc.response.text) exit(1) + def _replicate(args): args = _args(args) - if args['all_dbs']: + if args["all_dbs"]: dbs = _do_list(args) else: - dbs = args['dbs'] + dbs = args["dbs"] for db in dbs: - if args['filter_deleted']: + if args["filter_deleted"]: _put_filter(args, db) - if not args['quiet']: - print('Starting replication for ' + db + '...') - db = quote(db, safe='') + if not args["quiet"]: + print("Starting replication for " + db + "...") + db = quote(db, safe="") doc = { - 'continuous': False, - 'create_target': True, - 'source': { - 'url': 'http://127.0.0.1:{}/{}'.format( - args['local_port'], db) + "continuous": False, + 
"create_target": True, + "source": {"url": "http://127.0.0.1:{}/{}".format(args["local_port"], db)}, + "target": { + "url": "http://127.0.0.1:{}/{}".format(args["clustered_port"], db) }, - 'target': { - 'url': 'http://127.0.0.1:{}/{}'.format( - args['clustered_port'], db) - } } - if args['filter_deleted']: - doc['filter'] = 'repl_filters/no_deleted' - if args['creds']: - auth = 'Basic ' + base64.b64encode(':'.join(args['creds'])) - headers = { - 'authorization': auth - } - doc['source']['headers'] = headers - doc['target']['headers'] = headers - watch_args = {y: args[y] for y in [ - 'local_port', 'clustered_port', 'creds', 'hide_progress_bar', - 'timeout', 'quiet']} - watch_args['db'] = db + if args["filter_deleted"]: + doc["filter"] = "repl_filters/no_deleted" + if args["creds"]: + auth = "Basic " + base64.b64encode(":".join(args["creds"])) + headers = {"authorization": auth} + doc["source"]["headers"] = headers + doc["target"]["headers"] = headers + watch_args = { + y: args[y] + for y in [ + "local_port", + "clustered_port", + "creds", + "hide_progress_bar", + "timeout", + "quiet", + ] + } + watch_args["db"] = db watch = threading.Thread(target=_watch_replication, kwargs=watch_args) watch.start() try: - req = requests.post('http://127.0.0.1:{}/_replicate'.format( - args['clustered_port']), - auth=args['creds'], + req = requests.post( + "http://127.0.0.1:{}/_replicate".format(args["clustered_port"]), + auth=args["creds"], data=json.dumps(doc), - headers={'Content-type': 'application/json'}) + headers={"Content-type": "application/json"}, + ) req.raise_for_status() req = _tojson(req) except requests.exceptions.HTTPError as exc: - if not args['quiet']: + if not args["quiet"]: print(exc.response.text) exit(1) watch.join() - if req.get('no_changes'): - if not args['quiet']: + if req.get("no_changes"): + if not args["quiet"]: print("No changes, replication is caught up.") - if not args['quiet']: - print('Copying _security object for ' + db + '...') + if not 
args["quiet"]: + print("Copying _security object for " + db + "...") _do_security(args, db) - if not args['quiet']: + if not args["quiet"]: print("Replication complete.") + def _rebuild(args): args = _args(args) - if args['all_dbs']: - if args['views']: - if not args['quiet']: + if args["all_dbs"]: + if args["views"]: + if not args["quiet"]: print("Cannot take list of views for more than 1 database.") exit(1) - args['clustered'] = True + args["clustered"] = True dbs = _do_list(args) else: - dbs = [args['db']] + dbs = [args["db"]] for db in dbs: - if args['views']: - views = args['views'] + if args["views"]: + views = args["views"] else: try: - req = requests.get('http://127.0.0.1:{}/{}/_all_docs'.format( - args['clustered_port'], db), - params={ - 'start_key': '"_design/"', - 'end_key': '"_design0"' - }, - auth=args['creds']) + req = requests.get( + "http://127.0.0.1:{}/{}/_all_docs".format( + args["clustered_port"], db + ), + params={"start_key": '"_design/"', "end_key": '"_design0"'}, + auth=args["creds"], + ) req.raise_for_status() req = _tojson(req) except requests.exceptions.HTTPError as exc: - if not args['quiet']: + if not args["quiet"]: print(exc.response.text) exit(1) - req = req['rows'] - ddocs = [x['id'].split('/')[1] for x in req] + req = req["rows"] + ddocs = [x["id"].split("/")[1] for x in req] for ddoc in ddocs: try: - req = requests.get('http://127.0.0.1:{}/{}/_design/{}'.format( - args['clustered_port'], db, ddoc), - auth=args['creds']) + req = requests.get( + "http://127.0.0.1:{}/{}/_design/{}".format( + args["clustered_port"], db, ddoc + ), + auth=args["creds"], + ) req.raise_for_status() doc = _tojson(req) except requests.exceptions.HTTPError as exc: - if not args['quiet']: + if not args["quiet"]: print(exc.response.text) exit(1) - if 'views' not in doc: - if not args['quiet']: + if "views" not in doc: + if not args["quiet"]: print("Skipping {}/{}, no views found".format(db, ddoc)) continue # only need to refresh a single view per ddoc - if not 
args['quiet']: + if not args["quiet"]: print("Refreshing views in {}/{}...".format(db, ddoc)) - view = list(doc['views'].keys())[0] + view = list(doc["views"].keys())[0] try: req = requests.get( - 'http://127.0.0.1:{}/{}/_design/{}/_view/{}'.format( - args['clustered_port'], db, ddoc, view), - params={'limit': 1}, - auth=args['creds'], - timeout=float(args['timeout'])) + "http://127.0.0.1:{}/{}/_design/{}/_view/{}".format( + args["clustered_port"], db, ddoc, view + ), + params={"limit": 1}, + auth=args["creds"], + timeout=float(args["timeout"]), + ) except requests.exceptions.Timeout: - if not args['quiet']: + if not args["quiet"]: print("Timeout, view is processing. Moving on.") except requests.exceptions.HTTPError as exc: - if not args['quiet']: + if not args["quiet"]: print(exc.response.text) exit(1) + def _delete(args): args = _args(args) - if args['all_dbs']: - args['include_system_dbs'] = False + if args["all_dbs"]: + args["include_system_dbs"] = False dbs = _do_list(args) else: - dbs = args['dbs'] + dbs = args["dbs"] for db in dbs: - db = quote(db, safe='') - local_url = 'http://127.0.0.1:{}/{}'.format(args['local_port'], db) - clus_url = 'http://127.0.0.1:{}/{}'.format(args['clustered_port'], db) + db = quote(db, safe="") + local_url = "http://127.0.0.1:{}/{}".format(args["local_port"], db) + clus_url = "http://127.0.0.1:{}/{}".format(args["clustered_port"], db) try: - req = requests.get(local_url, auth=args['creds']) + req = requests.get(local_url, auth=args["creds"]) req.raise_for_status() req = _tojson(req) - local_docs = req['doc_count'] - req = requests.get(clus_url, auth=args['creds']) + local_docs = req["doc_count"] + req = requests.get(clus_url, auth=args["creds"]) req.raise_for_status() req = _tojson(req) - clus_docs = req['doc_count'] - if clus_docs < local_docs and not args['force']: - if not args['quiet']: - print('Clustered DB has less docs than local version!' 
+ - ' Skipping...') + clus_docs = req["doc_count"] + if clus_docs < local_docs and not args["force"]: + if not args["quiet"]: + print( + "Clustered DB has less docs than local version!" + + " Skipping..." + ) continue - if not args['quiet']: - print('Deleting ' + db + '...') - req = requests.delete('http://127.0.0.1:{}/{}'.format( - args['local_port'], db), - auth=args['creds']) + if not args["quiet"]: + print("Deleting " + db + "...") + req = requests.delete( + "http://127.0.0.1:{}/{}".format(args["local_port"], db), + auth=args["creds"], + ) req.raise_for_status() except requests.exceptions.HTTPError as exc: - if not args['quiet']: + if not args["quiet"]: print(exc.response.text) exit(1) + def main(argv): """Kindly do the needful.""" - parser = argparse.ArgumentParser(prog='couchup', + parser = argparse.ArgumentParser( + prog="couchup", formatter_class=argparse.RawDescriptionHelpFormatter, - description=textwrap.dedent('''\ + description=textwrap.dedent( + """\ Migrate CouchDB 1.x databases to CouchDB 2.x. Specify a subcommand and -h or --help for more help. 
- ''')) + """ + ), + ) subparsers = parser.add_subparsers() - parser_list = subparsers.add_parser('list', - help='lists all CouchDB 1.x databases', + parser_list = subparsers.add_parser( + "list", + help="lists all CouchDB 1.x databases", formatter_class=argparse.RawTextHelpFormatter, - description=textwrap.dedent('''\ + description=textwrap.dedent( + """\ Examples: couchup list couchup list -c -i -p mysecretpassword - ''')) - parser_list.add_argument('-c', '--clustered', action='store_true', - help='show clustered (2.x) databases instead') - parser_list.add_argument('-i', '--include-system-dbs', - action='store_true', - help='include system databases (_users, _replicator, etc.)') - parser_list.add_argument('-l', '--login', default='admin', - help='specify login (default admin)') - parser_list.add_argument('-p', '--password', - help='specify password') - parser_list.add_argument('--local-port', default=5986, - help='override local port (default 5986)') - parser_list.add_argument('--clustered-port', default=5984, - help='override clustered port (default 5984)') + """ + ), + ) + parser_list.add_argument( + "-c", + "--clustered", + action="store_true", + help="show clustered (2.x) databases instead", + ) + parser_list.add_argument( + "-i", + "--include-system-dbs", + action="store_true", + help="include system databases (_users, _replicator, etc.)", + ) + parser_list.add_argument( + "-l", "--login", default="admin", help="specify login (default admin)" + ) + parser_list.add_argument("-p", "--password", help="specify password") + parser_list.add_argument( + "--local-port", default=5986, help="override local port (default 5986)" + ) + parser_list.add_argument( + "--clustered-port", default=5984, help="override clustered port (default 5984)" + ) parser_list.set_defaults(func=_list) - parser_replicate = subparsers.add_parser('replicate', - help='replicates one or more 1.x databases to CouchDB 2.x', + parser_replicate = subparsers.add_parser( + "replicate", + 
help="replicates one or more 1.x databases to CouchDB 2.x", formatter_class=argparse.RawTextHelpFormatter, - description=textwrap.dedent('''\ + description=textwrap.dedent( + """\ Examples: couchup replicate movies couchup replicate -f lots_of_deleted_docs_db @@ -411,90 +459,144 @@ def main(argv): It is IMPORTANT that no documents be deleted from the 1.x database during this process, or those deletions may not successfully replicate to the 2.x database. - ''')) - parser_replicate.add_argument('-a', '--all-dbs', action='store_true', - help='act on all databases available') - parser_replicate.add_argument('-i', '--include-system-dbs', - action='store_true', - help='include system databases (_users, _replicator, etc.)') - parser_replicate.add_argument('-q', '--quiet', action='store_true', - help='suppress all output') - parser_replicate.add_argument('-n', '--hide-progress-bar', - action='store_true', - help='suppress progress bar display') - parser_replicate.add_argument('-f', '--filter-deleted', - action='store_true', - help='filter deleted document tombstones during replication') - parser_replicate.add_argument('-t', '--timeout', default=30, - help='stalled replication timeout threshhold in s (def: 30)') - parser_replicate.add_argument('-l', '--login', default='admin', - help='specify login (default admin)') - parser_replicate.add_argument('-p', '--password', - help='specify password') - parser_replicate.add_argument('--local-port', default=5986, - help='override local port (default 5986)') - parser_replicate.add_argument('--clustered-port', default=5984, - help='override clustered port (default 5984)') - parser_replicate.add_argument('dbs', metavar='db', type=str, nargs="*", - help="database(s) to be processed") + """ + ), + ) + parser_replicate.add_argument( + "-a", "--all-dbs", action="store_true", help="act on all databases available" + ) + parser_replicate.add_argument( + "-i", + "--include-system-dbs", + action="store_true", + help="include system databases 
(_users, _replicator, etc.)", + ) + parser_replicate.add_argument( + "-q", "--quiet", action="store_true", help="suppress all output" + ) + parser_replicate.add_argument( + "-n", + "--hide-progress-bar", + action="store_true", + help="suppress progress bar display", + ) + parser_replicate.add_argument( + "-f", + "--filter-deleted", + action="store_true", + help="filter deleted document tombstones during replication", + ) + parser_replicate.add_argument( + "-t", + "--timeout", + default=30, + help="stalled replication timeout threshhold in s (def: 30)", + ) + parser_replicate.add_argument( + "-l", "--login", default="admin", help="specify login (default admin)" + ) + parser_replicate.add_argument("-p", "--password", help="specify password") + parser_replicate.add_argument( + "--local-port", default=5986, help="override local port (default 5986)" + ) + parser_replicate.add_argument( + "--clustered-port", default=5984, help="override clustered port (default 5984)" + ) + parser_replicate.add_argument( + "dbs", metavar="db", type=str, nargs="*", help="database(s) to be processed" + ) parser_replicate.set_defaults(func=_replicate) - parser_rebuild = subparsers.add_parser('rebuild', - help='rebuilds one or more CouchDB 2.x views', + parser_rebuild = subparsers.add_parser( + "rebuild", + help="rebuilds one or more CouchDB 2.x views", formatter_class=argparse.RawTextHelpFormatter, - description=textwrap.dedent('''\ + description=textwrap.dedent( + """\ Examples: couchup rebuild movies couchup rebuild movies by_name couchup rebuild -a -q -p mysecretpassword - ''')) - parser_rebuild.add_argument('-a', '--all-dbs', action='store_true', - help='act on all databases available') - parser_rebuild.add_argument('-q', '--quiet', action='store_true', - help='suppress all output') - parser_rebuild.add_argument('-t', '--timeout', default=5, - help='timeout for waiting for view rebuild in s (default: 5)') - parser_rebuild.add_argument('-i', '--include-system-dbs', - action='store_true', 
- help='include system databases (_users, _replicator, etc.)') - parser_rebuild.add_argument('-l', '--login', default='admin', - help='specify login (default admin)') - parser_rebuild.add_argument('-p', '--password', - help='specify password') - parser_rebuild.add_argument('--local-port', default=5986, - help='override local port (default 5986)') - parser_rebuild.add_argument('--clustered-port', default=5984, - help='override clustered port (default 5984)') - parser_rebuild.add_argument('db', metavar='db', type=str, nargs="?", - help="database to be processed") - parser_rebuild.add_argument('views', metavar='view', type=str, nargs="*", - help="view(s) to be processed (all by default)") + """ + ), + ) + parser_rebuild.add_argument( + "-a", "--all-dbs", action="store_true", help="act on all databases available" + ) + parser_rebuild.add_argument( + "-q", "--quiet", action="store_true", help="suppress all output" + ) + parser_rebuild.add_argument( + "-t", + "--timeout", + default=5, + help="timeout for waiting for view rebuild in s (default: 5)", + ) + parser_rebuild.add_argument( + "-i", + "--include-system-dbs", + action="store_true", + help="include system databases (_users, _replicator, etc.)", + ) + parser_rebuild.add_argument( + "-l", "--login", default="admin", help="specify login (default admin)" + ) + parser_rebuild.add_argument("-p", "--password", help="specify password") + parser_rebuild.add_argument( + "--local-port", default=5986, help="override local port (default 5986)" + ) + parser_rebuild.add_argument( + "--clustered-port", default=5984, help="override clustered port (default 5984)" + ) + parser_rebuild.add_argument( + "db", metavar="db", type=str, nargs="?", help="database to be processed" + ) + parser_rebuild.add_argument( + "views", + metavar="view", + type=str, + nargs="*", + help="view(s) to be processed (all by default)", + ) parser_rebuild.set_defaults(func=_rebuild) - parser_delete = subparsers.add_parser('delete', - help='deletes one or more 
CouchDB 1.x databases', + parser_delete = subparsers.add_parser( + "delete", + help="deletes one or more CouchDB 1.x databases", formatter_class=argparse.RawTextHelpFormatter, - description=textwrap.dedent('''\ + description=textwrap.dedent( + """\ Examples: couchup delete movies couchup delete -q -p mysecretpassword movies - ''')) - parser_delete.add_argument('-a', '--all-dbs', action='store_true', - help='act on all databases available') - parser_delete.add_argument('-f', '--force', action='store_true', - help='force deletion even if 1.x and 2.x databases are not identical') - parser_delete.add_argument('-q', '--quiet', action='store_true', - help='suppress all output') - parser_delete.add_argument('-l', '--login', default='admin', - help='specify login (default admin)') - parser_delete.add_argument('-p', '--password', - help='specify password') - parser_delete.add_argument('--local-port', default=5986, - help='override local port (default 5986)') - parser_delete.add_argument('--clustered-port', default=5984, - help='override clustered port (default 5984)') - parser_delete.add_argument('dbs', metavar='db', type=str, nargs="*", - help="database(s) to be processed") + """ + ), + ) + parser_delete.add_argument( + "-a", "--all-dbs", action="store_true", help="act on all databases available" + ) + parser_delete.add_argument( + "-f", + "--force", + action="store_true", + help="force deletion even if 1.x and 2.x databases are not identical", + ) + parser_delete.add_argument( + "-q", "--quiet", action="store_true", help="suppress all output" + ) + parser_delete.add_argument( + "-l", "--login", default="admin", help="specify login (default admin)" + ) + parser_delete.add_argument("-p", "--password", help="specify password") + parser_delete.add_argument( + "--local-port", default=5986, help="override local port (default 5986)" + ) + parser_delete.add_argument( + "--clustered-port", default=5984, help="override clustered port (default 5984)" + ) + 
parser_delete.add_argument( + "dbs", metavar="db", type=str, nargs="*", help="database(s) to be processed" + ) parser_delete.set_defaults(func=_delete) args = parser.parse_args(argv[1:]) @@ -504,5 +606,6 @@ def main(argv): parser.print_help() sys.exit(0) -if __name__ == '__main__': + +if __name__ == "__main__": main(sys.argv) diff --git a/src/mango/test/01-index-crud-test.py b/src/mango/test/01-index-crud-test.py index f57db39af87..b60239992c1 100644 --- a/src/mango/test/01-index-crud-test.py +++ b/src/mango/test/01-index-crud-test.py @@ -17,20 +17,11 @@ import unittest DOCS = [ - { - "_id": "1", - "name": "Jimi", - "age": 10, - "cars": 1 - }, - { - "_id": "2", - "name": "kate", - "age": 8, - "cars": 0 - } + {"_id": "1", "name": "Jimi", "age": 10, "cars": 1}, + {"_id": "2", "name": "kate", "age": 8, "cars": 0}, ] + class IndexCrudTests(mango.DbPerClass): def setUp(self): self.db.recreate() @@ -46,7 +37,7 @@ def test_bad_fields(self): [{"foo": 2}], [{"foo": "asc", "bar": "desc"}], [{"foo": "asc"}, {"bar": "desc"}], - [""] + [""], ] for fields in bad_fields: try: @@ -62,27 +53,23 @@ def test_bad_types(self): True, False, 1.5, - "foo", # Future support - "geo", # Future support + "foo", # Future support + "geo", # Future support {"foo": "bar"}, - ["baz", 3.0] + ["baz", 3.0], ] for bt in bad_types: try: self.db.create_index(["foo"], idx_type=bt) except Exception as e: - self.assertEqual(e.response.status_code, 400, (bt, e.response.status_code)) + self.assertEqual( + e.response.status_code, 400, (bt, e.response.status_code) + ) else: raise AssertionError("bad create index") def test_bad_names(self): - bad_names = [ - True, - False, - 1.5, - {"foo": "bar"}, - [None, False] - ] + bad_names = [True, False, 1.5, {"foo": "bar"}, [None, False]] for bn in bad_names: try: self.db.create_index(["foo"], name=bn) @@ -136,7 +123,7 @@ def test_read_idx_doc(self): doc = self.db.open_doc(ddocid) self.assertEqual(doc["_id"], ddocid) info = self.db.ddoc_info(ddocid) - 
self.assertEqual(info["name"], ddocid.split('_design/')[-1]) + self.assertEqual(info["name"], ddocid.split("_design/")[-1]) def test_delete_idx_escaped(self): self.db.create_index(["foo", "bar"], name="idx_01") @@ -271,7 +258,7 @@ def test_limit_skip_index(self): assert ret is True self.assertEqual(len(self.db.list_indexes(limit=2)), 2) - self.assertEqual(len(self.db.list_indexes(limit=5,skip=4)), 2) + self.assertEqual(len(self.db.list_indexes(limit=5, skip=4)), 2) self.assertEqual(len(self.db.list_indexes(skip=5)), 1) self.assertEqual(len(self.db.list_indexes(skip=6)), 0) self.assertEqual(len(self.db.list_indexes(skip=100)), 0) @@ -291,19 +278,19 @@ def test_out_of_sync(self): self.db.save_docs(copy.deepcopy(DOCS)) self.db.create_index(["age"], name="age") - selector = { - "age": { - "$gt": 0 - }, - } - docs = self.db.find(selector, - use_index="_design/a017b603a47036005de93034ff689bbbb6a873c4") + selector = {"age": {"$gt": 0}} + docs = self.db.find( + selector, use_index="_design/a017b603a47036005de93034ff689bbbb6a873c4" + ) self.assertEqual(len(docs), 2) self.db.delete_doc("1") - docs1 = self.db.find(selector, update="False", - use_index="_design/a017b603a47036005de93034ff689bbbb6a873c4") + docs1 = self.db.find( + selector, + update="False", + use_index="_design/a017b603a47036005de93034ff689bbbb6a873c4", + ) self.assertEqual(len(docs1), 1) @@ -314,18 +301,18 @@ def setUp(self): def test_create_text_idx(self): fields = [ - {"name":"stringidx", "type" : "string"}, - {"name":"booleanidx", "type": "boolean"} + {"name": "stringidx", "type": "string"}, + {"name": "booleanidx", "type": "boolean"}, ] ret = self.db.create_text_index(fields=fields, name="text_idx_01") assert ret is True for idx in self.db.list_indexes(): if idx["name"] != "text_idx_01": continue - self.assertEqual(idx["def"]["fields"], [ - {"stringidx": "string"}, - {"booleanidx": "boolean"} - ]) + self.assertEqual( + idx["def"]["fields"], + [{"stringidx": "string"}, {"booleanidx": "boolean"}], + ) return 
raise AssertionError("index not created") @@ -339,9 +326,9 @@ def test_create_bad_text_idx(self): [{"name": "foo2"}], [{"name": "foo3", "type": "garbage"}], [{"type": "number"}], - [{"name": "age", "type": "number"} , {"name": "bad"}], - [{"name": "age", "type": "number"} , "bla"], - [{"name": "", "type": "number"} , "bla"] + [{"name": "age", "type": "number"}, {"name": "bad"}], + [{"name": "age", "type": "number"}, "bla"], + [{"name": "", "type": "number"}, "bla"], ] for fields in bad_fields: try: @@ -350,7 +337,7 @@ def test_create_bad_text_idx(self): self.assertEqual(e.response.status_code, 400) else: raise AssertionError("bad create text index") - + def test_limit_skip_index(self): fields = ["field1"] ret = self.db.create_index(fields, name="idx_01") @@ -369,14 +356,14 @@ def test_limit_skip_index(self): assert ret is True fields = [ - {"name":"stringidx", "type" : "string"}, - {"name":"booleanidx", "type": "boolean"} + {"name": "stringidx", "type": "string"}, + {"name": "booleanidx", "type": "boolean"}, ] ret = self.db.create_text_index(fields=fields, name="idx_05") assert ret is True self.assertEqual(len(self.db.list_indexes(limit=2)), 2) - self.assertEqual(len(self.db.list_indexes(limit=5,skip=4)), 2) + self.assertEqual(len(self.db.list_indexes(limit=5, skip=4)), 2) self.assertEqual(len(self.db.list_indexes(skip=5)), 1) self.assertEqual(len(self.db.list_indexes(skip=6)), 0) self.assertEqual(len(self.db.list_indexes(skip=100)), 0) diff --git a/src/mango/test/02-basic-find-test.py b/src/mango/test/02-basic-find-test.py index cfb0bae0963..0fc4248a8ae 100644 --- a/src/mango/test/02-basic-find-test.py +++ b/src/mango/test/02-basic-find-test.py @@ -14,8 +14,8 @@ import mango -class BasicFindTests(mango.UserDocsTests): +class BasicFindTests(mango.UserDocsTests): def test_bad_selector(self): bad_selectors = [ None, @@ -23,9 +23,9 @@ def test_bad_selector(self): False, 1.0, "foobarbaz", - {"foo":{"$not_an_op": 2}}, - {"$gt":2}, - [None, "bing"] + {"foo": 
{"$not_an_op": 2}}, + {"$gt": 2}, + [None, "bing"], ] for bs in bad_selectors: try: @@ -36,112 +36,84 @@ def test_bad_selector(self): raise AssertionError("bad find") def test_bad_limit(self): - bad_limits = [ - None, - True, - False, - -1, - 1.2, - "no limit!", - {"foo": "bar"}, - [2] - ], + bad_limits = ([None, True, False, -1, 1.2, "no limit!", {"foo": "bar"}, [2]],) for bl in bad_limits: try: - self.db.find({"int":{"$gt":2}}, limit=bl) + self.db.find({"int": {"$gt": 2}}, limit=bl) except Exception as e: assert e.response.status_code == 400 else: raise AssertionError("bad find") def test_bad_skip(self): - bad_skips = [ - None, - True, - False, - -3, - 1.2, - "no limit!", - {"foo": "bar"}, - [2] - ], + bad_skips = ([None, True, False, -3, 1.2, "no limit!", {"foo": "bar"}, [2]],) for bs in bad_skips: try: - self.db.find({"int":{"$gt":2}}, skip=bs) + self.db.find({"int": {"$gt": 2}}, skip=bs) except Exception as e: assert e.response.status_code == 400 else: raise AssertionError("bad find") def test_bad_sort(self): - bad_sorts = [ - None, - True, - False, - 1.2, - "no limit!", - {"foo": "bar"}, - [2], - [{"foo":"asc", "bar": "asc"}], - [{"foo":"asc"}, {"bar":"desc"}], - ], + bad_sorts = ( + [ + None, + True, + False, + 1.2, + "no limit!", + {"foo": "bar"}, + [2], + [{"foo": "asc", "bar": "asc"}], + [{"foo": "asc"}, {"bar": "desc"}], + ], + ) for bs in bad_sorts: try: - self.db.find({"int":{"$gt":2}}, sort=bs) + self.db.find({"int": {"$gt": 2}}, sort=bs) except Exception as e: assert e.response.status_code == 400 else: raise AssertionError("bad find") def test_bad_fields(self): - bad_fields = [ - None, - True, - False, - 1.2, - "no limit!", - {"foo": "bar"}, - [2], - [[]], - ["foo", 2.0], - ], + bad_fields = ( + [ + None, + True, + False, + 1.2, + "no limit!", + {"foo": "bar"}, + [2], + [[]], + ["foo", 2.0], + ], + ) for bf in bad_fields: try: - self.db.find({"int":{"$gt":2}}, fields=bf) + self.db.find({"int": {"$gt": 2}}, fields=bf) except Exception as e: assert 
e.response.status_code == 400 else: raise AssertionError("bad find") def test_bad_r(self): - bad_rs = [ - None, - True, - False, - 1.2, - "no limit!", - {"foo": "bar"}, - [2], - ], + bad_rs = ([None, True, False, 1.2, "no limit!", {"foo": "bar"}, [2]],) for br in bad_rs: try: - self.db.find({"int":{"$gt":2}}, r=br) + self.db.find({"int": {"$gt": 2}}, r=br) except Exception as e: assert e.response.status_code == 400 else: raise AssertionError("bad find") def test_bad_conflicts(self): - bad_conflicts = [ - None, - 1.2, - "no limit!", - {"foo": "bar"}, - [2], - ], + bad_conflicts = ([None, 1.2, "no limit!", {"foo": "bar"}, [2]],) for bc in bad_conflicts: try: - self.db.find({"int":{"$gt":2}}, conflicts=bc) + self.db.find({"int": {"$gt": 2}}, conflicts=bc) except Exception as e: assert e.response.status_code == 400 else: @@ -161,8 +133,10 @@ def test_multi_cond_and(self): def test_multi_cond_duplicate_field(self): # need to explicitly define JSON as dict won't allow duplicate keys - body = ("{\"selector\":{\"location.city\":{\"$regex\": \"^L+\"}," - "\"location.city\":{\"$exists\":true}}}") + body = ( + '{"selector":{"location.city":{"$regex": "^L+"},' + '"location.city":{"$exists":true}}}' + ) r = self.db.sess.post(self.db.path("_find"), data=body) r.raise_for_status() docs = r.json()["docs"] @@ -172,27 +146,25 @@ def test_multi_cond_duplicate_field(self): self.assertEqual(len(docs), 15) def test_multi_cond_or(self): - docs = self.db.find({ - "$and":[ - {"age":{"$gte": 75}}, - {"$or": [ - {"name.first": "Mathis"}, - {"name.first": "Whitley"} - ]} + docs = self.db.find( + { + "$and": [ + {"age": {"$gte": 75}}, + {"$or": [{"name.first": "Mathis"}, {"name.first": "Whitley"}]}, ] - }) + } + ) assert len(docs) == 2 assert docs[0]["user_id"] == 11 assert docs[1]["user_id"] == 13 def test_multi_col_idx(self): - docs = self.db.find({ - "location.state": {"$and": [ - {"$gt": "Hawaii"}, - {"$lt": "Maine"} - ]}, - "location.city": {"$lt": "Longbranch"} - }) + docs = 
self.db.find( + { + "location.state": {"$and": [{"$gt": "Hawaii"}, {"$lt": "Maine"}]}, + "location.city": {"$lt": "Longbranch"}, + } + ) assert len(docs) == 1 assert docs[0]["user_id"] == 6 @@ -226,32 +198,32 @@ def test_skip(self): assert len(docs) == (15 - s) def test_sort(self): - docs1 = self.db.find({"age": {"$gt": 0}}, sort=[{"age":"asc"}]) + docs1 = self.db.find({"age": {"$gt": 0}}, sort=[{"age": "asc"}]) docs2 = list(sorted(docs1, key=lambda d: d["age"])) assert docs1 is not docs2 and docs1 == docs2 - docs1 = self.db.find({"age": {"$gt": 0}}, sort=[{"age":"desc"}]) + docs1 = self.db.find({"age": {"$gt": 0}}, sort=[{"age": "desc"}]) docs2 = list(reversed(sorted(docs1, key=lambda d: d["age"]))) assert docs1 is not docs2 and docs1 == docs2 def test_sort_desc_complex(self): - docs = self.db.find({ - "company": {"$lt": "M"}, - "$or": [ - {"company": "Dreamia"}, - {"manager": True} - ] - }, sort=[{"company":"desc"}, {"manager":"desc"}]) - + docs = self.db.find( + { + "company": {"$lt": "M"}, + "$or": [{"company": "Dreamia"}, {"manager": True}], + }, + sort=[{"company": "desc"}, {"manager": "desc"}], + ) + companies_returned = list(d["company"] for d in docs) desc_companies = sorted(companies_returned, reverse=True) self.assertEqual(desc_companies, companies_returned) def test_sort_with_primary_sort_not_in_selector(self): try: - docs = self.db.find({ - "name.last": {"$lt": "M"} - }, sort=[{"name.first":"desc"}]) + docs = self.db.find( + {"name.last": {"$lt": "M"}}, sort=[{"name.first": "desc"}] + ) except Exception as e: self.assertEqual(e.response.status_code, 400) resp = e.response.json() @@ -260,19 +232,21 @@ def test_sort_with_primary_sort_not_in_selector(self): raise AssertionError("expected find error") def test_sort_exists_true(self): - docs1 = self.db.find({"age": {"$gt": 0, "$exists": True}}, sort=[{"age":"asc"}]) + docs1 = self.db.find( + {"age": {"$gt": 0, "$exists": True}}, sort=[{"age": "asc"}] + ) docs2 = list(sorted(docs1, key=lambda d: d["age"])) 
assert docs1 is not docs2 and docs1 == docs2 def test_sort_desc_complex_error(self): try: - self.db.find({ - "company": {"$lt": "M"}, - "$or": [ - {"company": "Dreamia"}, - {"manager": True} - ] - }, sort=[{"company":"desc"}]) + self.db.find( + { + "company": {"$lt": "M"}, + "$or": [{"company": "Dreamia"}, {"manager": True}], + }, + sort=[{"company": "desc"}], + ) except Exception as e: self.assertEqual(e.response.status_code, 400) resp = e.response.json() @@ -294,39 +268,25 @@ def test_r(self): def test_empty(self): docs = self.db.find({}) - # 15 users + # 15 users assert len(docs) == 15 def test_empty_subsel(self): - docs = self.db.find({ - "_id": {"$gt": None}, - "location": {} - }) + docs = self.db.find({"_id": {"$gt": None}, "location": {}}) assert len(docs) == 0 def test_empty_subsel_match(self): self.db.save_docs([{"user_id": "eo", "empty_obj": {}}]) - docs = self.db.find({ - "_id": {"$gt": None}, - "empty_obj": {} - }) + docs = self.db.find({"_id": {"$gt": None}, "empty_obj": {}}) assert len(docs) == 1 assert docs[0]["user_id"] == "eo" def test_unsatisfiable_range(self): - docs = self.db.find({ - "$and":[ - {"age":{"$gt": 0}}, - {"age":{"$lt": 0}} - ] - }) + docs = self.db.find({"$and": [{"age": {"$gt": 0}}, {"age": {"$lt": 0}}]}) assert len(docs) == 0 def test_explain_view_args(self): - explain = self.db.find({ - "age":{"$gt": 0} - }, fields=["manager"], - explain=True) + explain = self.db.find({"age": {"$gt": 0}}, fields=["manager"], explain=True) assert explain["mrargs"]["stable"] == False assert explain["mrargs"]["update"] == True assert explain["mrargs"]["reduce"] == False @@ -335,8 +295,7 @@ def test_explain_view_args(self): assert explain["mrargs"]["include_docs"] == True def test_sort_with_all_docs(self): - explain = self.db.find({ - "_id": {"$gt": 0}, - "age": {"$gt": 0} - }, sort=["_id"], explain=True) + explain = self.db.find( + {"_id": {"$gt": 0}, "age": {"$gt": 0}}, sort=["_id"], explain=True + ) self.assertEqual(explain["index"]["type"], 
"special") diff --git a/src/mango/test/03-operator-test.py b/src/mango/test/03-operator-test.py index 4650c7e84e0..935f470bb97 100644 --- a/src/mango/test/03-operator-test.py +++ b/src/mango/test/03-operator-test.py @@ -13,8 +13,8 @@ import mango import unittest -class OperatorTests: +class OperatorTests: def assertUserIds(self, user_ids, docs): user_ids_returned = list(d["user_id"] for d in docs) user_ids.sort() @@ -22,115 +22,58 @@ def assertUserIds(self, user_ids, docs): self.assertEqual(user_ids, user_ids_returned) def test_all(self): - docs = self.db.find({ - "manager": True, - "favorites": {"$all": ["Lisp", "Python"]} - }) + docs = self.db.find( + {"manager": True, "favorites": {"$all": ["Lisp", "Python"]}} + ) self.assertEqual(len(docs), 3) - user_ids = [2,12,9] + user_ids = [2, 12, 9] self.assertUserIds(user_ids, docs) def test_all_non_array(self): - docs = self.db.find({ - "manager": True, - "location": {"$all": ["Ohai"]} - }) + docs = self.db.find({"manager": True, "location": {"$all": ["Ohai"]}}) self.assertEqual(len(docs), 0) def test_elem_match(self): emdocs = [ - { - "user_id": "a", - "bang": [{ - "foo": 1, - "bar": 2 - }] - }, - { - "user_id": "b", - "bang": [{ - "foo": 2, - "bam": True - }] - } + {"user_id": "a", "bang": [{"foo": 1, "bar": 2}]}, + {"user_id": "b", "bang": [{"foo": 2, "bam": True}]}, ] self.db.save_docs(emdocs, w=3) - docs = self.db.find({ - "_id": {"$gt": None}, - "bang": {"$elemMatch": { - "foo": {"$gte": 1}, - "bam": True - }} - }) + docs = self.db.find( + { + "_id": {"$gt": None}, + "bang": {"$elemMatch": {"foo": {"$gte": 1}, "bam": True}}, + } + ) self.assertEqual(len(docs), 1) self.assertEqual(docs[0]["user_id"], "b") def test_all_match(self): amdocs = [ - { - "user_id": "a", - "bang": [ - { - "foo": 1, - "bar": 2 - }, - { - "foo": 3, - "bar": 4 - } - ] - }, - { - "user_id": "b", - "bang": [ - { - "foo": 1, - "bar": 2 - }, - { - "foo": 4, - "bar": 4 - } - ] - } + {"user_id": "a", "bang": [{"foo": 1, "bar": 2}, {"foo": 3, "bar": 
4}]}, + {"user_id": "b", "bang": [{"foo": 1, "bar": 2}, {"foo": 4, "bar": 4}]}, ] self.db.save_docs(amdocs, w=3) - docs = self.db.find({ - "bang": {"$allMatch": { - "foo": {"$mod": [2,1]}, - "bar": {"$mod": [2,0]} - }} - }) + docs = self.db.find( + {"bang": {"$allMatch": {"foo": {"$mod": [2, 1]}, "bar": {"$mod": [2, 0]}}}} + ) self.assertEqual(len(docs), 1) self.assertEqual(docs[0]["user_id"], "a") - + def test_empty_all_match(self): - amdocs = [ - { - "bad_doc": "a", - "emptybang": [] - } - ] + amdocs = [{"bad_doc": "a", "emptybang": []}] self.db.save_docs(amdocs, w=3) - docs = self.db.find({ - "emptybang": {"$allMatch": { - "foo": {"$eq": 2} - }} - }) + docs = self.db.find({"emptybang": {"$allMatch": {"foo": {"$eq": 2}}}}) self.assertEqual(len(docs), 0) def test_in_operator_array(self): - docs = self.db.find({ - "manager": True, - "favorites": {"$in": ["Ruby", "Python"]} - }) - self.assertUserIds([2,6,7,9,11,12], docs) + docs = self.db.find({"manager": True, "favorites": {"$in": ["Ruby", "Python"]}}) + self.assertUserIds([2, 6, 7, 9, 11, 12], docs) def test_nin_operator_array(self): - docs = self.db.find({ - "manager": True, - "favorites": {"$nin": ["Erlang", "Python"]} - }) + docs = self.db.find( + {"manager": True, "favorites": {"$nin": ["Erlang", "Python"]}} + ) self.assertEqual(len(docs), 4) for doc in docs: if isinstance(doc["favorites"], list): @@ -138,120 +81,99 @@ def test_nin_operator_array(self): self.assertNotIn("Python", doc["favorites"]) def test_regex(self): - docs = self.db.find({ - "age": {"$gt": 40}, - "location.state": {"$regex": "(?i)new.*"} - }) + docs = self.db.find( + {"age": {"$gt": 40}, "location.state": {"$regex": "(?i)new.*"}} + ) self.assertEqual(len(docs), 2) - self.assertUserIds([2,10], docs) + self.assertUserIds([2, 10], docs) def test_exists_false(self): - docs = self.db.find({ - "age": {"$gt": 0}, - "twitter": {"$exists": False} - }) - user_ids = [2,3,5,6,7,8,10,11,12,14] + docs = self.db.find({"age": {"$gt": 0}, "twitter": 
{"$exists": False}}) + user_ids = [2, 3, 5, 6, 7, 8, 10, 11, 12, 14] self.assertUserIds(user_ids, docs) for d in docs: self.assertNotIn("twitter", d) def test_eq_null_does_not_include_missing(self): - docs = self.db.find({ - "age": {"$gt": 0}, - "twitter": None - }) + docs = self.db.find({"age": {"$gt": 0}, "twitter": None}) user_ids = [9] self.assertUserIds(user_ids, docs) for d in docs: self.assertEqual(d["twitter"], None) def test_ne_includes_null_but_not_missing(self): - docs = self.db.find({ - "twitter": {"$ne": "notamatch"} - }) - user_ids = [0,1,4,9,13] + docs = self.db.find({"twitter": {"$ne": "notamatch"}}) + user_ids = [0, 1, 4, 9, 13] self.assertUserIds(user_ids, docs) for d in docs: self.assertIn("twitter", d) # ideally this work be consistent across index types but, alas, it is not - @unittest.skipUnless(not mango.has_text_service(), - "text indexes do not support range queries across type boundaries") + @unittest.skipUnless( + not mango.has_text_service(), + "text indexes do not support range queries across type boundaries", + ) def test_lt_includes_null_but_not_missing(self): - docs = self.db.find({ - "twitter": {"$lt": 1} - }) + docs = self.db.find({"twitter": {"$lt": 1}}) user_ids = [9] self.assertUserIds(user_ids, docs) for d in docs: self.assertEqual(d["twitter"], None) - @unittest.skipUnless(not mango.has_text_service(), - "text indexes do not support range queries across type boundaries") + @unittest.skipUnless( + not mango.has_text_service(), + "text indexes do not support range queries across type boundaries", + ) def test_lte_includes_null_but_not_missing(self): - docs = self.db.find({ - "twitter": {"$lt": 1} - }) + docs = self.db.find({"twitter": {"$lt": 1}}) user_ids = [9] self.assertUserIds(user_ids, docs) for d in docs: self.assertEqual(d["twitter"], None) def test_lte_null_includes_null_but_not_missing(self): - docs = self.db.find({ - "twitter": {"$lte": None} - }) + docs = self.db.find({"twitter": {"$lte": None}}) user_ids = [9] 
self.assertUserIds(user_ids, docs) for d in docs: self.assertEqual(d["twitter"], None) def test_lte_at_z_except_null_excludes_null_and_missing(self): - docs = self.db.find({ - "twitter": {"$and": [ - {"$lte": "@z"}, - {"$ne": None} - ]} - }) - user_ids = [0,1,4,13] + docs = self.db.find({"twitter": {"$and": [{"$lte": "@z"}, {"$ne": None}]}}) + user_ids = [0, 1, 4, 13] self.assertUserIds(user_ids, docs) for d in docs: self.assertNotEqual(d["twitter"], None) def test_range_gte_null_includes_null_but_not_missing(self): - docs = self.db.find({ - "twitter": {"$gte": None} - }) + docs = self.db.find({"twitter": {"$gte": None}}) self.assertGreater(len(docs), 0) for d in docs: self.assertIn("twitter", d) def test_exists_false_returns_missing_but_not_null(self): - docs = self.db.find({ - "twitter": {"$exists": False} - }) + docs = self.db.find({"twitter": {"$exists": False}}) self.assertGreater(len(docs), 0) for d in docs: self.assertNotIn("twitter", d) - - @unittest.skipUnless(not mango.has_text_service(), - "text indexes do not support range queries across type boundaries") + + @unittest.skipUnless( + not mango.has_text_service(), + "text indexes do not support range queries across type boundaries", + ) def test_lte_respsects_unicode_collation(self): - docs = self.db.find({ - "ordered": {"$lte": "a"} - }) - user_ids = [7,8,9,10,11,12] + docs = self.db.find({"ordered": {"$lte": "a"}}) + user_ids = [7, 8, 9, 10, 11, 12] self.assertUserIds(user_ids, docs) - - @unittest.skipUnless(not mango.has_text_service(), - "text indexes do not support range queries across type boundaries") + + @unittest.skipUnless( + not mango.has_text_service(), + "text indexes do not support range queries across type boundaries", + ) def test_gte_respsects_unicode_collation(self): - docs = self.db.find({ - "ordered": {"$gte": "a"} - }) - user_ids = [12,13,14] + docs = self.db.find({"ordered": {"$gte": "a"}}) + user_ids = [12, 13, 14] self.assertUserIds(user_ids, docs) - class 
OperatorJSONTests(mango.UserDocsTests, OperatorTests): @@ -266,11 +188,7 @@ class OperatorTextTests(mango.UserDocsTextTests, OperatorTests): class OperatorAllDocsTests(mango.UserDocsTestsNoIndexes, OperatorTests): def test_range_id_eq(self): doc_id = "8e1c90c0-ac18-4832-8081-40d14325bde0" - r = self.db.find({ - "_id": doc_id - }, explain=True, return_raw=True) - + r = self.db.find({"_id": doc_id}, explain=True, return_raw=True) + self.assertEqual(r["mrargs"]["end_key"], doc_id) self.assertEqual(r["mrargs"]["start_key"], doc_id) - - diff --git a/src/mango/test/04-key-tests.py b/src/mango/test/04-key-tests.py index 29451912d85..a9551c6f865 100644 --- a/src/mango/test/04-key-tests.py +++ b/src/mango/test/04-key-tests.py @@ -16,40 +16,29 @@ import unittest TEST_DOCS = [ - { - "type": "complex_key", - "title": "normal key" - }, + {"type": "complex_key", "title": "normal key"}, { "type": "complex_key", "title": "key with dot", "dot.key": "dot's value", - "none": { - "dot": "none dot's value" - }, - "name.first" : "Kvothe" + "none": {"dot": "none dot's value"}, + "name.first": "Kvothe", }, { "type": "complex_key", "title": "key with peso", "$key": "peso", - "deep": { - "$key": "deep peso" - }, - "name": {"first" : "Master Elodin"} - }, - { - "type": "complex_key", - "title": "unicode key", - "": "apple" + "deep": {"$key": "deep peso"}, + "name": {"first": "Master Elodin"}, }, + {"type": "complex_key", "title": "unicode key", "": "apple"}, { "title": "internal_fields_format", - "utf8-1[]:string" : "string", - "utf8-2[]:boolean[]" : True, - "utf8-3[]:number" : 9, - "utf8-3[]:null" : None - } + "utf8-1[]:string": "string", + "utf8-2[]:boolean[]": True, + "utf8-3[]:number": 9, + "utf8-3[]:null": None, + }, ] @@ -73,33 +62,39 @@ def run_check(self, query, check, fields=None, indexes=None): def test_dot_key(self): query = {"type": "complex_key"} fields = ["title", "dot\\.key", "none.dot"] + def check(docs): assert len(docs) == 4 assert "dot.key" in docs[1] assert 
docs[1]["dot.key"] == "dot's value" assert "none" in docs[1] assert docs[1]["none"]["dot"] == "none dot's value" + self.run_check(query, check, fields=fields) def test_peso_key(self): query = {"type": "complex_key"} fields = ["title", "$key", "deep.$key"] + def check(docs): assert len(docs) == 4 assert "$key" in docs[2] assert docs[2]["$key"] == "peso" assert "deep" in docs[2] assert docs[2]["deep"]["$key"] == "deep peso" + self.run_check(query, check, fields=fields) def test_unicode_in_fieldname(self): query = {"type": "complex_key"} fields = ["title", ""] + def check(docs): assert len(docs) == 4 # note:  == \uf8ff - assert '\uf8ff' in docs[3] - assert docs[3]['\uf8ff'] == "apple" + assert "\uf8ff" in docs[3] + assert docs[3]["\uf8ff"] == "apple" + self.run_check(query, check, fields=fields) # The rest of these tests are only run against the text @@ -107,45 +102,57 @@ def check(docs): # field *name* escaping in the index. def test_unicode_in_selector_field(self): - query = {"" : "apple"} + query = {"": "apple"} + def check(docs): assert len(docs) == 1 assert docs[0]["\uf8ff"] == "apple" + self.run_check(query, check, indexes=["text"]) def test_internal_field_tests(self): queries = [ - {"utf8-1[]:string" : "string"}, - {"utf8-2[]:boolean[]" : True}, - {"utf8-3[]:number" : 9}, - {"utf8-3[]:null" : None} + {"utf8-1[]:string": "string"}, + {"utf8-2[]:boolean[]": True}, + {"utf8-3[]:number": 9}, + {"utf8-3[]:null": None}, ] + def check(docs): assert len(docs) == 1 assert docs[0]["title"] == "internal_fields_format" + for query in queries: self.run_check(query, check, indexes=["text"]) def test_escape_period(self): - query = {"name\\.first" : "Kvothe"} + query = {"name\\.first": "Kvothe"} + def check(docs): assert len(docs) == 1 assert docs[0]["name.first"] == "Kvothe" + self.run_check(query, check, indexes=["text"]) - query = {"name.first" : "Kvothe"} + query = {"name.first": "Kvothe"} + def check_empty(docs): assert len(docs) == 0 + self.run_check(query, 
check_empty, indexes=["text"]) def test_object_period(self): - query = {"name.first" : "Master Elodin"} + query = {"name.first": "Master Elodin"} + def check(docs): assert len(docs) == 1 assert docs[0]["title"] == "key with peso" + self.run_check(query, check, indexes=["text"]) - query = {"name\\.first" : "Master Elodin"} + query = {"name\\.first": "Master Elodin"} + def check_empty(docs): assert len(docs) == 0 + self.run_check(query, check_empty, indexes=["text"]) diff --git a/src/mango/test/05-index-selection-test.py b/src/mango/test/05-index-selection-test.py index 2a40fda3835..e7ea329c6f9 100644 --- a/src/mango/test/05-index-selection-test.py +++ b/src/mango/test/05-index-selection-test.py @@ -16,59 +16,61 @@ class IndexSelectionTests: - def test_basic(self): resp = self.db.find({"age": 123}, explain=True) self.assertEqual(resp["index"]["type"], "json") def test_with_and(self): - resp = self.db.find({ + resp = self.db.find( + { "name.first": "Stephanie", - "name.last": "This doesn't have to match anything." 
- }, explain=True) + "name.last": "This doesn't have to match anything.", + }, + explain=True, + ) self.assertEqual(resp["index"]["type"], "json") def test_with_nested_and(self): - resp = self.db.find({ - "name.first": { - "$gt": "a", - "$lt": "z" - }, - "name.last": "Foo" - }, explain=True) + resp = self.db.find( + {"name.first": {"$gt": "a", "$lt": "z"}, "name.last": "Foo"}, explain=True + ) self.assertEqual(resp["index"]["type"], "json") def test_with_or(self): # index on ["company","manager"] ddocid = "_design/a0c425a60cf3c3c09e3c537c9ef20059dcef9198" - resp = self.db.find({ - "company": { - "$gt": "a", - "$lt": "z" - }, - "$or": [ - {"manager": "Foo"}, - {"manager": "Bar"} - ] - }, explain=True) + resp = self.db.find( + { + "company": {"$gt": "a", "$lt": "z"}, + "$or": [{"manager": "Foo"}, {"manager": "Bar"}], + }, + explain=True, + ) self.assertEqual(resp["index"]["ddoc"], ddocid) def test_use_most_columns(self): # ddoc id for the age index ddocid = "_design/ad3d537c03cd7c6a43cf8dff66ef70ea54c2b40f" - resp = self.db.find({ + resp = self.db.find( + { "name.first": "Stephanie", "name.last": "Something or other", - "age": {"$gt": 1} - }, explain=True) + "age": {"$gt": 1}, + }, + explain=True, + ) self.assertNotEqual(resp["index"]["ddoc"], "_design/" + ddocid) - resp = self.db.find({ + resp = self.db.find( + { "name.first": "Stephanie", "name.last": "Something or other", - "age": {"$gt": 1} - }, use_index=ddocid, explain=True) + "age": {"$gt": 1}, + }, + use_index=ddocid, + explain=True, + ) self.assertEqual(resp["index"]["ddoc"], ddocid) def test_no_valid_sort_index(self): @@ -83,16 +85,19 @@ def test_invalid_use_index(self): # ddoc id for the age index ddocid = "_design/ad3d537c03cd7c6a43cf8dff66ef70ea54c2b40f" r = self.db.find({}, use_index=ddocid, return_raw=True) - self.assertEqual(r["warning"], '{0} was not used because it does not contain a valid index for this query.'.format(ddocid)) + self.assertEqual( + r["warning"], + "{0} was not used because it does 
not contain a valid index for this query.".format( + ddocid + ), + ) def test_uses_index_when_no_range_or_equals(self): # index on ["manager"] should be valid because # selector requires "manager" to exist. The # selector doesn't narrow the keyrange so it's # a full index scan - selector = { - "manager": {"$exists": True} - } + selector = {"manager": {"$exists": True}} docs = self.db.find(selector) self.assertEqual(len(docs), 14) @@ -102,12 +107,15 @@ def test_uses_index_when_no_range_or_equals(self): def test_reject_use_index_invalid_fields(self): # index on ["company","manager"] which should not be valid ddocid = "_design/a0c425a60cf3c3c09e3c537c9ef20059dcef9198" - selector = { - "company": "Pharmex" - } + selector = {"company": "Pharmex"} r = self.db.find(selector, use_index=ddocid, return_raw=True) - self.assertEqual(r["warning"], '{0} was not used because it does not contain a valid index for this query.'.format(ddocid)) - + self.assertEqual( + r["warning"], + "{0} was not used because it does not contain a valid index for this query.".format( + ddocid + ), + ) + # should still return a correct result for d in r["docs"]: self.assertEqual(d["company"], "Pharmex") @@ -116,12 +124,15 @@ def test_reject_use_index_ddoc_and_name_invalid_fields(self): # index on ["company","manager"] which should not be valid ddocid = "_design/a0c425a60cf3c3c09e3c537c9ef20059dcef9198" name = "a0c425a60cf3c3c09e3c537c9ef20059dcef9198" - selector = { - "company": "Pharmex" - } - - resp = self.db.find(selector, use_index=[ddocid,name], return_raw=True) - self.assertEqual(resp["warning"], "{0}, {1} was not used because it is not a valid index for this query.".format(ddocid, name)) + selector = {"company": "Pharmex"} + + resp = self.db.find(selector, use_index=[ddocid, name], return_raw=True) + self.assertEqual( + resp["warning"], + "{0}, {1} was not used because it is not a valid index for this query.".format( + ddocid, name + ), + ) # should still return a correct result for d in 
resp["docs"]: @@ -131,11 +142,9 @@ def test_reject_use_index_sort_order(self): # index on ["company","manager"] which should not be valid # and there is no valid fallback (i.e. an index on ["company"]) ddocid = "_design/a0c425a60cf3c3c09e3c537c9ef20059dcef9198" - selector = { - "company": {"$gt": None} - } + selector = {"company": {"$gt": None}} try: - self.db.find(selector, use_index=ddocid, sort=[{"company":"desc"}]) + self.db.find(selector, use_index=ddocid, sort=[{"company": "desc"}]) except Exception as e: self.assertEqual(e.response.status_code, 400) else: @@ -146,15 +155,22 @@ def test_use_index_fallback_if_valid_sort(self): ddocid_invalid = "_design/fallbackfoobar" self.db.create_index(fields=["foo"], ddoc=ddocid_invalid) self.db.create_index(fields=["foo", "bar"], ddoc=ddocid_valid) - selector = { - "foo": {"$gt": None} - } - - resp_explain = self.db.find(selector, sort=["foo", "bar"], use_index=ddocid_invalid, explain=True) - self.assertEqual(resp_explain["index"]["ddoc"], ddocid_valid) - - resp = self.db.find(selector, sort=["foo", "bar"], use_index=ddocid_invalid, return_raw=True) - self.assertEqual(resp["warning"], '{0} was not used because it does not contain a valid index for this query.'.format(ddocid_invalid)) + selector = {"foo": {"$gt": None}} + + resp_explain = self.db.find( + selector, sort=["foo", "bar"], use_index=ddocid_invalid, explain=True + ) + self.assertEqual(resp_explain["index"]["ddoc"], ddocid_valid) + + resp = self.db.find( + selector, sort=["foo", "bar"], use_index=ddocid_invalid, return_raw=True + ) + self.assertEqual( + resp["warning"], + "{0} was not used because it does not contain a valid index for this query.".format( + ddocid_invalid + ), + ) self.assertEqual(len(resp["docs"]), 0) def test_prefer_use_index_over_optimal_index(self): @@ -162,10 +178,7 @@ def test_prefer_use_index_over_optimal_index(self): ddocid_preferred = "_design/testsuboptimal" self.db.create_index(fields=["baz"], ddoc=ddocid_preferred) 
self.db.create_index(fields=["baz", "bar"]) - selector = { - "baz": {"$gt": None}, - "bar": {"$gt": None} - } + selector = {"baz": {"$gt": None}, "bar": {"$gt": None}} resp = self.db.find(selector, use_index=ddocid_preferred, return_raw=True) self.assertTrue("warning" not in resp) @@ -180,45 +193,30 @@ def test_manual_bad_view_idx01(self): "language": "query", "views": { "queryidx1": { - "map": { - "fields": { - "age": "asc" - } - }, + "map": {"fields": {"age": "asc"}}, "reduce": "_count", - "options": { - "def": { - "fields": [ - { - "age": "asc" - } - ] - }, - "w": 2 - } + "options": {"def": {"fields": [{"age": "asc"}]}, "w": 2}, } }, - "views" : { - "views001" : { - "map" : "function(employee){if(employee.training)" + "views": { + "views001": { + "map": "function(employee){if(employee.training)" + "{emit(employee.number, employee.training);}}" } - } + }, } with self.assertRaises(KeyError): self.db.save_doc(design_doc) - def test_explain_sort_reverse(self): - selector = { - "manager": {"$gt": None} - } - resp_explain = self.db.find(selector, fields=["manager"], sort=[{"manager":"desc"}], explain=True) + selector = {"manager": {"$gt": None}} + resp_explain = self.db.find( + selector, fields=["manager"], sort=[{"manager": "desc"}], explain=True + ) self.assertEqual(resp_explain["index"]["type"], "json") - -class JSONIndexSelectionTests(mango.UserDocsTests, IndexSelectionTests): +class JSONIndexSelectionTests(mango.UserDocsTests, IndexSelectionTests): @classmethod def setUpClass(klass): super(JSONIndexSelectionTests, klass).setUpClass() @@ -227,14 +225,12 @@ def test_uses_all_docs_when_fields_do_not_match_selector(self): # index exists on ["company", "manager"] but not ["company"] # so we should fall back to all docs (so we include docs # with no "manager" field) - selector = { - "company": "Pharmex" - } + selector = {"company": "Pharmex"} docs = self.db.find(selector) self.assertEqual(len(docs), 1) self.assertEqual(docs[0]["company"], "Pharmex") 
self.assertNotIn("manager", docs[0]) - + resp_explain = self.db.find(selector, explain=True) self.assertEqual(resp_explain["index"]["type"], "special") @@ -242,10 +238,7 @@ def test_uses_all_docs_when_fields_do_not_match_selector(self): def test_uses_all_docs_when_selector_doesnt_require_fields_to_exist(self): # as in test above, use a selector that doesn't overlap with the index # due to an explicit exists clause - selector = { - "company": "Pharmex", - "manager": {"$exists": False} - } + selector = {"company": "Pharmex", "manager": {"$exists": False}} docs = self.db.find(selector) self.assertEqual(len(docs), 1) self.assertEqual(docs[0]["company"], "Pharmex") @@ -257,7 +250,6 @@ def test_uses_all_docs_when_selector_doesnt_require_fields_to_exist(self): @unittest.skipUnless(mango.has_text_service(), "requires text service") class TextIndexSelectionTests(mango.UserDocsTests): - @classmethod def setUpClass(klass): super(TextIndexSelectionTests, klass).setUpClass() @@ -265,11 +257,14 @@ def setUpClass(klass): user_docs.add_text_indexes(klass.db, {}) def test_with_text(self): - resp = self.db.find({ - "$text" : "Stephanie", + resp = self.db.find( + { + "$text": "Stephanie", "name.first": "Stephanie", - "name.last": "This doesn't have to match anything." 
- }, explain=True) + "name.last": "This doesn't have to match anything.", + }, + explain=True, + ) self.assertEqual(resp["index"]["type"], "text") def test_no_view_index(self): @@ -277,42 +272,43 @@ def test_no_view_index(self): self.assertEqual(resp["index"]["type"], "text") def test_with_or(self): - resp = self.db.find({ + resp = self.db.find( + { "$or": [ {"name.first": "Stephanie"}, - {"name.last": "This doesn't have to match anything."} + {"name.last": "This doesn't have to match anything."}, ] - }, explain=True) + }, + explain=True, + ) self.assertEqual(resp["index"]["type"], "text") - + def test_manual_bad_text_idx(self): design_doc = { "_id": "_design/bad_text_index", "language": "query", "indexes": { - "text_index": { - "default_analyzer": "keyword", - "default_field": {}, - "selector": {}, - "fields": "all_fields", - "analyzer": { + "text_index": { + "default_analyzer": "keyword", + "default_field": {}, + "selector": {}, + "fields": "all_fields", + "analyzer": { "name": "perfield", "default": "keyword", - "fields": { - "$default": "standard" - } - } + "fields": {"$default": "standard"}, + }, } }, "indexes": { "st_index": { "analyzer": "standard", - "index": "function(doc){\n index(\"st_index\", doc.geometry);\n}" + "index": 'function(doc){\n index("st_index", doc.geometry);\n}', } - } + }, } self.db.save_doc(design_doc) - docs= self.db.find({"age" : 48}) + docs = self.db.find({"age": 48}) self.assertEqual(len(docs), 1) self.assertEqual(docs[0]["name"]["first"], "Stephanie") self.assertEqual(docs[0]["age"], 48) @@ -328,7 +324,9 @@ def setUpClass(klass): klass.db.create_text_index(ddoc="bar", analyzer="email") def test_fallback_to_json_with_multi_text(self): - resp = self.db.find({"name.first": "A first name", "name.last": "A last name"}, explain=True) + resp = self.db.find( + {"name.first": "A first name", "name.last": "A last name"}, explain=True + ) self.assertEqual(resp["index"]["type"], "json") def test_multi_text_index_is_error(self): diff --git 
a/src/mango/test/06-basic-text-test.py b/src/mango/test/06-basic-text-test.py index d48948bae72..db7cf32cb8d 100644 --- a/src/mango/test/06-basic-text-test.py +++ b/src/mango/test/06-basic-text-test.py @@ -18,21 +18,17 @@ from hypothesis import given, assume, example import hypothesis.strategies as st + @unittest.skipIf(mango.has_text_service(), "text service exists") class TextIndexCheckTests(mango.DbPerClass): - def test_create_text_index(self): - body = json.dumps({ - 'index': { - }, - 'type': 'text' - }) + body = json.dumps({"index": {}, "type": "text"}) resp = self.db.sess.post(self.db.path("_index"), data=body) assert resp.status_code == 503, resp + @unittest.skipUnless(mango.has_text_service(), "requires text service") class BasicTextTests(mango.UserDocsTextTests): - def test_simple(self): docs = self.db.find({"$text": "Stephanie"}) assert len(docs) == 1 @@ -227,34 +223,18 @@ def test_or(self): assert docs[0]["user_id"] == 9 def test_and_or(self): - q = { - "age": 22, - "$or": [ - {"manager": False}, - {"location.state": "Missouri"} - ] - } + q = {"age": 22, "$or": [{"manager": False}, {"location.state": "Missouri"}]} docs = self.db.find(q) assert len(docs) == 1 assert docs[0]["user_id"] == 9 - q = { - "$or": [ - {"age": 22}, - {"age": 43, "manager": True} - ] - } + q = {"$or": [{"age": 22}, {"age": 43, "manager": True}]} docs = self.db.find(q) assert len(docs) == 2 for d in docs: assert d["user_id"] in (9, 10) - q = { - "$or": [ - {"$text": "Ramona"}, - {"age": 43, "manager": True} - ] - } + q = {"$or": [{"$text": "Ramona"}, {"age": 43, "manager": True}]} docs = self.db.find(q) assert len(docs) == 2 for d in docs: @@ -403,18 +383,22 @@ def test_exists_object_member(self): assert d["user_id"] != 11 def test_exists_and(self): - q = {"$and": [ - {"manager": {"$exists": True}}, - {"exists_object.should": {"$exists": True}} - ]} + q = { + "$and": [ + {"manager": {"$exists": True}}, + {"exists_object.should": {"$exists": True}}, + ] + } docs = self.db.find(q) 
assert len(docs) == 1 assert docs[0]["user_id"] == 11 - q = {"$and": [ - {"manager": {"$exists": False}}, - {"exists_object.should": {"$exists": True}} - ]} + q = { + "$and": [ + {"manager": {"$exists": False}}, + {"exists_object.should": {"$exists": True}}, + ] + } docs = self.db.find(q) assert len(docs) == 0 @@ -425,30 +409,25 @@ def test_exists_and(self): assert len(docs) == len(user_docs.DOCS) def test_value_chars(self): - q = {"complex_field_value": "+-(){}[]^~&&*||\"\\/?:!"} + q = {"complex_field_value": '+-(){}[]^~&&*||"\\/?:!'} docs = self.db.find(q) assert len(docs) == 1 def test_regex(self): - docs = self.db.find({ - "age": {"$gt": 40}, - "location.state": {"$regex": "(?i)new.*"} - }) + docs = self.db.find( + {"age": {"$gt": 40}, "location.state": {"$regex": "(?i)new.*"}} + ) assert len(docs) == 2 assert docs[0]["user_id"] == 2 assert docs[1]["user_id"] == 10 # test lucene syntax in $text + @unittest.skipUnless(mango.has_text_service(), "requires text service") class ElemMatchTests(mango.FriendDocsTextTests): - def test_elem_match_non_object(self): - q = {"bestfriends":{ - "$elemMatch": - {"$eq":"Wolverine", "$eq":"Cyclops"} - } - } + q = {"bestfriends": {"$elemMatch": {"$eq": "Wolverine", "$eq": "Cyclops"}}} docs = self.db.find(q) self.assertEqual(len(docs), 1) self.assertEqual(docs[0]["bestfriends"], ["Wolverine", "Cyclops"]) @@ -460,35 +439,19 @@ def test_elem_match_non_object(self): self.assertEqual(docs[0]["results"], [82, 85, 88]) def test_elem_match(self): - q = {"friends": { - "$elemMatch": - {"name.first": "Vargas"} - } - } + q = {"friends": {"$elemMatch": {"name.first": "Vargas"}}} docs = self.db.find(q) self.assertEqual(len(docs), 2) for d in docs: self.assertIn(d["user_id"], (0, 1)) - q = { - "friends": { - "$elemMatch": { - "name.first": "Ochoa", - "name.last": "Burch" - } - } - } + q = {"friends": {"$elemMatch": {"name.first": "Ochoa", "name.last": "Burch"}}} docs = self.db.find(q) self.assertEqual(len(docs), 1) 
self.assertEqual(docs[0]["user_id"], 4) - # Check that we can do logic in elemMatch - q = { - "friends": {"$elemMatch": { - "name.first": "Ochoa", "type": "work" - }} - } + q = {"friends": {"$elemMatch": {"name.first": "Ochoa", "type": "work"}}} docs = self.db.find(q) self.assertEqual(len(docs), 2) for d in docs: @@ -498,10 +461,7 @@ def test_elem_match(self): "friends": { "$elemMatch": { "name.first": "Ochoa", - "$or": [ - {"type": "work"}, - {"type": "personal"} - ] + "$or": [{"type": "work"}, {"type": "personal"}], } } } @@ -515,7 +475,7 @@ def test_elem_match(self): "friends": { "$elemMatch": { "name.first": "Ochoa", - "type": {"$in": ["work", "personal"]} + "type": {"$in": ["work", "personal"]}, } } } @@ -525,59 +485,37 @@ def test_elem_match(self): self.assertIn(d["user_id"], (1, 4, 15)) q = { - "$and": [{ - "friends": { - "$elemMatch": { - "id": 0, - "name": { - "$exists": True - } - } - } - }, + "$and": [ + {"friends": {"$elemMatch": {"id": 0, "name": {"$exists": True}}}}, { - "friends": { - "$elemMatch": { - "$or": [ - { - "name": { - "first": "Campos", - "last": "Freeman" - } - }, - { - "name": { - "$in": [{ - "first": "Gibbs", - "last": "Mccarty" - }, - { - "first": "Wilkins", - "last": "Chang" - } - ] + "friends": { + "$elemMatch": { + "$or": [ + {"name": {"first": "Campos", "last": "Freeman"}}, + { + "name": { + "$in": [ + {"first": "Gibbs", "last": "Mccarty"}, + {"first": "Wilkins", "last": "Chang"}, + ] } - } + }, ] } } - } + }, ] } docs = self.db.find(q) self.assertEqual(len(docs), 3) for d in docs: - self.assertIn(d["user_id"], (10, 11,12)) + self.assertIn(d["user_id"], (10, 11, 12)) + @unittest.skipUnless(mango.has_text_service(), "requires text service") class AllMatchTests(mango.FriendDocsTextTests): - def test_all_match(self): - q = {"friends": { - "$allMatch": - {"type": "personal"} - } - } + q = {"friends": {"$allMatch": {"type": "personal"}}} docs = self.db.find(q) assert len(docs) == 2 for d in docs: @@ -588,10 +526,7 @@ def 
test_all_match(self): "friends": { "$allMatch": { "name.first": "Ochoa", - "$or": [ - {"type": "work"}, - {"type": "personal"} - ] + "$or": [{"type": "work"}, {"type": "personal"}], } } } @@ -604,7 +539,7 @@ def test_all_match(self): "friends": { "$allMatch": { "name.first": "Ochoa", - "type": {"$in": ["work", "personal"]} + "type": {"$in": ["work", "personal"]}, } } } @@ -616,7 +551,6 @@ def test_all_match(self): # Test numeric strings for $text @unittest.skipUnless(mango.has_text_service(), "requires text service") class NumStringTests(mango.DbPerClass): - @classmethod def setUpClass(klass): super(NumStringTests, klass).setUpClass() @@ -628,11 +562,10 @@ def setUpClass(klass): def isFinite(num): not (math.isinf(num) or math.isnan(num)) - @given(f=st.floats().filter(isFinite).map(str) - | st.floats().map(lambda f: f.hex())) - @example('NaN') - @example('Infinity') - def test_floating_point_val(self,f): + @given(f=st.floats().filter(isFinite).map(str) | st.floats().map(lambda f: f.hex())) + @example("NaN") + @example("Infinity") + def test_floating_point_val(self, f): doc = {"number_string": f} self.db.save_doc(doc) q = {"$text": f} diff --git a/src/mango/test/06-text-default-field-test.py b/src/mango/test/06-text-default-field-test.py index 3f86f0e4151..7fdbd747d92 100644 --- a/src/mango/test/06-text-default-field-test.py +++ b/src/mango/test/06-text-default-field-test.py @@ -33,10 +33,7 @@ def test_other_fields_exist(self): @unittest.skipUnless(mango.has_text_service(), "requires text service") class NoDefaultFieldWithAnalyzer(mango.UserDocsTextTests): - DEFAULT_FIELD = { - "enabled": False, - "analyzer": "keyword" - } + DEFAULT_FIELD = {"enabled": False, "analyzer": "keyword"} def test_basic(self): docs = self.db.find({"$text": "Ramona"}) @@ -51,10 +48,7 @@ def test_other_fields_exist(self): @unittest.skipUnless(mango.has_text_service(), "requires text service") class DefaultFieldWithCustomAnalyzer(mango.UserDocsTextTests): - DEFAULT_FIELD = { - "enabled": True, 
- "analyzer": "keyword" - } + DEFAULT_FIELD = {"enabled": True, "analyzer": "keyword"} def test_basic(self): docs = self.db.find({"$text": "Ramona"}) diff --git a/src/mango/test/07-text-custom-field-list-test.py b/src/mango/test/07-text-custom-field-list-test.py index 9bfe0759806..8514111c45b 100644 --- a/src/mango/test/07-text-custom-field-list-test.py +++ b/src/mango/test/07-text-custom-field-list-test.py @@ -25,11 +25,8 @@ class CustomFieldsTest(mango.UserDocsTextTests): # These two are to test the default analyzer for # each field. {"name": "location.state", "type": "string"}, - { - "name": "location.address.street", - "type": "string" - }, - {"name": "name\\.first", "type": "string"} + {"name": "location.address.street", "type": "string"}, + {"name": "name\\.first", "type": "string"}, ] def test_basic(self): @@ -55,10 +52,11 @@ def test_element_acess(self): # favorites.[], and not the string field favorites def test_index_selection(self): try: - self.db.find({"selector": {"$or": [{"favorites": "Ruby"}, - {"favorites.0":"Ruby"}]}}) + self.db.find( + {"selector": {"$or": [{"favorites": "Ruby"}, {"favorites.0": "Ruby"}]}} + ) except Exception as e: - assert e.response.status_code == 400 + assert e.response.status_code == 400 def test_in_with_array(self): vals = ["Lisp", "Python"] @@ -84,7 +82,7 @@ def test_in_different_types(self): try: self.db.find({"favorites": {"$in": vals}}) except Exception as e: - assert e.response.status_code == 400 + assert e.response.status_code == 400 def test_nin_with_array(self): vals = ["Lisp", "Python"] @@ -125,43 +123,44 @@ def test_escaped_field(self): return def test_filtered_search_fields(self): - docs = self.db.find({"age": 22}, fields = ["age", "location.state"]) + docs = self.db.find({"age": 22}, fields=["age", "location.state"]) assert len(docs) == 1 assert docs == [{"age": 22, "location": {"state": "Missouri"}}] - docs = self.db.find({"age": 22}, fields = ["age", "Random Garbage"]) + docs = self.db.find({"age": 22}, 
fields=["age", "Random Garbage"]) assert len(docs) == 1 assert docs == [{"age": 22}] - docs = self.db.find({"age": 22}, fields = ["favorites"]) + docs = self.db.find({"age": 22}, fields=["favorites"]) assert len(docs) == 1 assert docs == [{"favorites": ["Lisp", "Erlang", "Python"]}] - docs = self.db.find({"age": 22}, fields = ["favorites.[]"]) + docs = self.db.find({"age": 22}, fields=["favorites.[]"]) assert len(docs) == 1 assert docs == [{}] - docs = self.db.find({"age": 22}, fields = ["all_fields"]) + docs = self.db.find({"age": 22}, fields=["all_fields"]) assert len(docs) == 1 assert docs == [{}] def test_two_or(self): - docs = self.db.find({"$or": [{"location.state": "New Hampshire"}, - {"location.state": "Don't Exist"}]}) + docs = self.db.find( + { + "$or": [ + {"location.state": "New Hampshire"}, + {"location.state": "Don't Exist"}, + ] + } + ) assert len(docs) == 1 assert docs[0]["user_id"] == 10 def test_all_match(self): - docs = self.db.find({ - "favorites": { - "$allMatch": { - "$eq": "Erlang" - } - } - }) + docs = self.db.find({"favorites": {"$allMatch": {"$eq": "Erlang"}}}) assert len(docs) == 1 assert docs[0]["user_id"] == 10 + @unittest.skipUnless(mango.has_text_service(), "requires text service") class CustomFieldsExistsTest(mango.UserDocsTextTests): @@ -169,7 +168,7 @@ class CustomFieldsExistsTest(mango.UserDocsTextTests): {"name": "exists_field", "type": "string"}, {"name": "exists_array.[]", "type": "string"}, {"name": "exists_object.should", "type": "string"}, - {"name": "twitter", "type": "string"} + {"name": "twitter", "type": "string"}, ] def test_exists_field(self): @@ -205,8 +204,6 @@ def test_exists_object_member(self): self.assertNotEqual(d["user_id"], 11) def test_exists_false_same_as_views(self): - docs = self.db.find({ - "twitter": {"$exists": False} - }) + docs = self.db.find({"twitter": {"$exists": False}}) for d in docs: self.assertNotIn(d["user_id"], (0, 1, 4, 13)) diff --git a/src/mango/test/08-text-limit-test.py 
b/src/mango/test/08-text-limit-test.py index 4bc87b4b9e8..ae827813d7c 100644 --- a/src/mango/test/08-text-limit-test.py +++ b/src/mango/test/08-text-limit-test.py @@ -14,32 +14,32 @@ import limit_docs import unittest + @unittest.skipUnless(mango.has_text_service(), "requires text service") class LimitTests(mango.LimitDocsTextTests): - def test_limit_field(self): - q = {"$or": [{"user_id" : {"$lt" : 10}}, {"filtered_array.[]": 1}]} + q = {"$or": [{"user_id": {"$lt": 10}}, {"filtered_array.[]": 1}]} docs = self.db.find(q, limit=10) assert len(docs) == 8 for d in docs: assert d["user_id"] < 10 def test_limit_field2(self): - q = {"$or": [{"user_id" : {"$lt" : 20}}, {"filtered_array.[]": 1}]} + q = {"$or": [{"user_id": {"$lt": 20}}, {"filtered_array.[]": 1}]} docs = self.db.find(q, limit=10) assert len(docs) == 10 for d in docs: assert d["user_id"] < 20 def test_limit_field3(self): - q = {"$or": [{"user_id" : {"$lt" : 100}}, {"filtered_array.[]": 1}]} + q = {"$or": [{"user_id": {"$lt": 100}}, {"filtered_array.[]": 1}]} docs = self.db.find(q, limit=1) assert len(docs) == 1 for d in docs: assert d["user_id"] < 100 def test_limit_field4(self): - q = {"$or": [{"user_id" : {"$lt" : 0}}, {"filtered_array.[]": 1}]} + q = {"$or": [{"user_id": {"$lt": 0}}, {"filtered_array.[]": 1}]} docs = self.db.find(q, limit=35) assert len(docs) == 0 @@ -52,29 +52,29 @@ def test_limit_field5(self): assert d["age"] < 100 def test_limit_skip_field1(self): - q = {"$or": [{"user_id" : {"$lt" : 100}}, {"filtered_array.[]": 1}]} + q = {"$or": [{"user_id": {"$lt": 100}}, {"filtered_array.[]": 1}]} docs = self.db.find(q, limit=10, skip=20) assert len(docs) == 10 for d in docs: assert d["user_id"] > 20 def test_limit_skip_field2(self): - q = {"$or": [{"user_id" : {"$lt" : 100}}, {"filtered_array.[]": 1}]} + q = {"$or": [{"user_id": {"$lt": 100}}, {"filtered_array.[]": 1}]} docs = self.db.find(q, limit=100, skip=100) assert len(docs) == 0 def test_limit_skip_field3(self): - q = {"$or": [{"user_id" : 
{"$lt" : 20}}, {"filtered_array.[]": 1}]} + q = {"$or": [{"user_id": {"$lt": 20}}, {"filtered_array.[]": 1}]} docs = self.db.find(q, limit=1, skip=30) assert len(docs) == 0 def test_limit_skip_field4(self): - q = {"$or": [{"user_id" : {"$lt" : 100}}, {"filtered_array.[]": 1}]} + q = {"$or": [{"user_id": {"$lt": 100}}, {"filtered_array.[]": 1}]} docs = self.db.find(q, limit=0, skip=0) assert len(docs) == 0 def test_limit_skip_field5(self): - q = {"$or": [{"user_id" : {"$lt" : 100}}, {"filtered_array.[]": 1}]} + q = {"$or": [{"user_id": {"$lt": 100}}, {"filtered_array.[]": 1}]} try: self.db.find(q, limit=-1) except Exception as e: @@ -83,7 +83,7 @@ def test_limit_skip_field5(self): raise AssertionError("Should have thrown error for negative limit") def test_limit_skip_field6(self): - q = {"$or": [{"user_id" : {"$lt" : 100}}, {"filtered_array.[]": 1}]} + q = {"$or": [{"user_id": {"$lt": 100}}, {"filtered_array.[]": 1}]} try: self.db.find(q, skip=-1) except Exception as e: @@ -99,7 +99,6 @@ def test_limit_bookmark(self): for i in range(1, len(limit_docs.DOCS), 5): self.run_bookmark_sort_check(i) - def run_bookmark_check(self, size): q = {"age": {"$gt": 0}} seen_docs = set() @@ -121,8 +120,9 @@ def run_bookmark_sort_check(self, size): bm = None age = 0 while True: - json = self.db.find(q, limit=size, bookmark=bm, sort=["age"], - return_raw=True) + json = self.db.find( + q, limit=size, bookmark=bm, sort=["age"], return_raw=True + ) for doc in json["docs"]: assert doc["_id"] not in seen_docs assert doc["age"] >= age diff --git a/src/mango/test/09-text-sort-test.py b/src/mango/test/09-text-sort-test.py index a1a644c7959..c0c36ccd0d5 100644 --- a/src/mango/test/09-text-sort-test.py +++ b/src/mango/test/09-text-sort-test.py @@ -13,9 +13,9 @@ import mango import unittest + @unittest.skipUnless(mango.has_text_service(), "requires text service") class SortTests(mango.UserDocsTextTests): - def test_number_sort(self): q = {"age": {"$gt": 0}} docs = self.db.find(q, 
sort=["age:number"]) @@ -58,25 +58,29 @@ def test_multi_sort(self): q = {"name": {"$exists": True}} docs = self.db.find(q, sort=["name.last:string", "age:number"]) self.assertEqual(len(docs), 15) - self.assertEqual(docs[0]["name"], {"last":"Ewing","first":"Shelly"}) + self.assertEqual(docs[0]["name"], {"last": "Ewing", "first": "Shelly"}) self.assertEqual(docs[1]["age"], 22) def test_guess_type_sort(self): - q = {"$or": [{"age":{"$gt": 0}}, {"email": {"$gt": None}}]} + q = {"$or": [{"age": {"$gt": 0}}, {"email": {"$gt": None}}]} docs = self.db.find(q, sort=["age"]) self.assertEqual(len(docs), 15) self.assertEqual(docs[0]["age"], 22) def test_guess_dup_type_sort(self): - q = {"$and": [{"age":{"$gt": 0}}, {"email": {"$gt": None}}, - {"age":{"$lte": 100}}]} + q = { + "$and": [ + {"age": {"$gt": 0}}, + {"email": {"$gt": None}}, + {"age": {"$lte": 100}}, + ] + } docs = self.db.find(q, sort=["age"]) self.assertEqual(len(docs), 15) self.assertEqual(docs[0]["age"], 22) def test_ambiguous_type_sort(self): - q = {"$or": [{"age":{"$gt": 0}}, {"email": {"$gt": None}}, - {"age": "34"}]} + q = {"$or": [{"age": {"$gt": 0}}, {"email": {"$gt": None}}, {"age": "34"}]} try: self.db.find(q, sort=["age"]) except Exception as e: @@ -85,17 +89,27 @@ def test_ambiguous_type_sort(self): raise AssertionError("Should have thrown error for sort") def test_guess_multi_sort(self): - q = {"$or": [{"age":{"$gt": 0}}, {"email": {"$gt": None}}, - {"name.last": "Harvey"}]} + q = { + "$or": [ + {"age": {"$gt": 0}}, + {"email": {"$gt": None}}, + {"name.last": "Harvey"}, + ] + } docs = self.db.find(q, sort=["name.last", "age"]) self.assertEqual(len(docs), 15) - self.assertEqual(docs[0]["name"], {"last":"Ewing","first":"Shelly"}) + self.assertEqual(docs[0]["name"], {"last": "Ewing", "first": "Shelly"}) self.assertEqual(docs[1]["age"], 22) def test_guess_mix_sort(self): - q = {"$or": [{"age":{"$gt": 0}}, {"email": {"$gt": None}}, - {"name.last": "Harvey"}]} + q = { + "$or": [ + {"age": {"$gt": 0}}, + 
{"email": {"$gt": None}}, + {"name.last": "Harvey"}, + ] + } docs = self.db.find(q, sort=["name.last:string", "age"]) self.assertEqual(len(docs), 15) - self.assertEqual(docs[0]["name"], {"last":"Ewing","first":"Shelly"}) + self.assertEqual(docs[0]["name"], {"last": "Ewing", "first": "Shelly"}) self.assertEqual(docs[1]["age"], 22) diff --git a/src/mango/test/10-disable-array-length-field-test.py b/src/mango/test/10-disable-array-length-field-test.py index 6b6d4192651..ea3279b55cc 100644 --- a/src/mango/test/10-disable-array-length-field-test.py +++ b/src/mango/test/10-disable-array-length-field-test.py @@ -13,26 +13,32 @@ import mango import unittest + @unittest.skipUnless(mango.has_text_service(), "requires text service") class DisableIndexArrayLengthsTest(mango.UserDocsTextTests): - def setUp(self): self.db.recreate() - self.db.create_text_index(ddoc="disable_index_array_lengths", - analyzer="keyword", - index_array_lengths=False) - self.db.create_text_index(ddoc="explicit_enable_index_array_lengths", - analyzer="keyword", - index_array_lengths=True) + self.db.create_text_index( + ddoc="disable_index_array_lengths", + analyzer="keyword", + index_array_lengths=False, + ) + self.db.create_text_index( + ddoc="explicit_enable_index_array_lengths", + analyzer="keyword", + index_array_lengths=True, + ) def test_disable_index_array_length(self): - docs = self.db.find({"favorites": {"$size": 4}}, - use_index="disable_index_array_lengths") + docs = self.db.find( + {"favorites": {"$size": 4}}, use_index="disable_index_array_lengths" + ) for d in docs: assert len(d["favorites"]) == 0 def test_enable_index_array_length(self): - docs = self.db.find({"favorites": {"$size": 4}}, - use_index="explicit_enable_index_array_lengths") + docs = self.db.find( + {"favorites": {"$size": 4}}, use_index="explicit_enable_index_array_lengths" + ) for d in docs: assert len(d["favorites"]) == 4 diff --git a/src/mango/test/11-ignore-design-docs-test.py 
b/src/mango/test/11-ignore-design-docs-test.py index ea7165e3f5b..f31dcc5d136 100644 --- a/src/mango/test/11-ignore-design-docs-test.py +++ b/src/mango/test/11-ignore-design-docs-test.py @@ -14,26 +14,14 @@ import unittest DOCS = [ - { - "_id": "_design/my-design-doc", - }, - { - "_id": "54af50626de419f5109c962f", - "user_id": 0, - "age": 10, - "name": "Jimi" - }, - { - "_id": "54af50622071121b25402dc3", - "user_id": 1, - "age": 11, - "name": "Eddie" - } + {"_id": "_design/my-design-doc"}, + {"_id": "54af50626de419f5109c962f", "user_id": 0, "age": 10, "name": "Jimi"}, + {"_id": "54af50622071121b25402dc3", "user_id": 1, "age": 11, "name": "Eddie"}, ] + class IgnoreDesignDocsForAllDocsIndexTests(mango.DbPerClass): def test_should_not_return_design_docs(self): self.db.save_docs(DOCS) docs = self.db.find({"_id": {"$gte": None}}) assert len(docs) == 2 - diff --git a/src/mango/test/12-use-correct-index-test.py b/src/mango/test/12-use-correct-index-test.py index 7bb90ebf955..2de88a21ab1 100644 --- a/src/mango/test/12-use-correct-index-test.py +++ b/src/mango/test/12-use-correct-index-test.py @@ -14,16 +14,14 @@ import copy DOCS = [ - { - "_id": "_design/my-design-doc", - }, + {"_id": "_design/my-design-doc"}, { "_id": "54af50626de419f5109c962f", "user_id": 0, "age": 10, "name": "Jimi", "location": "UK", - "number": 4 + "number": 4, }, { "_id": "54af50622071121b25402dc3", @@ -31,7 +29,7 @@ "age": 12, "name": "Eddie", "location": "ZAR", - "number": 2 + "number": 2, }, { "_id": "54af50622071121b25402dc6", @@ -39,16 +37,17 @@ "age": 6, "name": "Harry", "location": "US", - "number":8 + "number": 8, }, { "_id": "54af50622071121b25402dc9", "name": "Eddie", "occupation": "engineer", - "number":7 + "number": 7, }, ] + class ChooseCorrectIndexForDocs(mango.DbPerClass): def setUp(self): self.db.recreate() @@ -58,53 +57,58 @@ def test_choose_index_with_one_field_in_index(self): self.db.create_index(["name", "age", "user_id"], ddoc="aaa") self.db.create_index(["name"], ddoc="zzz") 
explain = self.db.find({"name": "Eddie"}, explain=True) - self.assertEqual(explain["index"]["ddoc"], '_design/zzz') + self.assertEqual(explain["index"]["ddoc"], "_design/zzz") def test_choose_index_with_two(self): self.db.create_index(["name", "age", "user_id"], ddoc="aaa") self.db.create_index(["name", "age"], ddoc="bbb") self.db.create_index(["name"], ddoc="zzz") - explain = self.db.find({"name": "Eddie", "age":{"$gte": 12}}, explain=True) - self.assertEqual(explain["index"]["ddoc"], '_design/bbb') + explain = self.db.find({"name": "Eddie", "age": {"$gte": 12}}, explain=True) + self.assertEqual(explain["index"]["ddoc"], "_design/bbb") def test_choose_index_alphabetically(self): self.db.create_index(["name"], ddoc="aaa") self.db.create_index(["name"], ddoc="bbb") self.db.create_index(["name"], ddoc="zzz") explain = self.db.find({"name": "Eddie", "age": {"$gte": 12}}, explain=True) - self.assertEqual(explain["index"]["ddoc"], '_design/aaa') + self.assertEqual(explain["index"]["ddoc"], "_design/aaa") def test_choose_index_most_accurate(self): self.db.create_index(["name", "age", "user_id"], ddoc="aaa") self.db.create_index(["name", "age"], ddoc="bbb") self.db.create_index(["name"], ddoc="zzz") explain = self.db.find({"name": "Eddie", "age": {"$gte": 12}}, explain=True) - self.assertEqual(explain["index"]["ddoc"], '_design/bbb') - + self.assertEqual(explain["index"]["ddoc"], "_design/bbb") + def test_choose_index_most_accurate_in_memory_selector(self): self.db.create_index(["name", "location", "user_id"], ddoc="aaa") self.db.create_index(["name", "age", "user_id"], ddoc="bbb") self.db.create_index(["name"], ddoc="zzz") explain = self.db.find({"name": "Eddie", "number": {"$lte": 12}}, explain=True) - self.assertEqual(explain["index"]["ddoc"], '_design/zzz') + self.assertEqual(explain["index"]["ddoc"], "_design/zzz") def test_warn_on_full_db_scan(self): - selector = {"not_indexed":"foo"} + selector = {"not_indexed": "foo"} explain_resp = self.db.find(selector, 
explain=True, return_raw=True) self.assertEqual(explain_resp["index"]["type"], "special") resp = self.db.find(selector, return_raw=True) - self.assertEqual(resp["warning"], "no matching index found, create an index to optimize query time") + self.assertEqual( + resp["warning"], + "no matching index found, create an index to optimize query time", + ) def test_chooses_idxA(self): - DOCS2 = [ - {"a":1, "b":1, "c":1}, - {"a":1000, "d" : 1000, "e": 1000} - ] + DOCS2 = [{"a": 1, "b": 1, "c": 1}, {"a": 1000, "d": 1000, "e": 1000}] self.db.save_docs(copy.deepcopy(DOCS2)) self.db.create_index(["a", "b", "c"]) self.db.create_index(["a", "d", "e"]) - explain = self.db.find({"a": {"$gt": 0}, "b": {"$gt": 0}, "c": {"$gt": 0}}, explain=True) - self.assertEqual(explain["index"]["def"]["fields"], [{'a': 'asc'}, {'b': 'asc'}, {'c': 'asc'}]) + explain = self.db.find( + {"a": {"$gt": 0}, "b": {"$gt": 0}, "c": {"$gt": 0}}, explain=True + ) + self.assertEqual( + explain["index"]["def"]["fields"], + [{"a": "asc"}, {"b": "asc"}, {"c": "asc"}], + ) def test_can_query_with_range_on_secondary_column(self): self.db.create_index(["age", "name"], ddoc="bbb") @@ -113,7 +117,7 @@ def test_can_query_with_range_on_secondary_column(self): self.assertEqual(len(docs), 1) explain = self.db.find(selector, explain=True) self.assertEqual(explain["index"]["ddoc"], "_design/bbb") - self.assertEqual(explain["mrargs"]["end_key"], [10, '']) + self.assertEqual(explain["mrargs"]["end_key"], [10, ""]) # all documents contain an _id and _rev field they # should not be used to restrict indexes based on the @@ -121,9 +125,9 @@ def test_can_query_with_range_on_secondary_column(self): def test_choose_index_with_id(self): self.db.create_index(["name", "_id"], ddoc="aaa") explain = self.db.find({"name": "Eddie"}, explain=True) - self.assertEqual(explain["index"]["ddoc"], '_design/aaa') + self.assertEqual(explain["index"]["ddoc"], "_design/aaa") def test_choose_index_with_rev(self): self.db.create_index(["name", 
"_rev"], ddoc="aaa") explain = self.db.find({"name": "Eddie"}, explain=True) - self.assertEqual(explain["index"]["ddoc"], '_design/aaa') + self.assertEqual(explain["index"]["ddoc"], "_design/aaa") diff --git a/src/mango/test/13-stable-update-test.py b/src/mango/test/13-stable-update-test.py index 3d78ecc65d8..348ac5ee718 100644 --- a/src/mango/test/13-stable-update-test.py +++ b/src/mango/test/13-stable-update-test.py @@ -20,7 +20,7 @@ "age": 10, "name": "Jimi", "location": "UK", - "number": 4 + "number": 4, }, { "_id": "54af50622071121b25402dc3", @@ -28,10 +28,11 @@ "age": 12, "name": "Eddie", "location": "ZAR", - "number": 2 + "number": 2, }, ] + class SupportStableAndUpdate(mango.DbPerClass): def setUp(self): self.db.recreate() diff --git a/src/mango/test/13-users-db-find-test.py b/src/mango/test/13-users-db-find-test.py index d8d32ad939d..73d15ea1a35 100644 --- a/src/mango/test/13-users-db-find-test.py +++ b/src/mango/test/13-users-db-find-test.py @@ -16,7 +16,6 @@ class UsersDbFindTests(mango.UsersDbTests): - def test_simple_find(self): docs = self.db.find({"name": {"$eq": "demo02"}}) assert len(docs) == 1 @@ -29,15 +28,9 @@ def test_multi_cond_and(self): assert docs[0]["_id"] == "org.couchdb.user:demo02" def test_multi_cond_or(self): - docs = self.db.find({ - "$and":[ - {"type": "user"}, - {"$or": [ - {"order": 1}, - {"order": 3} - ]} - ] - }) + docs = self.db.find( + {"$and": [{"type": "user"}, {"$or": [{"order": 1}, {"order": 3}]}]} + ) assert len(docs) == 2 assert docs[0]["_id"] == "org.couchdb.user:demo01" assert docs[1]["_id"] == "org.couchdb.user:demo03" @@ -65,7 +58,6 @@ def test_empty(self): class UsersDbIndexFindTests(UsersDbFindTests): - def setUp(self): self.db.create_index(["name"]) @@ -80,4 +72,3 @@ def test_multi_cond_or(self): def test_sort(self): self.db.create_index(["order", "name"]) super(UsersDbIndexFindTests, self).test_sort() - diff --git a/src/mango/test/14-json-pagination-test.py b/src/mango/test/14-json-pagination-test.py index 
ea06e0a2a10..2d24301528a 100644 --- a/src/mango/test/14-json-pagination-test.py +++ b/src/mango/test/14-json-pagination-test.py @@ -14,146 +14,129 @@ import copy DOCS = [ - { - "_id": "100", - "name": "Jimi", - "location": "AUS", - "user_id": 1, - "same": "value" - }, - { - "_id": "200", - "name": "Eddie", - "location": "BRA", - "user_id": 2, - "same": "value" - }, - { - "_id": "300", - "name": "Harry", - "location": "CAN", - "user_id":3, - "same": "value" - }, - { - "_id": "400", - "name": "Eddie", - "location": "DEN", - "user_id":4, - "same": "value" - }, - { - "_id": "500", - "name": "Jones", - "location": "ETH", - "user_id":5, - "same": "value" - }, + {"_id": "100", "name": "Jimi", "location": "AUS", "user_id": 1, "same": "value"}, + {"_id": "200", "name": "Eddie", "location": "BRA", "user_id": 2, "same": "value"}, + {"_id": "300", "name": "Harry", "location": "CAN", "user_id": 3, "same": "value"}, + {"_id": "400", "name": "Eddie", "location": "DEN", "user_id": 4, "same": "value"}, + {"_id": "500", "name": "Jones", "location": "ETH", "user_id": 5, "same": "value"}, { "_id": "600", "name": "Winnifried", "location": "FRA", - "user_id":6, - "same": "value" - }, - { - "_id": "700", - "name": "Marilyn", - "location": "GHA", - "user_id":7, - "same": "value" - }, - { - "_id": "800", - "name": "Sandra", - "location": "ZAR", - "user_id":8, - "same": "value" + "user_id": 6, + "same": "value", }, + {"_id": "700", "name": "Marilyn", "location": "GHA", "user_id": 7, "same": "value"}, + {"_id": "800", "name": "Sandra", "location": "ZAR", "user_id": 8, "same": "value"}, ] + class PaginateJsonDocs(mango.DbPerClass): def setUp(self): self.db.recreate() self.db.save_docs(copy.deepcopy(DOCS)) def test_all_docs_paginate_to_end(self): - selector = {"_id": {"$gt": 0}} + selector = {"_id": {"$gt": 0}} # Page 1 resp = self.db.find(selector, fields=["_id"], limit=5, return_raw=True) - bookmark = resp['bookmark'] - docs = resp['docs'] - assert docs[0]['_id'] == '100' + bookmark = 
resp["bookmark"] + docs = resp["docs"] + assert docs[0]["_id"] == "100" assert len(docs) == 5 # Page 2 - resp = self.db.find(selector, fields=["_id"], bookmark= bookmark, limit=5, return_raw=True) - bookmark = resp['bookmark'] - docs = resp['docs'] - assert docs[0]['_id'] == '600' + resp = self.db.find( + selector, fields=["_id"], bookmark=bookmark, limit=5, return_raw=True + ) + bookmark = resp["bookmark"] + docs = resp["docs"] + assert docs[0]["_id"] == "600" assert len(docs) == 3 - # Page 3 - resp = self.db.find(selector, bookmark= bookmark, limit=5, return_raw=True) - bookmark = resp['bookmark'] - docs = resp['docs'] + # Page 3 + resp = self.db.find(selector, bookmark=bookmark, limit=5, return_raw=True) + bookmark = resp["bookmark"] + docs = resp["docs"] assert len(docs) == 0 def test_return_previous_bookmark_for_empty(self): - selector = {"_id": {"$gt": 0}} + selector = {"_id": {"$gt": 0}} # Page 1 resp = self.db.find(selector, fields=["_id"], return_raw=True) - bookmark1 = resp['bookmark'] - docs = resp['docs'] + bookmark1 = resp["bookmark"] + docs = resp["docs"] assert len(docs) == 8 - resp = self.db.find(selector, fields=["_id"], return_raw=True, bookmark=bookmark1) - bookmark2 = resp['bookmark'] - docs = resp['docs'] + resp = self.db.find( + selector, fields=["_id"], return_raw=True, bookmark=bookmark1 + ) + bookmark2 = resp["bookmark"] + docs = resp["docs"] assert len(docs) == 0 - resp = self.db.find(selector, fields=["_id"], return_raw=True, bookmark=bookmark2) - bookmark3 = resp['bookmark'] - docs = resp['docs'] + resp = self.db.find( + selector, fields=["_id"], return_raw=True, bookmark=bookmark2 + ) + bookmark3 = resp["bookmark"] + docs = resp["docs"] assert bookmark3 == bookmark2 assert len(docs) == 0 def test_all_docs_with_skip(self): - selector = {"_id": {"$gt": 0}} + selector = {"_id": {"$gt": 0}} # Page 1 resp = self.db.find(selector, fields=["_id"], skip=2, limit=5, return_raw=True) - bookmark = resp['bookmark'] - docs = resp['docs'] - assert 
docs[0]['_id'] == '300' + bookmark = resp["bookmark"] + docs = resp["docs"] + assert docs[0]["_id"] == "300" assert len(docs) == 5 # Page 2 - resp = self.db.find(selector, fields=["_id"], bookmark= bookmark, limit=5, return_raw=True) - bookmark = resp['bookmark'] - docs = resp['docs'] - assert docs[0]['_id'] == '800' + resp = self.db.find( + selector, fields=["_id"], bookmark=bookmark, limit=5, return_raw=True + ) + bookmark = resp["bookmark"] + docs = resp["docs"] + assert docs[0]["_id"] == "800" assert len(docs) == 1 - resp = self.db.find(selector, bookmark= bookmark, limit=5, return_raw=True) - bookmark = resp['bookmark'] - docs = resp['docs'] + resp = self.db.find(selector, bookmark=bookmark, limit=5, return_raw=True) + bookmark = resp["bookmark"] + docs = resp["docs"] assert len(docs) == 0 def test_all_docs_reverse(self): - selector = {"_id": {"$gt": 0}} - resp = self.db.find(selector, fields=["_id"], sort=[{"_id": "desc"}], limit=5, return_raw=True) - docs = resp['docs'] + selector = {"_id": {"$gt": 0}} + resp = self.db.find( + selector, fields=["_id"], sort=[{"_id": "desc"}], limit=5, return_raw=True + ) + docs = resp["docs"] bookmark1 = resp["bookmark"] assert len(docs) == 5 - assert docs[0]['_id'] == '800' + assert docs[0]["_id"] == "800" - resp = self.db.find(selector, fields=["_id"], sort=[{"_id": "desc"}], limit=5, return_raw=True, bookmark=bookmark1) - docs = resp['docs'] + resp = self.db.find( + selector, + fields=["_id"], + sort=[{"_id": "desc"}], + limit=5, + return_raw=True, + bookmark=bookmark1, + ) + docs = resp["docs"] bookmark2 = resp["bookmark"] assert len(docs) == 3 - assert docs[0]['_id'] == '300' + assert docs[0]["_id"] == "300" - resp = self.db.find(selector, fields=["_id"], sort=[{"_id": "desc"}], limit=5, return_raw=True, bookmark=bookmark2) - docs = resp['docs'] + resp = self.db.find( + selector, + fields=["_id"], + sort=[{"_id": "desc"}], + limit=5, + return_raw=True, + bookmark=bookmark2, + ) + docs = resp["docs"] assert len(docs) == 
0 def test_bad_bookmark(self): @@ -162,13 +145,15 @@ def test_bad_bookmark(self): except Exception as e: resp = e.response.json() assert resp["error"] == "invalid_bookmark" - assert resp["reason"] == "Invalid bookmark value: \"bad-bookmark\"" + assert resp["reason"] == 'Invalid bookmark value: "bad-bookmark"' assert e.response.status_code == 400 else: raise AssertionError("Should have thrown error for bad bookmark") - + def test_throws_error_on_text_bookmark(self): - bookmark = 'g2wAAAABaANkABFub2RlMUBjb3VjaGRiLm5ldGwAAAACYQBiP____2poAkY_8AAAAAAAAGEHag' + bookmark = ( + "g2wAAAABaANkABFub2RlMUBjb3VjaGRiLm5ldGwAAAACYQBiP____2poAkY_8AAAAAAAAGEHag" + ) try: self.db.find({"_id": {"$gt": 0}}, bookmark=bookmark) except Exception as e: @@ -177,80 +162,108 @@ def test_throws_error_on_text_bookmark(self): assert e.response.status_code == 400 else: raise AssertionError("Should have thrown error for bad bookmark") - + def test_index_pagination(self): self.db.create_index(["location"]) - selector = {"location": {"$gt": "A"}} + selector = {"location": {"$gt": "A"}} resp = self.db.find(selector, fields=["_id"], limit=5, return_raw=True) - docs = resp['docs'] + docs = resp["docs"] bookmark1 = resp["bookmark"] assert len(docs) == 5 - assert docs[0]['_id'] == '100' + assert docs[0]["_id"] == "100" - resp = self.db.find(selector, fields=["_id"], limit=5, return_raw=True, bookmark=bookmark1) - docs = resp['docs'] + resp = self.db.find( + selector, fields=["_id"], limit=5, return_raw=True, bookmark=bookmark1 + ) + docs = resp["docs"] bookmark2 = resp["bookmark"] assert len(docs) == 3 - assert docs[0]['_id'] == '600' + assert docs[0]["_id"] == "600" - resp = self.db.find(selector, fields=["_id"], limit=5, return_raw=True, bookmark=bookmark2) - docs = resp['docs'] + resp = self.db.find( + selector, fields=["_id"], limit=5, return_raw=True, bookmark=bookmark2 + ) + docs = resp["docs"] assert len(docs) == 0 def test_index_pagination_two_keys(self): self.db.create_index(["location", 
"user_id"]) - selector = {"location": {"$gt": "A"}, "user_id": {"$gte": 1}} + selector = {"location": {"$gt": "A"}, "user_id": {"$gte": 1}} resp = self.db.find(selector, fields=["_id"], limit=5, return_raw=True) - docs = resp['docs'] + docs = resp["docs"] bookmark1 = resp["bookmark"] assert len(docs) == 5 - assert docs[0]['_id'] == '100' + assert docs[0]["_id"] == "100" - resp = self.db.find(selector, fields=["_id"], limit=5, return_raw=True, bookmark=bookmark1) - docs = resp['docs'] + resp = self.db.find( + selector, fields=["_id"], limit=5, return_raw=True, bookmark=bookmark1 + ) + docs = resp["docs"] bookmark2 = resp["bookmark"] assert len(docs) == 3 - assert docs[0]['_id'] == '600' + assert docs[0]["_id"] == "600" - resp = self.db.find(selector, fields=["_id"], limit=5, return_raw=True, bookmark=bookmark2) - docs = resp['docs'] + resp = self.db.find( + selector, fields=["_id"], limit=5, return_raw=True, bookmark=bookmark2 + ) + docs = resp["docs"] assert len(docs) == 0 def test_index_pagination_reverse(self): self.db.create_index(["location", "user_id"]) - selector = {"location": {"$gt": "A"}, "user_id": {"$gte": 1}} + selector = {"location": {"$gt": "A"}, "user_id": {"$gte": 1}} sort = [{"location": "desc"}, {"user_id": "desc"}] - resp = self.db.find(selector, fields=["_id"], sort=sort, limit=5, return_raw=True) - docs = resp['docs'] + resp = self.db.find( + selector, fields=["_id"], sort=sort, limit=5, return_raw=True + ) + docs = resp["docs"] bookmark1 = resp["bookmark"] assert len(docs) == 5 - assert docs[0]['_id'] == '800' + assert docs[0]["_id"] == "800" - resp = self.db.find(selector, fields=["_id"], limit=5, sort=sort, return_raw=True, bookmark=bookmark1) - docs = resp['docs'] + resp = self.db.find( + selector, + fields=["_id"], + limit=5, + sort=sort, + return_raw=True, + bookmark=bookmark1, + ) + docs = resp["docs"] bookmark2 = resp["bookmark"] assert len(docs) == 3 - assert docs[0]['_id'] == '300' + assert docs[0]["_id"] == "300" - resp = 
self.db.find(selector, fields=["_id"], limit=5, sort=sort, return_raw=True, bookmark=bookmark2) - docs = resp['docs'] + resp = self.db.find( + selector, + fields=["_id"], + limit=5, + sort=sort, + return_raw=True, + bookmark=bookmark2, + ) + docs = resp["docs"] assert len(docs) == 0 def test_index_pagination_same_emitted_key(self): self.db.create_index(["same"]) - selector = {"same": {"$gt": ""}} + selector = {"same": {"$gt": ""}} resp = self.db.find(selector, fields=["_id"], limit=5, return_raw=True) - docs = resp['docs'] + docs = resp["docs"] bookmark1 = resp["bookmark"] assert len(docs) == 5 - assert docs[0]['_id'] == '100' + assert docs[0]["_id"] == "100" - resp = self.db.find(selector, fields=["_id"], limit=5, return_raw=True, bookmark=bookmark1) - docs = resp['docs'] + resp = self.db.find( + selector, fields=["_id"], limit=5, return_raw=True, bookmark=bookmark1 + ) + docs = resp["docs"] bookmark2 = resp["bookmark"] assert len(docs) == 3 - assert docs[0]['_id'] == '600' + assert docs[0]["_id"] == "600" - resp = self.db.find(selector, fields=["_id"], limit=5, return_raw=True, bookmark=bookmark2) - docs = resp['docs'] + resp = self.db.find( + selector, fields=["_id"], limit=5, return_raw=True, bookmark=bookmark2 + ) + docs = resp["docs"] assert len(docs) == 0 diff --git a/src/mango/test/15-execution-stats-test.py b/src/mango/test/15-execution-stats-test.py index 92a5995199d..922cadf8344 100644 --- a/src/mango/test/15-execution-stats-test.py +++ b/src/mango/test/15-execution-stats-test.py @@ -15,8 +15,8 @@ import os import unittest -class ExecutionStatsTests(mango.UserDocsTests): +class ExecutionStatsTests(mango.UserDocsTests): def test_simple_json_index(self): resp = self.db.find({"age": {"$lt": 35}}, return_raw=True, executionStats=True) self.assertEqual(len(resp["docs"]), 3) @@ -26,7 +26,7 @@ def test_simple_json_index(self): self.assertEqual(resp["execution_stats"]["results_returned"], 3) # See https://github.com/apache/couchdb/issues/1732 # Erlang 
os:timestamp() only has ms accuracy on Windows! - if os.name != 'nt': + if os.name != "nt": self.assertGreater(resp["execution_stats"]["execution_time_ms"], 0) def test_no_execution_stats(self): @@ -34,7 +34,9 @@ def test_no_execution_stats(self): assert "execution_stats" not in resp def test_quorum_json_index(self): - resp = self.db.find({"age": {"$lt": 35}}, return_raw=True, r=3, executionStats=True) + resp = self.db.find( + {"age": {"$lt": 35}}, return_raw=True, r=3, executionStats=True + ) self.assertEqual(len(resp["docs"]), 3) self.assertEqual(resp["execution_stats"]["total_keys_examined"], 0) self.assertEqual(resp["execution_stats"]["total_docs_examined"], 0) @@ -42,20 +44,22 @@ def test_quorum_json_index(self): self.assertEqual(resp["execution_stats"]["results_returned"], 3) # See https://github.com/apache/couchdb/issues/1732 # Erlang os:timestamp() only has ms accuracy on Windows! - if os.name != 'nt': + if os.name != "nt": self.assertGreater(resp["execution_stats"]["execution_time_ms"], 0) def test_results_returned_limit(self): - resp = self.db.find({"age": {"$lt": 35}}, limit=2, return_raw=True, executionStats=True) + resp = self.db.find( + {"age": {"$lt": 35}}, limit=2, return_raw=True, executionStats=True + ) self.assertEqual(resp["execution_stats"]["results_returned"], len(resp["docs"])) + @unittest.skipUnless(mango.has_text_service(), "requires text service") class ExecutionStatsTests_Text(mango.UserDocsTextTests): - def test_simple_text_index(self): - resp = self.db.find({"$text": "Stephanie"}, - return_raw=True, - executionStats=True) + resp = self.db.find( + {"$text": "Stephanie"}, return_raw=True, executionStats=True + ) self.assertEqual(len(resp["docs"]), 1) self.assertEqual(resp["execution_stats"]["total_keys_examined"], 0) self.assertEqual(resp["execution_stats"]["total_docs_examined"], 1) @@ -64,6 +68,5 @@ def test_simple_text_index(self): self.assertGreater(resp["execution_stats"]["execution_time_ms"], 0) def test_no_execution_stats(self): - 
resp = self.db.find({"$text": "Stephanie"}, - return_raw=True) + resp = self.db.find({"$text": "Stephanie"}, return_raw=True) self.assertNotIn("execution_stats", resp) diff --git a/src/mango/test/16-index-selectors-test.py b/src/mango/test/16-index-selectors-test.py index a876dc68f0f..4510065f580 100644 --- a/src/mango/test/16-index-selectors-test.py +++ b/src/mango/test/16-index-selectors-test.py @@ -15,62 +15,20 @@ import unittest DOCS = [ - { - "_id": "100", - "name": "Jimi", - "location": "AUS", - "user_id": 1, - "same": "value" - }, - { - "_id": "200", - "name": "Eddie", - "location": "BRA", - "user_id": 2, - "same": "value" - }, - { - "_id": "300", - "name": "Harry", - "location": "CAN", - "user_id":3, - "same": "value" - }, - { - "_id": "400", - "name": "Eddie", - "location": "DEN", - "user_id":4, - "same": "value" - }, - { - "_id": "500", - "name": "Jones", - "location": "ETH", - "user_id":5, - "same": "value" - }, + {"_id": "100", "name": "Jimi", "location": "AUS", "user_id": 1, "same": "value"}, + {"_id": "200", "name": "Eddie", "location": "BRA", "user_id": 2, "same": "value"}, + {"_id": "300", "name": "Harry", "location": "CAN", "user_id": 3, "same": "value"}, + {"_id": "400", "name": "Eddie", "location": "DEN", "user_id": 4, "same": "value"}, + {"_id": "500", "name": "Jones", "location": "ETH", "user_id": 5, "same": "value"}, { "_id": "600", "name": "Winnifried", "location": "FRA", - "user_id":6, - "same": "value" - }, - { - "_id": "700", - "name": "Marilyn", - "location": "GHA", - "user_id":7, - "same": "value" - }, - { - "_id": "800", - "name": "Sandra", - "location": "ZAR", - "user_id":8, - "same": "value" + "user_id": 6, + "same": "value", }, + {"_id": "700", "name": "Marilyn", "location": "GHA", "user_id": 7, "same": "value"}, + {"_id": "800", "name": "Sandra", "location": "ZAR", "user_id": 8, "same": "value"}, ] oldschoolnoselectorddoc = { @@ -78,21 +36,11 @@ "language": "query", "views": { "oldschoolnoselector": { - "map": { - "fields": { - 
"location": "asc" - } - }, + "map": {"fields": {"location": "asc"}}, "reduce": "_count", - "options": { - "def": { - "fields": [ - "location" - ] - } - } + "options": {"def": {"fields": ["location"]}}, } - } + }, } oldschoolddoc = { @@ -101,23 +49,13 @@ "views": { "oldschool": { "map": { - "fields": { - "location": "asc" - }, - "selector": { - "location": {"$gte": "FRA"} - } + "fields": {"location": "asc"}, + "selector": {"location": {"$gte": "FRA"}}, }, "reduce": "_count", - "options": { - "def": { - "fields": [ - "location" - ] - } - } + "options": {"def": {"fields": ["location"]}}, } - } + }, } oldschoolddoctext = { @@ -128,28 +66,20 @@ "index": { "default_analyzer": "keyword", "default_field": {}, - "selector": { - "location": {"$gte": "FRA"} - }, - "fields": [ - { - "name": "location", - "type": "string" - } - ], - "index_array_lengths": True - }, + "selector": {"location": {"$gte": "FRA"}}, + "fields": [{"name": "location", "type": "string"}], + "index_array_lengths": True, + }, "analyzer": { "name": "perfield", "default": "keyword", - "fields": { - "$default": "standard" - } - } + "fields": {"$default": "standard"}, + }, } - } + }, } + class IndexSelectorJson(mango.DbPerClass): def setUp(self): self.db.recreate() @@ -164,7 +94,7 @@ def test_saves_partial_filter_selector_in_index(self): def test_partial_filter_only_in_return_if_not_default(self): self.db.create_index(["location"]) index = self.db.list_indexes()[1] - self.assertEqual('partial_filter_selector' in index['def'], False) + self.assertEqual("partial_filter_selector" in index["def"], False) def test_saves_selector_in_index_throws(self): selector = {"location": {"$gte": "FRA"}} @@ -177,30 +107,50 @@ def test_saves_selector_in_index_throws(self): def test_uses_partial_index_for_query_selector(self): selector = {"location": {"$gte": "FRA"}} - self.db.create_index(["location"], partial_filter_selector=selector, ddoc="Selected", name="Selected") - resp = self.db.find(selector, explain=True, 
use_index='Selected') + self.db.create_index( + ["location"], + partial_filter_selector=selector, + ddoc="Selected", + name="Selected", + ) + resp = self.db.find(selector, explain=True, use_index="Selected") self.assertEqual(resp["index"]["name"], "Selected") - docs = self.db.find(selector, use_index='Selected') + docs = self.db.find(selector, use_index="Selected") self.assertEqual(len(docs), 3) def test_uses_partial_index_with_different_selector(self): selector = {"location": {"$gte": "FRA"}} selector2 = {"location": {"$gte": "A"}} - self.db.create_index(["location"], partial_filter_selector=selector, ddoc="Selected", name="Selected") - resp = self.db.find(selector2, explain=True, use_index='Selected') + self.db.create_index( + ["location"], + partial_filter_selector=selector, + ddoc="Selected", + name="Selected", + ) + resp = self.db.find(selector2, explain=True, use_index="Selected") self.assertEqual(resp["index"]["name"], "Selected") - docs = self.db.find(selector2, use_index='Selected') + docs = self.db.find(selector2, use_index="Selected") self.assertEqual(len(docs), 3) def test_doesnot_use_selector_when_not_specified(self): selector = {"location": {"$gte": "FRA"}} - self.db.create_index(["location"], partial_filter_selector=selector, ddoc="Selected", name="Selected") + self.db.create_index( + ["location"], + partial_filter_selector=selector, + ddoc="Selected", + name="Selected", + ) resp = self.db.find(selector, explain=True) self.assertEqual(resp["index"]["name"], "_all_docs") def test_doesnot_use_selector_when_not_specified_with_index(self): selector = {"location": {"$gte": "FRA"}} - self.db.create_index(["location"], partial_filter_selector=selector, ddoc="Selected", name="Selected") + self.db.create_index( + ["location"], + partial_filter_selector=selector, + ddoc="Selected", + name="Selected", + ) self.db.create_index(["location"], name="NotSelected") resp = self.db.find(selector, explain=True) self.assertEqual(resp["index"]["name"], "NotSelected") @@ 
-208,57 +158,82 @@ def test_doesnot_use_selector_when_not_specified_with_index(self): def test_old_selector_with_no_selector_still_supported(self): selector = {"location": {"$gte": "FRA"}} self.db.save_doc(oldschoolnoselectorddoc) - resp = self.db.find(selector, explain=True, use_index='oldschoolnoselector') + resp = self.db.find(selector, explain=True, use_index="oldschoolnoselector") self.assertEqual(resp["index"]["name"], "oldschoolnoselector") - docs = self.db.find(selector, use_index='oldschoolnoselector') + docs = self.db.find(selector, use_index="oldschoolnoselector") self.assertEqual(len(docs), 3) def test_old_selector_still_supported(self): selector = {"location": {"$gte": "FRA"}} self.db.save_doc(oldschoolddoc) - resp = self.db.find(selector, explain=True, use_index='oldschool') + resp = self.db.find(selector, explain=True, use_index="oldschool") self.assertEqual(resp["index"]["name"], "oldschool") - docs = self.db.find(selector, use_index='oldschool') + docs = self.db.find(selector, use_index="oldschool") self.assertEqual(len(docs), 3) @unittest.skipUnless(mango.has_text_service(), "requires text service") def test_text_saves_partialfilterselector_in_index(self): selector = {"location": {"$gte": "FRA"}} - self.db.create_text_index(fields=[{"name":"location", "type":"string"}], partial_filter_selector=selector) + self.db.create_text_index( + fields=[{"name": "location", "type": "string"}], + partial_filter_selector=selector, + ) indexes = self.db.list_indexes() self.assertEqual(indexes[1]["def"]["partial_filter_selector"], selector) @unittest.skipUnless(mango.has_text_service(), "requires text service") def test_text_uses_partial_index_for_query_selector(self): selector = {"location": {"$gte": "FRA"}} - self.db.create_text_index(fields=[{"name":"location", "type":"string"}], partial_filter_selector=selector, ddoc="Selected", name="Selected") - resp = self.db.find(selector, explain=True, use_index='Selected') + self.db.create_text_index( + fields=[{"name": 
"location", "type": "string"}], + partial_filter_selector=selector, + ddoc="Selected", + name="Selected", + ) + resp = self.db.find(selector, explain=True, use_index="Selected") self.assertEqual(resp["index"]["name"], "Selected") - docs = self.db.find(selector, use_index='Selected', fields=['_id', 'location']) + docs = self.db.find(selector, use_index="Selected", fields=["_id", "location"]) self.assertEqual(len(docs), 3) @unittest.skipUnless(mango.has_text_service(), "requires text service") def test_text_uses_partial_index_with_different_selector(self): selector = {"location": {"$gte": "FRA"}} selector2 = {"location": {"$gte": "A"}} - self.db.create_text_index(fields=[{"name":"location", "type":"string"}], partial_filter_selector=selector, ddoc="Selected", name="Selected") - resp = self.db.find(selector2, explain=True, use_index='Selected') + self.db.create_text_index( + fields=[{"name": "location", "type": "string"}], + partial_filter_selector=selector, + ddoc="Selected", + name="Selected", + ) + resp = self.db.find(selector2, explain=True, use_index="Selected") self.assertEqual(resp["index"]["name"], "Selected") - docs = self.db.find(selector2, use_index='Selected') + docs = self.db.find(selector2, use_index="Selected") self.assertEqual(len(docs), 3) @unittest.skipUnless(mango.has_text_service(), "requires text service") def test_text_doesnot_use_selector_when_not_specified(self): selector = {"location": {"$gte": "FRA"}} - self.db.create_text_index(fields=[{"name":"location", "type":"string"}], partial_filter_selector=selector, ddoc="Selected", name="Selected") + self.db.create_text_index( + fields=[{"name": "location", "type": "string"}], + partial_filter_selector=selector, + ddoc="Selected", + name="Selected", + ) resp = self.db.find(selector, explain=True) self.assertEqual(resp["index"]["name"], "_all_docs") @unittest.skipUnless(mango.has_text_service(), "requires text service") def test_text_doesnot_use_selector_when_not_specified_with_index(self): selector 
= {"location": {"$gte": "FRA"}} - self.db.create_text_index(fields=[{"name":"location", "type":"string"}], partial_filter_selector=selector, ddoc="Selected", name="Selected") - self.db.create_text_index(fields=[{"name":"location", "type":"string"}], name="NotSelected") + self.db.create_text_index( + fields=[{"name": "location", "type": "string"}], + partial_filter_selector=selector, + ddoc="Selected", + name="Selected", + ) + self.db.create_text_index( + fields=[{"name": "location", "type": "string"}], name="NotSelected" + ) resp = self.db.find(selector, explain=True) self.assertEqual(resp["index"]["name"], "NotSelected") @@ -266,23 +241,25 @@ def test_text_doesnot_use_selector_when_not_specified_with_index(self): def test_text_old_selector_still_supported(self): selector = {"location": {"$gte": "FRA"}} self.db.save_doc(oldschoolddoctext) - resp = self.db.find(selector, explain=True, use_index='oldschooltext') + resp = self.db.find(selector, explain=True, use_index="oldschooltext") self.assertEqual(resp["index"]["name"], "oldschooltext") - docs = self.db.find(selector, use_index='oldschooltext') + docs = self.db.find(selector, use_index="oldschooltext") self.assertEqual(len(docs), 3) @unittest.skipUnless(mango.has_text_service(), "requires text service") def test_text_old_selector_still_supported_via_api(self): selector = {"location": {"$gte": "FRA"}} - self.db.create_text_index(fields=[{"name":"location", "type":"string"}], - selector=selector, - ddoc="Selected", - name="Selected") - docs = self.db.find({"location": {"$exists":True}}, use_index='Selected') + self.db.create_text_index( + fields=[{"name": "location", "type": "string"}], + selector=selector, + ddoc="Selected", + name="Selected", + ) + docs = self.db.find({"location": {"$exists": True}}, use_index="Selected") self.assertEqual(len(docs), 3) @unittest.skipUnless(mango.has_text_service(), "requires text service") def test_text_partial_filter_only_in_return_if_not_default(self): - 
self.db.create_text_index(fields=[{"name":"location", "type":"string"}]) + self.db.create_text_index(fields=[{"name": "location", "type": "string"}]) index = self.db.list_indexes()[1] - self.assertEqual('partial_filter_selector' in index['def'], False) + self.assertEqual("partial_filter_selector" in index["def"], False) diff --git a/src/mango/test/17-multi-type-value-test.py b/src/mango/test/17-multi-type-value-test.py index d838447d55e..21e7afda420 100644 --- a/src/mango/test/17-multi-type-value-test.py +++ b/src/mango/test/17-multi-type-value-test.py @@ -15,36 +15,15 @@ import unittest DOCS = [ - { - "_id": "1", - "name": "Jimi", - "age": 10 - }, - { - "_id": "2", - "name": {"forename":"Eddie"}, - "age": 20 - }, - { - "_id": "3", - "name": None, - "age": 30 - }, - { - "_id": "4", - "name": 1, - "age": 40 - }, - { - "_id": "5", - "forename": "Sam", - "age": 50 - } + {"_id": "1", "name": "Jimi", "age": 10}, + {"_id": "2", "name": {"forename": "Eddie"}, "age": 20}, + {"_id": "3", "name": None, "age": 30}, + {"_id": "4", "name": 1, "age": 40}, + {"_id": "5", "forename": "Sam", "age": 50}, ] class MultiValueFieldTests: - def test_can_query_with_name(self): docs = self.db.find({"name": {"$exists": True}}) self.assertEqual(len(docs), 4) @@ -71,7 +50,6 @@ def test_can_query_with_age_and_name_range(self): self.assertIn("name", d) - class MultiValueFieldJSONTests(mango.DbPerClass, MultiValueFieldTests): def setUp(self): self.db.recreate() @@ -79,6 +57,7 @@ def setUp(self): self.db.create_index(["name"]) self.db.create_index(["age", "name"]) + # @unittest.skipUnless(mango.has_text_service(), "requires text service") # class MultiValueFieldTextTests(MultiValueFieldDocsNoIndexes, OperatorTests): # pass diff --git a/src/mango/test/18-json-sort.py b/src/mango/test/18-json-sort.py index f8d2abe99d7..d4e60a32cb0 100644 --- a/src/mango/test/18-json-sort.py +++ b/src/mango/test/18-json-sort.py @@ -15,38 +15,14 @@ import unittest DOCS = [ - { - "_id": "1", - "name": "Jimi", - "age": 
10, - "cars": 1 - }, - { - "_id": "2", - "name": "Eddie", - "age": 20, - "cars": 1 - }, - { - "_id": "3", - "name": "Jane", - "age": 30, - "cars": 2 - }, - { - "_id": "4", - "name": "Mary", - "age": 40, - "cars": 2 - }, - { - "_id": "5", - "name": "Sam", - "age": 50, - "cars": 3 - } + {"_id": "1", "name": "Jimi", "age": 10, "cars": 1}, + {"_id": "2", "name": "Eddie", "age": 20, "cars": 1}, + {"_id": "3", "name": "Jane", "age": 30, "cars": 2}, + {"_id": "4", "name": "Mary", "age": 40, "cars": 2}, + {"_id": "5", "name": "Sam", "age": 50, "cars": 3}, ] + class JSONIndexSortOptimisations(mango.DbPerClass): def setUp(self): self.db.recreate() @@ -54,62 +30,33 @@ def setUp(self): def test_works_for_basic_case(self): self.db.create_index(["cars", "age"], name="cars-age") - selector = { - "cars": "2", - "age": { - "$gt": 10 - } - } + selector = {"cars": "2", "age": {"$gt": 10}} explain = self.db.find(selector, sort=["age"], explain=True) self.assertEqual(explain["index"]["name"], "cars-age") self.assertEqual(explain["mrargs"]["direction"], "fwd") def test_works_for_all_fields_specified(self): self.db.create_index(["cars", "age"], name="cars-age") - selector = { - "cars": "2", - "age": { - "$gt": 10 - } - } + selector = {"cars": "2", "age": {"$gt": 10}} explain = self.db.find(selector, sort=["cars", "age"], explain=True) self.assertEqual(explain["index"]["name"], "cars-age") def test_works_for_no_sort_fields_specified(self): self.db.create_index(["cars", "age"], name="cars-age") - selector = { - "cars": { - "$gt": 10 - }, - "age": { - "$gt": 10 - } - } + selector = {"cars": {"$gt": 10}, "age": {"$gt": 10}} explain = self.db.find(selector, explain=True) self.assertEqual(explain["index"]["name"], "cars-age") def test_works_for_opp_dir_sort(self): self.db.create_index(["cars", "age"], name="cars-age") - selector = { - "cars": "2", - "age": { - "$gt": 10 - } - } + selector = {"cars": "2", "age": {"$gt": 10}} explain = self.db.find(selector, sort=[{"age": "desc"}], explain=True) 
self.assertEqual(explain["index"]["name"], "cars-age") self.assertEqual(explain["mrargs"]["direction"], "rev") - + def test_not_work_for_non_constant_field(self): self.db.create_index(["cars", "age"], name="cars-age") - selector = { - "cars": { - "$gt": 10 - }, - "age": { - "$gt": 10 - } - } + selector = {"cars": {"$gt": 10}, "age": {"$gt": 10}} try: self.db.find(selector, explain=True, sort=["age"]) raise Exception("Should not get here") @@ -119,39 +66,19 @@ def test_not_work_for_non_constant_field(self): def test_three_index_one(self): self.db.create_index(["cars", "age", "name"], name="cars-age-name") - selector = { - "cars": "2", - "age": 10, - "name": { - "$gt": "AA" - } - } + selector = {"cars": "2", "age": 10, "name": {"$gt": "AA"}} explain = self.db.find(selector, sort=["name"], explain=True) self.assertEqual(explain["index"]["name"], "cars-age-name") def test_three_index_two(self): self.db.create_index(["cars", "age", "name"], name="cars-age-name") - selector = { - "cars": "2", - "name": "Eddie", - "age": { - "$gt": 10 - } - } + selector = {"cars": "2", "name": "Eddie", "age": {"$gt": 10}} explain = self.db.find(selector, sort=["age"], explain=True) self.assertEqual(explain["index"]["name"], "cars-age-name") def test_three_index_fails(self): self.db.create_index(["cars", "age", "name"], name="cars-age-name") - selector = { - "name": "Eddie", - "age": { - "$gt": 1 - }, - "cars": { - "$gt": "1" - } - } + selector = {"name": "Eddie", "age": {"$gt": 1}, "cars": {"$gt": "1"}} try: self.db.find(selector, explain=True, sort=["name"]) raise Exception("Should not get here") @@ -161,27 +88,13 @@ def test_three_index_fails(self): def test_empty_sort(self): self.db.create_index(["cars", "age", "name"], name="cars-age-name") - selector = { - "name": { - "$gt": "Eddie", - }, - "age": 10, - "cars": { - "$gt": "1" - } - } + selector = {"name": {"$gt": "Eddie"}, "age": 10, "cars": {"$gt": "1"}} explain = self.db.find(selector, explain=True) 
self.assertEqual(explain["index"]["name"], "cars-age-name") def test_in_between(self): self.db.create_index(["cars", "age", "name"], name="cars-age-name") - selector = { - "name": "Eddie", - "age": 10, - "cars": { - "$gt": "1" - } - } + selector = {"name": "Eddie", "age": 10, "cars": {"$gt": "1"}} explain = self.db.find(selector, explain=True) self.assertEqual(explain["index"]["name"], "cars-age-name") @@ -191,29 +104,16 @@ def test_in_between(self): except Exception as e: resp = e.response.json() self.assertEqual(resp["error"], "no_usable_index") - + def test_ignore_after_set_sort_value(self): self.db.create_index(["cars", "age", "name"], name="cars-age-name") - selector = { - "age": { - "$gt": 10 - }, - "cars": 2, - "name": { - "$gt": "A" - } - } + selector = {"age": {"$gt": 10}, "cars": 2, "name": {"$gt": "A"}} explain = self.db.find(selector, sort=["age"], explain=True) self.assertEqual(explain["index"]["name"], "cars-age-name") def test_not_use_index_if_other_fields_in_sort(self): self.db.create_index(["cars", "age"], name="cars-age") - selector = { - "age": 10, - "cars": { - "$gt": "1" - } - } + selector = {"age": 10, "cars": {"$gt": "1"}} try: self.db.find(selector, sort=["cars", "name"], explain=True) raise Exception("Should not get here") diff --git a/src/mango/test/19-find-conflicts.py b/src/mango/test/19-find-conflicts.py index c6d59f00df6..bf865d6ea83 100644 --- a/src/mango/test/19-find-conflicts.py +++ b/src/mango/test/19-find-conflicts.py @@ -13,20 +13,10 @@ import mango import copy -DOC = [ - { - "_id": "doc", - "a": 2 - } -] +DOC = [{"_id": "doc", "a": 2}] + +CONFLICT = [{"_id": "doc", "_rev": "1-23202479633c2b380f79507a776743d5", "a": 1}] -CONFLICT = [ - { - "_id": "doc", - "_rev": "1-23202479633c2b380f79507a776743d5", - "a": 1 - } -] class ChooseCorrectIndexForDocs(mango.DbPerClass): def setUp(self): @@ -36,6 +26,8 @@ def setUp(self): def test_retrieve_conflicts(self): self.db.create_index(["_conflicts"]) - result = self.db.find({"_conflicts": { 
"$exists": True}}, conflicts=True) - self.assertEqual(result[0]['_conflicts'][0], '1-23202479633c2b380f79507a776743d5') - self.assertEqual(result[0]['_rev'], '1-3975759ccff3842adf690a5c10caee42') + result = self.db.find({"_conflicts": {"$exists": True}}, conflicts=True) + self.assertEqual( + result[0]["_conflicts"][0], "1-23202479633c2b380f79507a776743d5" + ) + self.assertEqual(result[0]["_rev"], "1-3975759ccff3842adf690a5c10caee42") diff --git a/src/mango/test/20-no-timeout-test.py b/src/mango/test/20-no-timeout-test.py index 93dc146a350..cffdfc33500 100644 --- a/src/mango/test/20-no-timeout-test.py +++ b/src/mango/test/20-no-timeout-test.py @@ -14,25 +14,19 @@ import copy import unittest -class LongRunningMangoTest(mango.DbPerClass): +class LongRunningMangoTest(mango.DbPerClass): def setUp(self): self.db.recreate() docs = [] for i in range(100000): - docs.append({ - "_id": str(i), - "another": "field" - }) + docs.append({"_id": str(i), "another": "field"}) if i % 20000 == 0: self.db.save_docs(docs) docs = [] - - # This test should run to completion and not timeout + + # This test should run to completion and not timeout def test_query_does_not_time_out(self): - selector = { - "_id": {"$gt": 0}, - "another": "wrong" - } + selector = {"_id": {"$gt": 0}, "another": "wrong"} docs = self.db.find(selector) self.assertEqual(len(docs), 0) diff --git a/src/mango/test/friend_docs.py b/src/mango/test/friend_docs.py index 075796138d9..c6442267e49 100644 --- a/src/mango/test/friend_docs.py +++ b/src/mango/test/friend_docs.py @@ -54,551 +54,227 @@ def add_text_indexes(db): db.create_text_index() -DOCS = [ +DOCS = [ { "_id": "54a43171d37ae5e81bff5ae0", "user_id": 0, - "name": { - "first": "Ochoa", - "last": "Fox" - }, + "name": {"first": "Ochoa", "last": "Fox"}, "friends": [ { "id": 0, - "name": { - "first": "Sherman", - "last": "Davidson" - }, - "type": "personal" + "name": {"first": "Sherman", "last": "Davidson"}, + "type": "personal", }, { "id": 1, - "name": { - "first": 
"Vargas", - "last": "Mendez" - }, - "type": "personal" + "name": {"first": "Vargas", "last": "Mendez"}, + "type": "personal", }, - { - "id": 2, - "name": { - "first": "Sheppard", - "last": "Cotton" - }, - "type": "work" - } - ] + {"id": 2, "name": {"first": "Sheppard", "last": "Cotton"}, "type": "work"}, + ], }, { "_id": "54a43171958485dc32917c50", "user_id": 1, - "name": { - "first": "Sheppard", - "last": "Cotton" - }, + "name": {"first": "Sheppard", "last": "Cotton"}, "friends": [ - { - "id": 0, - "name": { - "first": "Ochoa", - "last": "Fox" - }, - "type": "work" - }, + {"id": 0, "name": {"first": "Ochoa", "last": "Fox"}, "type": "work"}, { "id": 1, - "name": { - "first": "Vargas", - "last": "Mendez" - }, - "type": "personal" + "name": {"first": "Vargas", "last": "Mendez"}, + "type": "personal", }, - { - "id": 2, - "name": { - "first": "Kendra", - "last": "Burns" - }, - "type": "work" - } - ] + {"id": 2, "name": {"first": "Kendra", "last": "Burns"}, "type": "work"}, + ], }, { "_id": "54a431711cf025ba74bea899", "user_id": 2, - "name": { - "first": "Hunter", - "last": "Wells" - }, + "name": {"first": "Hunter", "last": "Wells"}, "friends": [ - { - "id": 0, - "name": { - "first": "Estes", - "last": "Fischer" - }, - "type": "work" - }, + {"id": 0, "name": {"first": "Estes", "last": "Fischer"}, "type": "work"}, { "id": 1, - "name": { - "first": "Farrell", - "last": "Maddox" - }, - "type": "personal" + "name": {"first": "Farrell", "last": "Maddox"}, + "type": "personal", }, - { - "id": 2, - "name": { - "first": "Kendra", - "last": "Burns" - }, - "type": "work" - } - ] + {"id": 2, "name": {"first": "Kendra", "last": "Burns"}, "type": "work"}, + ], }, { "_id": "54a4317151a70a9881ac28a4", "user_id": 3, - "name": { - "first": "Millicent", - "last": "Guy" - }, + "name": {"first": "Millicent", "last": "Guy"}, "friends": [ - { - "id": 0, - "name": { - "first": "Luella", - "last": "Mendoza" - }, - "type": "work" - }, + {"id": 0, "name": {"first": "Luella", "last": "Mendoza"}, 
"type": "work"}, { "id": 1, - "name": { - "first": "Melanie", - "last": "Foster" - }, - "type": "personal" + "name": {"first": "Melanie", "last": "Foster"}, + "type": "personal", }, - { - "id": 2, - "name": { - "first": "Hopkins", - "last": "Scott" - }, - "type": "work" - } - ] + {"id": 2, "name": {"first": "Hopkins", "last": "Scott"}, "type": "work"}, + ], }, { "_id": "54a43171d946b78703a0e076", "user_id": 4, - "name": { - "first": "Elisabeth", - "last": "Brady" - }, + "name": {"first": "Elisabeth", "last": "Brady"}, "friends": [ - { - "id": 0, - "name": { - "first": "Sofia", - "last": "Workman" - }, - "type": "work" - }, - { - "id": 1, - "name": { - "first": "Alisha", - "last": "Reilly" - }, - "type": "work" - }, - { - "id": 2, - "name": { - "first": "Ochoa", - "last": "Burch" - }, - "type": "personal" - } - ] + {"id": 0, "name": {"first": "Sofia", "last": "Workman"}, "type": "work"}, + {"id": 1, "name": {"first": "Alisha", "last": "Reilly"}, "type": "work"}, + {"id": 2, "name": {"first": "Ochoa", "last": "Burch"}, "type": "personal"}, + ], }, { "_id": "54a4317118abd7f1992464ee", "user_id": 5, - "name": { - "first": "Pollard", - "last": "French" - }, + "name": {"first": "Pollard", "last": "French"}, "friends": [ { "id": 0, - "name": { - "first": "Hollie", - "last": "Juarez" - }, - "type": "personal" + "name": {"first": "Hollie", "last": "Juarez"}, + "type": "personal", }, - { - "id": 1, - "name": { - "first": "Nelda", - "last": "Newton" - }, - "type": "personal" - }, - { - "id": 2, - "name": { - "first": "Yang", - "last": "Pace" - }, - "type": "personal" - } - ] + {"id": 1, "name": {"first": "Nelda", "last": "Newton"}, "type": "personal"}, + {"id": 2, "name": {"first": "Yang", "last": "Pace"}, "type": "personal"}, + ], }, { "_id": "54a43171f139e63d6579121e", "user_id": 6, - "name": { - "first": "Acevedo", - "last": "Morales" - }, + "name": {"first": "Acevedo", "last": "Morales"}, "friends": [ - { - "id": 0, - "name": { - "first": "Payne", - "last": "Berry" - }, - 
"type": "personal" - }, + {"id": 0, "name": {"first": "Payne", "last": "Berry"}, "type": "personal"}, { "id": 1, - "name": { - "first": "Rene", - "last": "Valenzuela" - }, - "type": "personal" + "name": {"first": "Rene", "last": "Valenzuela"}, + "type": "personal", }, - { - "id": 2, - "name": { - "first": "Dora", - "last": "Gallegos" - }, - "type": "work" - } - ] + {"id": 2, "name": {"first": "Dora", "last": "Gallegos"}, "type": "work"}, + ], }, { "_id": "54a431719783cef80876dde8", "user_id": 7, - "name": { - "first": "Cervantes", - "last": "Marquez" - }, + "name": {"first": "Cervantes", "last": "Marquez"}, "friends": [ { "id": 0, - "name": { - "first": "Maxwell", - "last": "Norman" - }, - "type": "personal" - }, - { - "id": 1, - "name": { - "first": "Shields", - "last": "Bass" - }, - "type": "personal" + "name": {"first": "Maxwell", "last": "Norman"}, + "type": "personal", }, - { - "id": 2, - "name": { - "first": "Luz", - "last": "Jacobson" - }, - "type": "work" - } - ] + {"id": 1, "name": {"first": "Shields", "last": "Bass"}, "type": "personal"}, + {"id": 2, "name": {"first": "Luz", "last": "Jacobson"}, "type": "work"}, + ], }, { "_id": "54a43171ecc7540d1f7aceae", "user_id": 8, - "name": { - "first": "West", - "last": "Morrow" - }, + "name": {"first": "West", "last": "Morrow"}, "friends": [ { "id": 0, - "name": { - "first": "Townsend", - "last": "Dixon" - }, - "type": "personal" + "name": {"first": "Townsend", "last": "Dixon"}, + "type": "personal", }, { "id": 1, - "name": { - "first": "Callahan", - "last": "Buck" - }, - "type": "personal" + "name": {"first": "Callahan", "last": "Buck"}, + "type": "personal", }, { "id": 2, - "name": { - "first": "Rachel", - "last": "Fletcher" - }, - "type": "personal" - } - ] + "name": {"first": "Rachel", "last": "Fletcher"}, + "type": "personal", + }, + ], }, { "_id": "54a4317113e831f4af041a0a", "user_id": 9, - "name": { - "first": "Cotton", - "last": "House" - }, + "name": {"first": "Cotton", "last": "House"}, "friends": [ { 
"id": 0, - "name": { - "first": "Mckenzie", - "last": "Medina" - }, - "type": "personal" - }, - { - "id": 1, - "name": { - "first": "Cecilia", - "last": "Miles" - }, - "type": "work" + "name": {"first": "Mckenzie", "last": "Medina"}, + "type": "personal", }, - { - "id": 2, - "name": { - "first": "Guerra", - "last": "Cervantes" - }, - "type": "work" - } - ] + {"id": 1, "name": {"first": "Cecilia", "last": "Miles"}, "type": "work"}, + {"id": 2, "name": {"first": "Guerra", "last": "Cervantes"}, "type": "work"}, + ], }, { "_id": "54a43171686eb1f48ebcbe01", "user_id": 10, - "name": { - "first": "Wright", - "last": "Rivas" - }, + "name": {"first": "Wright", "last": "Rivas"}, "friends": [ { "id": 0, - "name": { - "first": "Campos", - "last": "Freeman" - }, - "type": "personal" + "name": {"first": "Campos", "last": "Freeman"}, + "type": "personal", }, { "id": 1, - "name": { - "first": "Christian", - "last": "Ferguson" - }, - "type": "personal" + "name": {"first": "Christian", "last": "Ferguson"}, + "type": "personal", }, - { - "id": 2, - "name": { - "first": "Doreen", - "last": "Wilder" - }, - "type": "work" - } - ] + {"id": 2, "name": {"first": "Doreen", "last": "Wilder"}, "type": "work"}, + ], }, { "_id": "54a43171a4f3d5638c162f4f", "user_id": 11, - "name": { - "first": "Lorene", - "last": "Dorsey" - }, + "name": {"first": "Lorene", "last": "Dorsey"}, "friends": [ { "id": 0, - "name": { - "first": "Gibbs", - "last": "Mccarty" - }, - "type": "personal" + "name": {"first": "Gibbs", "last": "Mccarty"}, + "type": "personal", }, - { - "id": 1, - "name": { - "first": "Neal", - "last": "Franklin" - }, - "type": "work" - }, - { - "id": 2, - "name": { - "first": "Kristy", - "last": "Head" - }, - "type": "personal" - } + {"id": 1, "name": {"first": "Neal", "last": "Franklin"}, "type": "work"}, + {"id": 2, "name": {"first": "Kristy", "last": "Head"}, "type": "personal"}, ], - "bestfriends" : ["Wolverine", "Cyclops"] + "bestfriends": ["Wolverine", "Cyclops"], }, { "_id": 
"54a431719faa420a5b4fbeb0", "user_id": 12, - "name": { - "first": "Juanita", - "last": "Cook" - }, + "name": {"first": "Juanita", "last": "Cook"}, "friends": [ - { - "id": 0, - "name": { - "first": "Wilkins", - "last": "Chang" - }, - "type": "work" - }, - { - "id": 1, - "name": { - "first": "Haney", - "last": "Rivera" - }, - "type": "work" - }, - { - "id": 2, - "name": { - "first": "Lauren", - "last": "Manning" - }, - "type": "work" - } - ] + {"id": 0, "name": {"first": "Wilkins", "last": "Chang"}, "type": "work"}, + {"id": 1, "name": {"first": "Haney", "last": "Rivera"}, "type": "work"}, + {"id": 2, "name": {"first": "Lauren", "last": "Manning"}, "type": "work"}, + ], }, { "_id": "54a43171e65d35f9ee8c53c0", "user_id": 13, - "name": { - "first": "Levy", - "last": "Osborn" - }, + "name": {"first": "Levy", "last": "Osborn"}, "friends": [ - { - "id": 0, - "name": { - "first": "Vinson", - "last": "Vargas" - }, - "type": "work" - }, - { - "id": 1, - "name": { - "first": "Felicia", - "last": "Beach" - }, - "type": "work" - }, - { - "id": 2, - "name": { - "first": "Nadine", - "last": "Kemp" - }, - "type": "work" - } + {"id": 0, "name": {"first": "Vinson", "last": "Vargas"}, "type": "work"}, + {"id": 1, "name": {"first": "Felicia", "last": "Beach"}, "type": "work"}, + {"id": 2, "name": {"first": "Nadine", "last": "Kemp"}, "type": "work"}, ], - "results": [ 82, 85, 88 ] + "results": [82, 85, 88], }, { "_id": "54a4317132f2c81561833259", "user_id": 14, - "name": { - "first": "Christina", - "last": "Raymond" - }, + "name": {"first": "Christina", "last": "Raymond"}, "friends": [ - { - "id": 0, - "name": { - "first": "Herrera", - "last": "Walton" - }, - "type": "work" - }, - { - "id": 1, - "name": { - "first": "Hahn", - "last": "Rutledge" - }, - "type": "work" - }, - { - "id": 2, - "name": { - "first": "Stacie", - "last": "Harding" - }, - "type": "work" - } - ] + {"id": 0, "name": {"first": "Herrera", "last": "Walton"}, "type": "work"}, + {"id": 1, "name": {"first": "Hahn", 
"last": "Rutledge"}, "type": "work"}, + {"id": 2, "name": {"first": "Stacie", "last": "Harding"}, "type": "work"}, + ], }, { "_id": "589f32af493145f890e1b051", "user_id": 15, - "name": { - "first": "Tanisha", - "last": "Bowers" - }, + "name": {"first": "Tanisha", "last": "Bowers"}, "friends": [ - { - "id": 0, - "name": { - "first": "Ochoa", - "last": "Pratt" - }, - "type": "personal" - }, - { - "id": 1, - "name": { - "first": "Ochoa", - "last": "Romero" - }, - "type": "personal" - }, - { - "id": 2, - "name": { - "first": "Ochoa", - "last": "Bowman" - }, - "type": "work" - } - ] - } + {"id": 0, "name": {"first": "Ochoa", "last": "Pratt"}, "type": "personal"}, + {"id": 1, "name": {"first": "Ochoa", "last": "Romero"}, "type": "personal"}, + {"id": 2, "name": {"first": "Ochoa", "last": "Bowman"}, "type": "work"}, + ], + }, ] diff --git a/src/mango/test/limit_docs.py b/src/mango/test/limit_docs.py index 53ab5232d89..6c12790beea 100644 --- a/src/mango/test/limit_docs.py +++ b/src/mango/test/limit_docs.py @@ -26,383 +26,80 @@ def add_text_indexes(db): db.create_text_index() -DOCS = [ - { - "_id": "54af50626de419f5109c962f", - "user_id": 0, - "age": 10 - }, - { - "_id": "54af50622071121b25402dc3", - "user_id": 1, - "age": 11 - - }, - { - "_id": "54af50623809e19159a3cdd0", - "user_id": 2, - "age": 12 - }, - { - "_id": "54af50629f45a0f49a441d01", - "user_id": 3, - "age": 13 - - }, - { - "_id": "54af50620f1755c22359a362", - "user_id": 4, - "age": 14 - }, - { - "_id": "54af5062dd6f6c689ad2ca23", - "user_id": 5, - "age": 15 - }, - { - "_id": "54af50623e89b432be1187b8", - "user_id": 6, - "age": 16 - }, - { - "_id": "54af5062932a00270a3b5ab0", - "user_id": 7, - "age": 17 - - }, - { - "_id": "54af5062df773d69174e3345", - "filtered_array" : [1, 2, 3], - "age": 18 - }, - { - "_id": "54af50629c1153b9e21e346d", - "filtered_array" : [1, 2, 3], - "age": 19 - }, - { - "_id": "54af5062dabb7cc4b60e0c95", - "user_id": 10, - "age": 20 - }, - { - "_id": "54af5062204996970a4439a2", - 
"user_id": 11, - "age": 21 - }, - { - "_id": "54af50629cea39e8ea52bfac", - "user_id": 12, - "age": 22 - }, - { - "_id": "54af50620597c094f75db2a1", - "user_id": 13, - "age": 23 - }, - { - "_id": "54af50628d4048de0010723c", - "user_id": 14, - "age": 24 - }, - { - "_id": "54af5062f339b6f44f52faf6", - "user_id": 15, - "age": 25 - }, - { - "_id": "54af5062a893f17ea4402031", - "user_id": 16, - "age": 26 - }, - { - "_id": "54af5062323dbc7077deb60a", - "user_id": 17, - "age": 27 - }, - { - "_id": "54af506224db85bd7fcd0243", - "filtered_array" : [1, 2, 3], - "age": 28 - }, - { - "_id": "54af506255bb551c9cc251bf", - "filtered_array" : [1, 2, 3], - "age": 29 - }, - { - "_id": "54af50625a97394e07d718a1", - "filtered_array" : [1, 2, 3], - "age": 30 - }, - { - "_id": "54af506223f51d586b4ef529", - "user_id": 21, - "age": 31 - }, - { - "_id": "54af50622740dede7d6117b7", - "user_id": 22, - "age": 32 - }, - { - "_id": "54af50624efc87684a52e8fb", - "user_id": 23, - "age": 33 - }, - { - "_id": "54af5062f40932760347799c", - "user_id": 24, - "age": 34 - }, - { - "_id": "54af5062d9f7361951ac645d", - "user_id": 25, - "age": 35 - }, - { - "_id": "54af5062f89aef302b37c3bc", - "filtered_array" : [1, 2, 3], - "age": 36 - }, - { - "_id": "54af5062498ec905dcb351f8", - "filtered_array" : [1, 2, 3], - "age": 37 - }, - { - "_id": "54af5062b1d2f2c5a85bdd7e", - "user_id": 28, - "age": 38 - }, - { - "_id": "54af50625061029c0dd942b5", - "filtered_array" : [1, 2, 3], - "age": 39 - }, - { - "_id": "54af50628b0d08a1d23c030a", - "user_id": 30, - "age": 40 - }, - { - "_id": "54af506271b6e3119eb31d46", - "filtered_array" : [1, 2, 3], - "age": 41 - }, - { - "_id": "54af5062b69f46424dfcf3e5", - "user_id": 32, - "age": 42 - }, - { - "_id": "54af5062ed00c7dbe4d1bdcf", - "user_id": 33, - "age": 43 - }, - { - "_id": "54af5062fb64e45180c9a90d", - "user_id": 34, - "age": 44 - }, - { - "_id": "54af5062241c72b067127b09", - "user_id": 35, - "age": 45 - }, - { - "_id": "54af50626a467d8b781a6d06", - "user_id": 36, - 
"age": 46 - }, - { - "_id": "54af50620e992d60af03bf86", - "filtered_array" : [1, 2, 3], - "age": 47 - }, - { - "_id": "54af506254f992aa3c51532f", - "user_id": 38, - "age": 48 - }, - { - "_id": "54af5062e99b20f301de39b9", - "user_id": 39, - "age": 49 - }, - { - "_id": "54af50624fbade6b11505b5d", - "user_id": 40, - "age": 50 - }, - { - "_id": "54af506278ad79b21e807ae4", - "user_id": 41, - "age": 51 - }, - { - "_id": "54af5062fc7a1dcb33f31d08", - "user_id": 42, - "age": 52 - }, - { - "_id": "54af5062ea2c954c650009cf", - "user_id": 43, - "age": 53 - }, - { - "_id": "54af506213576c2f09858266", - "user_id": 44, - "age": 54 - }, - { - "_id": "54af50624a05ac34c994b1c0", - "user_id": 45, - "age": 55 - }, - { - "_id": "54af50625a624983edf2087e", - "user_id": 46, - "age": 56 - }, - { - "_id": "54af50623de488c49d064355", - "user_id": 47, - "age": 57 - }, - { - "_id": "54af5062628b5df08661a9d5", - "user_id": 48, - "age": 58 - }, - { - "_id": "54af50620c706fc23032ae62", - "user_id": 49, - "age": 59 - }, - { - "_id": "54af5062509f1e2371fe1da4", - "user_id": 50, - "age": 60 - }, - { - "_id": "54af50625e96b22436791653", - "user_id": 51, - "age": 61 - }, - { - "_id": "54af5062a9cb71463bb9577f", - "user_id": 52, - "age": 62 - }, - { - "_id": "54af50624fea77a4221a4baf", - "user_id": 53, - "age": 63 - }, - { - "_id": "54af5062c63df0a147d2417e", - "user_id": 54, - "age": 64 - }, - { - "_id": "54af50623c56d78029316c9f", - "user_id": 55, - "age": 65 - }, - { - "_id": "54af5062167f6e13aa0dd014", - "user_id": 56, - "age": 66 - }, - { - "_id": "54af50621558abe77797d137", - "filtered_array" : [1, 2, 3], - "age": 67 - }, - { - "_id": "54af50624d5b36aa7cb5fa77", - "user_id": 58, - "age": 68 - }, - { - "_id": "54af50620d79118184ae66bd", - "user_id": 59, - "age": 69 - }, - { - "_id": "54af5062d18aafa5c4ca4935", - "user_id": 60, - "age": 71 - }, - { - "_id": "54af5062fd22a409649962f4", - "filtered_array" : [1, 2, 3], - "age": 72 - }, - { - "_id": "54af5062e31045a1908e89f9", - "user_id": 62, - 
"age": 73 - }, - { - "_id": "54af50624c062fcb4c59398b", - "user_id": 63, - "age": 74 - }, - { - "_id": "54af506241ec83430a15957f", - "user_id": 64, - "age": 75 - }, - { - "_id": "54af506224d0f888ae411101", - "user_id": 65, - "age": 76 - }, - { - "_id": "54af506272a971c6cf3ab6b8", - "user_id": 66, - "age": 77 - }, - { - "_id": "54af506221e25b485c95355b", - "user_id": 67, - "age": 78 - }, - { - "_id": "54af5062800f7f2ca73e9623", - "user_id": 68, - "age": 79 - }, - { - "_id": "54af5062bc962da30740534a", - "user_id": 69, - "age": 80 - }, - { - "_id": "54af50625102d6e210fc2efd", - "filtered_array" : [1, 2, 3], - "age": 81 - }, - { - "_id": "54af5062e014b9d039f02c5e", - "user_id": 71, - "age": 82 - }, - { - "_id": "54af5062fbd5e801dd217515", - "user_id": 72, - "age": 83 - }, - { - "_id": "54af50629971992b658fcb88", - "user_id": 73, - "age": 84 - }, - { - "_id": "54af5062607d53416c30bafd", - "filtered_array" : [1, 2, 3], - "age": 85 - } +DOCS = [ + {"_id": "54af50626de419f5109c962f", "user_id": 0, "age": 10}, + {"_id": "54af50622071121b25402dc3", "user_id": 1, "age": 11}, + {"_id": "54af50623809e19159a3cdd0", "user_id": 2, "age": 12}, + {"_id": "54af50629f45a0f49a441d01", "user_id": 3, "age": 13}, + {"_id": "54af50620f1755c22359a362", "user_id": 4, "age": 14}, + {"_id": "54af5062dd6f6c689ad2ca23", "user_id": 5, "age": 15}, + {"_id": "54af50623e89b432be1187b8", "user_id": 6, "age": 16}, + {"_id": "54af5062932a00270a3b5ab0", "user_id": 7, "age": 17}, + {"_id": "54af5062df773d69174e3345", "filtered_array": [1, 2, 3], "age": 18}, + {"_id": "54af50629c1153b9e21e346d", "filtered_array": [1, 2, 3], "age": 19}, + {"_id": "54af5062dabb7cc4b60e0c95", "user_id": 10, "age": 20}, + {"_id": "54af5062204996970a4439a2", "user_id": 11, "age": 21}, + {"_id": "54af50629cea39e8ea52bfac", "user_id": 12, "age": 22}, + {"_id": "54af50620597c094f75db2a1", "user_id": 13, "age": 23}, + {"_id": "54af50628d4048de0010723c", "user_id": 14, "age": 24}, + {"_id": "54af5062f339b6f44f52faf6", "user_id": 
15, "age": 25}, + {"_id": "54af5062a893f17ea4402031", "user_id": 16, "age": 26}, + {"_id": "54af5062323dbc7077deb60a", "user_id": 17, "age": 27}, + {"_id": "54af506224db85bd7fcd0243", "filtered_array": [1, 2, 3], "age": 28}, + {"_id": "54af506255bb551c9cc251bf", "filtered_array": [1, 2, 3], "age": 29}, + {"_id": "54af50625a97394e07d718a1", "filtered_array": [1, 2, 3], "age": 30}, + {"_id": "54af506223f51d586b4ef529", "user_id": 21, "age": 31}, + {"_id": "54af50622740dede7d6117b7", "user_id": 22, "age": 32}, + {"_id": "54af50624efc87684a52e8fb", "user_id": 23, "age": 33}, + {"_id": "54af5062f40932760347799c", "user_id": 24, "age": 34}, + {"_id": "54af5062d9f7361951ac645d", "user_id": 25, "age": 35}, + {"_id": "54af5062f89aef302b37c3bc", "filtered_array": [1, 2, 3], "age": 36}, + {"_id": "54af5062498ec905dcb351f8", "filtered_array": [1, 2, 3], "age": 37}, + {"_id": "54af5062b1d2f2c5a85bdd7e", "user_id": 28, "age": 38}, + {"_id": "54af50625061029c0dd942b5", "filtered_array": [1, 2, 3], "age": 39}, + {"_id": "54af50628b0d08a1d23c030a", "user_id": 30, "age": 40}, + {"_id": "54af506271b6e3119eb31d46", "filtered_array": [1, 2, 3], "age": 41}, + {"_id": "54af5062b69f46424dfcf3e5", "user_id": 32, "age": 42}, + {"_id": "54af5062ed00c7dbe4d1bdcf", "user_id": 33, "age": 43}, + {"_id": "54af5062fb64e45180c9a90d", "user_id": 34, "age": 44}, + {"_id": "54af5062241c72b067127b09", "user_id": 35, "age": 45}, + {"_id": "54af50626a467d8b781a6d06", "user_id": 36, "age": 46}, + {"_id": "54af50620e992d60af03bf86", "filtered_array": [1, 2, 3], "age": 47}, + {"_id": "54af506254f992aa3c51532f", "user_id": 38, "age": 48}, + {"_id": "54af5062e99b20f301de39b9", "user_id": 39, "age": 49}, + {"_id": "54af50624fbade6b11505b5d", "user_id": 40, "age": 50}, + {"_id": "54af506278ad79b21e807ae4", "user_id": 41, "age": 51}, + {"_id": "54af5062fc7a1dcb33f31d08", "user_id": 42, "age": 52}, + {"_id": "54af5062ea2c954c650009cf", "user_id": 43, "age": 53}, + {"_id": "54af506213576c2f09858266", "user_id": 
44, "age": 54}, + {"_id": "54af50624a05ac34c994b1c0", "user_id": 45, "age": 55}, + {"_id": "54af50625a624983edf2087e", "user_id": 46, "age": 56}, + {"_id": "54af50623de488c49d064355", "user_id": 47, "age": 57}, + {"_id": "54af5062628b5df08661a9d5", "user_id": 48, "age": 58}, + {"_id": "54af50620c706fc23032ae62", "user_id": 49, "age": 59}, + {"_id": "54af5062509f1e2371fe1da4", "user_id": 50, "age": 60}, + {"_id": "54af50625e96b22436791653", "user_id": 51, "age": 61}, + {"_id": "54af5062a9cb71463bb9577f", "user_id": 52, "age": 62}, + {"_id": "54af50624fea77a4221a4baf", "user_id": 53, "age": 63}, + {"_id": "54af5062c63df0a147d2417e", "user_id": 54, "age": 64}, + {"_id": "54af50623c56d78029316c9f", "user_id": 55, "age": 65}, + {"_id": "54af5062167f6e13aa0dd014", "user_id": 56, "age": 66}, + {"_id": "54af50621558abe77797d137", "filtered_array": [1, 2, 3], "age": 67}, + {"_id": "54af50624d5b36aa7cb5fa77", "user_id": 58, "age": 68}, + {"_id": "54af50620d79118184ae66bd", "user_id": 59, "age": 69}, + {"_id": "54af5062d18aafa5c4ca4935", "user_id": 60, "age": 71}, + {"_id": "54af5062fd22a409649962f4", "filtered_array": [1, 2, 3], "age": 72}, + {"_id": "54af5062e31045a1908e89f9", "user_id": 62, "age": 73}, + {"_id": "54af50624c062fcb4c59398b", "user_id": 63, "age": 74}, + {"_id": "54af506241ec83430a15957f", "user_id": 64, "age": 75}, + {"_id": "54af506224d0f888ae411101", "user_id": 65, "age": 76}, + {"_id": "54af506272a971c6cf3ab6b8", "user_id": 66, "age": 77}, + {"_id": "54af506221e25b485c95355b", "user_id": 67, "age": 78}, + {"_id": "54af5062800f7f2ca73e9623", "user_id": 68, "age": 79}, + {"_id": "54af5062bc962da30740534a", "user_id": 69, "age": 80}, + {"_id": "54af50625102d6e210fc2efd", "filtered_array": [1, 2, 3], "age": 81}, + {"_id": "54af5062e014b9d039f02c5e", "user_id": 71, "age": 82}, + {"_id": "54af5062fbd5e801dd217515", "user_id": 72, "age": 83}, + {"_id": "54af50629971992b658fcb88", "user_id": 73, "age": 84}, + {"_id": "54af5062607d53416c30bafd", "filtered_array": 
[1, 2, 3], "age": 85}, ] diff --git a/src/mango/test/mango.py b/src/mango/test/mango.py index dfe220d2b1d..de8a638a81d 100644 --- a/src/mango/test/mango.py +++ b/src/mango/test/mango.py @@ -26,13 +26,16 @@ def random_db_name(): return "mango_test_" + uuid.uuid4().hex + def has_text_service(): - return os.environ.get('MANGO_TEXT_INDEXES') == '1' + return os.environ.get("MANGO_TEXT_INDEXES") == "1" + def get_from_environment(key, default): value = os.environ.get(key) return value if value is not None else default + # add delay functionality def delay(n=5, t=0.5): for i in range(0, n): @@ -40,13 +43,18 @@ def delay(n=5, t=0.5): class Database(object): - def __init__(self, dbname, - host="127.0.0.1", port="15984", - user='testuser', password='testpass'): - root_url = get_from_environment('COUCH_HOST', "http://{}:{}".format(host, port)) - auth_header = get_from_environment('COUCH_AUTH_HEADER', None) - user = get_from_environment('COUCH_USER', user) - password = get_from_environment('COUCH_PASSWORD', password) + def __init__( + self, + dbname, + host="127.0.0.1", + port="15984", + user="testuser", + password="testpass", + ): + root_url = get_from_environment("COUCH_HOST", "http://{}:{}".format(host, port)) + auth_header = get_from_environment("COUCH_AUTH_HEADER", None) + user = get_from_environment("COUCH_USER", user) + password = get_from_environment("COUCH_PASSWORD", password) self.root_url = root_url self.dbname = dbname @@ -61,7 +69,6 @@ def __init__(self, dbname, self.sess.headers["Content-Type"] = "application/json" - @property def url(self): return "{}/{}".format(self.root_url, self.dbname) @@ -74,7 +81,7 @@ def path(self, parts): def create(self, q=1, n=1): r = self.sess.get(self.url) if r.status_code == 404: - r = self.sess.put(self.url, params={"q":q, "n": n}) + r = self.sess.put(self.url, params={"q": q, "n": n}) r.raise_for_status() def delete(self): @@ -116,7 +123,7 @@ def open_doc(self, docid): def delete_doc(self, docid): r = 
self.sess.get(self.path(docid)) r.raise_for_status() - original_rev = r.json()['_rev'] + original_rev = r.json()["_rev"] self.sess.delete(self.path(docid), params={"rev": original_rev}) def ddoc_info(self, ddocid): @@ -124,15 +131,16 @@ def ddoc_info(self, ddocid): r.raise_for_status() return r.json() - def create_index(self, fields, idx_type="json", name=None, ddoc=None, - partial_filter_selector=None, selector=None): - body = { - "index": { - "fields": fields - }, - "type": idx_type, - "w": 3 - } + def create_index( + self, + fields, + idx_type="json", + name=None, + ddoc=None, + partial_filter_selector=None, + selector=None, + ): + body = {"index": {"fields": fields}, "type": idx_type, "w": 3} if name is not None: body["name"] = name if ddoc is not None: @@ -155,15 +163,19 @@ def create_index(self, fields, idx_type="json", name=None, ddoc=None, return created - def create_text_index(self, analyzer=None, idx_type="text", - partial_filter_selector=None, selector=None, default_field=None, fields=None, - name=None, ddoc=None,index_array_lengths=None): - body = { - "index": { - }, - "type": idx_type, - "w": 3, - } + def create_text_index( + self, + analyzer=None, + idx_type="text", + partial_filter_selector=None, + selector=None, + default_field=None, + fields=None, + name=None, + ddoc=None, + index_array_lengths=None, + ): + body = {"index": {}, "type": idx_type, "w": 3} if name is not None: body["name"] = name if analyzer is not None: @@ -190,10 +202,10 @@ def list_indexes(self, limit="", skip=""): limit = "limit=" + str(limit) if skip != "": skip = "skip=" + str(skip) - r = self.sess.get(self.path("_index?"+limit+";"+skip)) + r = self.sess.get(self.path("_index?" 
+ limit + ";" + skip)) r.raise_for_status() return r.json()["indexes"] - + def get_index(self, ddocid, name): if ddocid is None: return [i for i in self.list_indexes() if i["name"] == name] @@ -205,7 +217,11 @@ def get_index(self, ddocid, name): if name is None: return [i for i in self.list_indexes() if i["ddoc"] == ddocid] else: - return [i for i in self.list_indexes() if i["ddoc"] == ddocid and i["name"] == name] + return [ + i + for i in self.list_indexes() + if i["ddoc"] == ddocid and i["name"] == name + ] def delete_index(self, ddocid, name, idx_type="json"): path = ["_index", ddocid, idx_type, name] @@ -216,24 +232,34 @@ def delete_index(self, ddocid, name, idx_type="json"): delay(t=0.1) def bulk_delete(self, docs): - body = { - "docids" : docs, - "w": 3 - } + body = {"docids": docs, "w": 3} body = json.dumps(body) r = self.sess.post(self.path("_index/_bulk_delete"), data=body) return r.json() - def find(self, selector, limit=25, skip=0, sort=None, fields=None, - r=1, conflicts=False, use_index=None, explain=False, - bookmark=None, return_raw=False, update=True, executionStats=False): + def find( + self, + selector, + limit=25, + skip=0, + sort=None, + fields=None, + r=1, + conflicts=False, + use_index=None, + explain=False, + bookmark=None, + return_raw=False, + update=True, + executionStats=False, + ): body = { "selector": selector, "use_index": use_index, "limit": limit, "skip": skip, "r": r, - "conflicts": conflicts + "conflicts": conflicts, } if sort is not None: body["sort"] = sort @@ -268,7 +294,6 @@ def find_one(self, *args, **kwargs): class UsersDbTests(unittest.TestCase): - @classmethod def setUpClass(klass): klass.db = Database("_users") @@ -279,7 +304,6 @@ def setUp(self): class DbPerClass(unittest.TestCase): - @classmethod def setUpClass(klass): klass.db = Database(random_db_name()) @@ -290,7 +314,6 @@ def setUp(self): class UserDocsTests(DbPerClass): - @classmethod def setUpClass(klass): super(UserDocsTests, klass).setUpClass() @@ -298,14 
+321,10 @@ def setUpClass(klass): class UserDocsTestsNoIndexes(DbPerClass): - @classmethod def setUpClass(klass): super(UserDocsTestsNoIndexes, klass).setUpClass() - user_docs.setup( - klass.db, - index_type="_all_docs" - ) + user_docs.setup(klass.db, index_type="_all_docs") class UserDocsTextTests(DbPerClass): @@ -318,23 +337,22 @@ def setUpClass(klass): super(UserDocsTextTests, klass).setUpClass() if has_text_service(): user_docs.setup( - klass.db, - index_type="text", - default_field=klass.DEFAULT_FIELD, - fields=klass.FIELDS + klass.db, + index_type="text", + default_field=klass.DEFAULT_FIELD, + fields=klass.FIELDS, ) class FriendDocsTextTests(DbPerClass): - @classmethod def setUpClass(klass): super(FriendDocsTextTests, klass).setUpClass() if has_text_service(): friend_docs.setup(klass.db, index_type="text") -class LimitDocsTextTests(DbPerClass): +class LimitDocsTextTests(DbPerClass): @classmethod def setUpClass(klass): super(LimitDocsTextTests, klass).setUpClass() diff --git a/src/mango/test/user_docs.py b/src/mango/test/user_docs.py index 02ffe9ffcd7..afbea710e5e 100644 --- a/src/mango/test/user_docs.py +++ b/src/mango/test/user_docs.py @@ -77,14 +77,14 @@ def add_view_indexes(db, kwargs): "location.state", "location.city", "location.address.street", - "location.address.number" + "location.address.number", ], ["company", "manager"], ["manager"], ["favorites"], ["favorites.3"], ["twitter"], - ["ordered"] + ["ordered"], ] for idx in indexes: assert db.create_index(idx) is True @@ -98,408 +98,253 @@ def add_text_indexes(db, kwargs): { "_id": "71562648-6acb-42bc-a182-df6b1f005b09", "user_id": 0, - "name": { - "first": "Stephanie", - "last": "Kirkland" - }, + "name": {"first": "Stephanie", "last": "Kirkland"}, "age": 48, "location": { "state": "Nevada", "city": "Ronco", - "address": { - "street": "Evergreen Avenue", - "number": 347 - } + "address": {"street": "Evergreen Avenue", "number": 347}, }, "company": "Dreamia", "email": "stephaniekirkland@dreamia.com", 
"manager": False, "twitter": "@stephaniekirkland", - "favorites": [ - "Ruby", - "C", - "Python" - ], - "test" : [{"a":1}, {"b":2}] + "favorites": ["Ruby", "C", "Python"], + "test": [{"a": 1}, {"b": 2}], }, { "_id": "12a2800c-4fe2-45a8-8d78-c084f4e242a9", "user_id": 1, - "name": { - "first": "Abbott", - "last": "Watson" - }, + "name": {"first": "Abbott", "last": "Watson"}, "age": 31, "location": { "state": "Connecticut", "city": "Gerber", - "address": { - "street": "Huntington Street", - "number": 8987 - } + "address": {"street": "Huntington Street", "number": 8987}, }, "company": "Talkola", "email": "abbottwatson@talkola.com", "manager": False, "twitter": "@abbottwatson", - "favorites": [ - "Ruby", - "Python", - "C", - {"Versions": {"Alpha": "Beta"}} - ], - "test" : [{"a":1, "b":2}] + "favorites": ["Ruby", "Python", "C", {"Versions": {"Alpha": "Beta"}}], + "test": [{"a": 1, "b": 2}], }, { "_id": "48ca0455-8bd0-473f-9ae2-459e42e3edd1", "user_id": 2, - "name": { - "first": "Shelly", - "last": "Ewing" - }, + "name": {"first": "Shelly", "last": "Ewing"}, "age": 42, "location": { "state": "New Mexico", "city": "Thornport", - "address": { - "street": "Miller Avenue", - "number": 7100 - } + "address": {"street": "Miller Avenue", "number": 7100}, }, "company": "Zialactic", "email": "shellyewing@zialactic.com", "manager": True, - "favorites": [ - "Lisp", - "Python", - "Erlang" - ], - "test_in": {"val1" : 1, "val2": "val2"} + "favorites": ["Lisp", "Python", "Erlang"], + "test_in": {"val1": 1, "val2": "val2"}, }, { "_id": "0461444c-e60a-457d-a4bb-b8d811853f21", "user_id": 3, - "name": { - "first": "Madelyn", - "last": "Soto" - }, + "name": {"first": "Madelyn", "last": "Soto"}, "age": 79, "location": { "state": "Utah", "city": "Albany", - "address": { - "street": "Stockholm Street", - "number": 710 - } + "address": {"street": "Stockholm Street", "number": 710}, }, "company": "Tasmania", "email": "madelynsoto@tasmania.com", "manager": True, - "favorites": [[ - "Lisp", - 
"Erlang", - "Python" - ], - "Erlang", - "C", - "Erlang" - ], + "favorites": [["Lisp", "Erlang", "Python"], "Erlang", "C", "Erlang"], "11111": "number_field", - "22222": {"33333" : "nested_number_field"} + "22222": {"33333": "nested_number_field"}, }, { "_id": "8e1c90c0-ac18-4832-8081-40d14325bde0", "user_id": 4, - "name": { - "first": "Nona", - "last": "Horton" - }, + "name": {"first": "Nona", "last": "Horton"}, "age": 61, "location": { "state": "Georgia", "city": "Corinne", - "address": { - "street": "Woodhull Street", - "number": 6845 - } + "address": {"street": "Woodhull Street", "number": 6845}, }, "company": "Signidyne", "email": "nonahorton@signidyne.com", "manager": False, "twitter": "@nonahorton", - "favorites": [ - "Lisp", - "C", - "Ruby", - "Ruby" - ], - "name.first" : "name dot first" + "favorites": ["Lisp", "C", "Ruby", "Ruby"], + "name.first": "name dot first", }, { "_id": "a33d5457-741a-4dce-a217-3eab28b24e3e", "user_id": 5, - "name": { - "first": "Sheri", - "last": "Perkins" - }, + "name": {"first": "Sheri", "last": "Perkins"}, "age": 73, "location": { "state": "Michigan", "city": "Nutrioso", - "address": { - "street": "Bassett Avenue", - "number": 5648 - } + "address": {"street": "Bassett Avenue", "number": 5648}, }, "company": "Myopium", "email": "sheriperkins@myopium.com", "manager": True, - "favorites": [ - "Lisp", - "Lisp" - ] + "favorites": ["Lisp", "Lisp"], }, { "_id": "b31dad3f-ae8b-4f86-8327-dfe8770beb27", "user_id": 6, - "name": { - "first": "Tate", - "last": "Guy" - }, + "name": {"first": "Tate", "last": "Guy"}, "age": 47, "location": { "state": "Illinois", "city": "Helen", - "address": { - "street": "Schenck Court", - "number": 7392 - } + "address": {"street": "Schenck Court", "number": 7392}, }, "company": "Prosely", "email": "tateguy@prosely.com", "manager": True, - "favorites": [ - "C", - "Lisp", - "Ruby", - "C" - ] + "favorites": ["C", "Lisp", "Ruby", "C"], }, { "_id": "659d0430-b1f4-413a-a6b7-9ea1ef071325", "user_id": 7, - "name": { 
- "first": "Jewell", - "last": "Stafford" - }, + "name": {"first": "Jewell", "last": "Stafford"}, "age": 33, "location": { "state": "Iowa", "city": "Longbranch", - "address": { - "street": "Dodworth Street", - "number": 3949 - } + "address": {"street": "Dodworth Street", "number": 3949}, }, "company": "Niquent", "email": "jewellstafford@niquent.com", "manager": True, - "favorites": [ - "C", - "C", - "Ruby", - "Ruby", - "Erlang" - ], - "exists_field" : "should_exist1", - "ordered": None + "favorites": ["C", "C", "Ruby", "Ruby", "Erlang"], + "exists_field": "should_exist1", + "ordered": None, }, { "_id": "6c0afcf1-e57e-421d-a03d-0c0717ebf843", "user_id": 8, - "name": { - "first": "James", - "last": "Mcdaniel" - }, + "name": {"first": "James", "last": "Mcdaniel"}, "age": 68, "location": { "state": "Maine", "city": "Craig", - "address": { - "street": "Greene Avenue", - "number": 8776 - } + "address": {"street": "Greene Avenue", "number": 8776}, }, "company": "Globoil", "email": "jamesmcdaniel@globoil.com", "manager": True, "favorites": None, - "exists_field" : "should_exist2", - "ordered": False + "exists_field": "should_exist2", + "ordered": False, }, { "_id": "954272af-d5ed-4039-a5eb-8ed57e9def01", "user_id": 9, - "name": { - "first": "Ramona", - "last": "Floyd" - }, + "name": {"first": "Ramona", "last": "Floyd"}, "age": 22, "location": { "state": "Missouri", "city": "Foxworth", - "address": { - "street": "Lott Place", - "number": 1697 - } + "address": {"street": "Lott Place", "number": 1697}, }, "company": "Manglo", "email": "ramonafloyd@manglo.com", "manager": True, "twitter": None, - "favorites": [ - "Lisp", - "Erlang", - "Python" - ], - "exists_array" : ["should", "exist", "array1"], - "complex_field_value" : "+-(){}[]^~&&*||\"\\/?:!", - "ordered": True + "favorites": ["Lisp", "Erlang", "Python"], + "exists_array": ["should", "exist", "array1"], + "complex_field_value": '+-(){}[]^~&&*||"\\/?:!', + "ordered": True, }, { "_id": 
"e900001d-bc48-48a6-9b1a-ac9a1f5d1a03", "user_id": 10, - "name": { - "first": "Charmaine", - "last": "Mills" - }, + "name": {"first": "Charmaine", "last": "Mills"}, "age": 43, "location": { "state": "New Hampshire", "city": "Kiskimere", - "address": { - "street": "Nostrand Avenue", - "number": 4503 - } + "address": {"street": "Nostrand Avenue", "number": 4503}, }, "company": "Lyria", "email": "charmainemills@lyria.com", "manager": True, - "favorites": [ - "Erlang", - "Erlang" - ], - "exists_array" : ["should", "exist", "array2"], - "ordered": 9 + "favorites": ["Erlang", "Erlang"], + "exists_array": ["should", "exist", "array2"], + "ordered": 9, }, { "_id": "b06aadcf-cd0f-4ca6-9f7e-2c993e48d4c4", "user_id": 11, - "name": { - "first": "Mathis", - "last": "Hernandez" - }, + "name": {"first": "Mathis", "last": "Hernandez"}, "age": 75, "location": { "state": "Hawaii", "city": "Dupuyer", - "address": { - "street": "Bancroft Place", - "number": 2741 - } + "address": {"street": "Bancroft Place", "number": 2741}, }, "company": "Affluex", "email": "mathishernandez@affluex.com", "manager": True, - "favorites": [ - "Ruby", - "Lisp", - "C", - "C++", - "C++" - ], - "exists_object" : {"should": "object"}, - "ordered": 10000 + "favorites": ["Ruby", "Lisp", "C", "C++", "C++"], + "exists_object": {"should": "object"}, + "ordered": 10000, }, { "_id": "5b61abc1-a3d3-4092-b9d7-ced90e675536", "user_id": 12, - "name": { - "first": "Patti", - "last": "Rosales" - }, + "name": {"first": "Patti", "last": "Rosales"}, "age": 71, "location": { "state": "Pennsylvania", "city": "Juntura", - "address": { - "street": "Hunterfly Place", - "number": 7683 - } + "address": {"street": "Hunterfly Place", "number": 7683}, }, "company": "Oulu", "email": "pattirosales@oulu.com", "manager": True, - "favorites": [ - "C", - "Python", - "Lisp" - ], - "exists_object" : {"another": "object"}, - "ordered": "a" + "favorites": ["C", "Python", "Lisp"], + "exists_object": {"another": "object"}, + "ordered": "a", }, { 
"_id": "b1e70402-8add-4068-af8f-b4f3d0feb049", "user_id": 13, - "name": { - "first": "Whitley", - "last": "Harvey" - }, + "name": {"first": "Whitley", "last": "Harvey"}, "age": 78, "location": { "state": "Minnesota", "city": "Trail", - "address": { - "street": "Pleasant Place", - "number": 8766 - } + "address": {"street": "Pleasant Place", "number": 8766}, }, "company": None, "email": "whitleyharvey@fangold.com", "manager": False, "twitter": "@whitleyharvey", - "favorites": [ - "C", - "Ruby", - "Ruby" - ], - "ordered": "A" + "favorites": ["C", "Ruby", "Ruby"], + "ordered": "A", }, { "_id": "c78c529f-0b07-4947-90a6-d6b7ca81da62", "user_id": 14, - "name": { - "first": "Faith", - "last": "Hess" - }, + "name": {"first": "Faith", "last": "Hess"}, "age": 51, "location": { "state": "North Dakota", "city": "Axis", - "address": { - "street": "Brightwater Avenue", - "number": 1106 - } + "address": {"street": "Brightwater Avenue", "number": 1106}, }, "company": "Pharmex", "email": "faithhess@pharmex.com", - "favorites": [ - "Erlang", - "Python", - "Lisp" - ], - "ordered": "aa" - } + "favorites": ["Erlang", "Python", "Lisp"], + "ordered": "aa", + }, ] @@ -511,7 +356,7 @@ def add_text_indexes(db, kwargs): "password": "apple01", "roles": ["design"], "order": 1, - "type": "user" + "type": "user", }, { "_id": "org.couchdb.user:demo02", @@ -520,7 +365,7 @@ def add_text_indexes(db, kwargs): "password": "apple02", "roles": ["reader"], "order": 2, - "type": "user" + "type": "user", }, { "_id": "org.couchdb.user:demo03", @@ -529,6 +374,6 @@ def add_text_indexes(db, kwargs): "password": "apple03", "roles": ["reader", "writer"], "order": 3, - "type": "user" - } + "type": "user", + }, ] diff --git a/test/javascript/run b/test/javascript/run index 283a7f77956..1fa605decec 100755 --- a/test/javascript/run +++ b/test/javascript/run @@ -72,14 +72,13 @@ def mkformatter(tests): def run_couchjs(test, fmt): fmt(test) - cmd = [COUCHJS, "--eval", "-H", "-T"] + \ - ["-u", 
"test/javascript/couchdb.uri"] + SCRIPTS + [test, RUNNER] - p = sp.Popen( - cmd, - stdin=sp.PIPE, - stdout=sp.PIPE, - stderr=sys.stderr + cmd = ( + [COUCHJS, "--eval", "-H", "-T"] + + ["-u", "test/javascript/couchdb.uri"] + + SCRIPTS + + [test, RUNNER] ) + p = sp.Popen(cmd, stdin=sp.PIPE, stdout=sp.PIPE, stderr=sys.stderr) while True: line = p.stdout.readline() if not line: @@ -93,20 +92,48 @@ def run_couchjs(test, fmt): def options(): return [ - op.make_option("-s", "--start", metavar="FILENAME", default=None, - help="Start from the given filename if multiple files " - "are passed"), - op.make_option("-a", "--all", action="store_true", dest="all", - help="Run all tests, even if one or more fail"), - op.make_option("-i", "--ignore", type="string", action="callback", - default=None, callback=get_delimited_list, - dest="ignore", help="Ignore test suites"), - op.make_option("-u", "--suites", type="string", action="callback", - default=None, callback=get_delimited_list, - dest="suites", help="Run specific suites"), - op.make_option("-p", "--path", type="string", - default="test/javascript/tests", - dest="test_path", help="Path where the tests are located") + op.make_option( + "-s", + "--start", + metavar="FILENAME", + default=None, + help="Start from the given filename if multiple files " "are passed", + ), + op.make_option( + "-a", + "--all", + action="store_true", + dest="all", + help="Run all tests, even if one or more fail", + ), + op.make_option( + "-i", + "--ignore", + type="string", + action="callback", + default=None, + callback=get_delimited_list, + dest="ignore", + help="Ignore test suites", + ), + op.make_option( + "-u", + "--suites", + type="string", + action="callback", + default=None, + callback=get_delimited_list, + dest="suites", + help="Run specific suites", + ), + op.make_option( + "-p", + "--path", + type="string", + default="test/javascript/tests", + dest="test_path", + help="Path where the tests are located", + ), ] @@ -118,10 +145,10 @@ def 
main(): ignore_list = [] tests = [] run_list = [opts.test_path] if not opts.suites else opts.suites - run_list = build_test_case_paths(opts.test_path,run_list) - ignore_list = build_test_case_paths(opts.test_path,opts.ignore) + run_list = build_test_case_paths(opts.test_path, run_list) + ignore_list = build_test_case_paths(opts.test_path, opts.ignore) # sort is needed because certain tests fail if executed out of order - tests = sorted(list(set(run_list)-set(ignore_list))) + tests = sorted(list(set(run_list) - set(ignore_list))) if opts.start is not None: tmp = [] @@ -132,25 +159,28 @@ def main(): passed = 0 failed = 0 - if len(tests) > 0 : - fmt = mkformatter(tests) - for test in tests: - result = run_couchjs(test, fmt) - if result == 0: - passed += 1 - else: - failed += 1 - if not opts.all: - break + if len(tests) > 0: + fmt = mkformatter(tests) + for test in tests: + result = run_couchjs(test, fmt) + if result == 0: + passed += 1 + else: + failed += 1 + if not opts.all: + break - sys.stderr.write("=======================================================" - + os.linesep) + sys.stderr.write( + "=======================================================" + os.linesep + ) sys.stderr.write("JavaScript tests complete." + os.linesep) - sys.stderr.write(" Failed: {0}. Skipped or passed: {1}.".format( - failed, passed) + os.linesep) + sys.stderr.write( + " Failed: {0}. 
Skipped or passed: {1}.".format(failed, passed) + os.linesep + ) exit(failed > 0) -def build_test_case_paths(path,args=None): + +def build_test_case_paths(path, args=None): tests = [] if args is None: args = [] @@ -171,9 +201,10 @@ def build_test_case_paths(path,args=None): def get_delimited_list(option, opt, value, parser): - delimited = [i for i in re.split(r',|\s', value.strip()) if i] + delimited = [i for i in re.split(r",|\s", value.strip()) if i] setattr(parser.values, option.dest, delimited) + if __name__ == "__main__": try: main() From 6d82d96020d436971622a4f015438b7ac9e93ab3 Mon Sep 17 00:00:00 2001 From: Paulo Eduardo Althoff Date: Mon, 21 Jan 2019 00:32:31 -0200 Subject: [PATCH 16/26] Fix python2 compatibility for couchup (#1868) Closes #1053 --- rel/overlay/bin/couchup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rel/overlay/bin/couchup b/rel/overlay/bin/couchup index b5ac8066f69..4f03759b243 100755 --- a/rel/overlay/bin/couchup +++ b/rel/overlay/bin/couchup @@ -22,7 +22,7 @@ import sys try: from urllib.parse import quote except ImportError: - from urllib.parse import quote + from urllib import quote import requests try: From 008ce5205b6c55c7a7e64592d584065f4a2a6c64 Mon Sep 17 00:00:00 2001 From: Clemens Stolle Date: Thu, 7 Feb 2019 06:54:19 +0100 Subject: [PATCH 17/26] fix couchup for python3 (#1905) --- rel/overlay/bin/couchup | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/rel/overlay/bin/couchup b/rel/overlay/bin/couchup index 4f03759b243..52d746c2d5d 100755 --- a/rel/overlay/bin/couchup +++ b/rel/overlay/bin/couchup @@ -231,7 +231,9 @@ def _replicate(args): if args["filter_deleted"]: doc["filter"] = "repl_filters/no_deleted" if args["creds"]: - auth = "Basic " + base64.b64encode(":".join(args["creds"])) + auth = ( + "Basic " + base64.b64encode(":".join(args["creds"]).encode()).decode() + ) headers = {"authorization": auth} doc["source"]["headers"] = headers doc["target"]["headers"] = headers From 
9a8f0cc2b8a4ad322f50db2803f76535235ec88a Mon Sep 17 00:00:00 2001 From: Joan Touzet Date: Tue, 22 Jan 2019 19:49:29 -0500 Subject: [PATCH 18/26] Blacklist known bad Erlang releases, fixes #1857 (#1871) --- .travis.yml | 3 ++- rebar.config.script | 37 +++++++++++++++++++++++++++++++++++++ 2 files changed, 39 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index b9c75c0ac80..2a757d07042 100644 --- a/.travis.yml +++ b/.travis.yml @@ -4,7 +4,7 @@ dist: trusty otp_release: - 21.2.3 - - 20.3 + - 20.3.8.5 - 19.3 - 18.3 - 17.5 @@ -49,6 +49,7 @@ env: # Then comment this section out before_script: + - kerl list installations - rm -rf /tmp/couchjslogs - mkdir -p /tmp/couchjslogs - ./configure -c --disable-docs --disable-fauxton diff --git a/rebar.config.script b/rebar.config.script index a582c04cf0c..b603a121bfc 100644 --- a/rebar.config.script +++ b/rebar.config.script @@ -1,3 +1,4 @@ +%% -*- erlang -*- % Licensed under the Apache License, Version 2.0 (the "License"); you may not % use this file except in compliance with the License. You may obtain a copy of % the License at @@ -10,6 +11,42 @@ % License for the specific language governing permissions and limitations under % the License. +% +% Blacklist some bad releases. +% +{ok, Version} = file:read_file(filename:join( + [code:root_dir(), "releases", erlang:system_info(otp_release), "OTP_VERSION"] +)). + +% Version may be binary if file has /n at end :( +% there is no string:trim/1 in Erlang 19 :( +VerString = case Version of + V when is_binary(V) -> string:strip(binary_to_list(V), right, $\n); + _ -> string:strip(Version, right, $\n) +end. +VerList = lists:map(fun(X) -> {Int, _} = string:to_integer(X), Int end, + string:tokens(VerString, ".")). 
+ +NotSupported = fun(Ver) -> + io:fwrite("CouchDB does not support this version of Erlang (~p).~n", [Ver]), + io:fwrite("Check https://docs.couchdb.org/en/latest/whatsnew/index.html for the~n"), + io:fwrite("latest information on supported releases.~n"), + case os:getenv("TRAVIS") of + "true" -> + io:fwrite("Travis run, ignoring bad release. You have been warned!~n"), + ok; + _ -> halt(1) + end +end. + +case VerList of + [20 | _] = V20 when V20 < [20, 3, 8, 11] -> NotSupported(VerString); + [20 | _] = V20 when V20 >= [20, 3, 8, 11] -> ok; + [21, 2] -> NotSupported(VerString); + [21, 2, N | _] when N < 3 -> NotSupported(VerString); + _ -> ok +end. + % Set the path to the configuration environment generated % by `./configure`. From f9fde69ee48ab9551dcf0986057b3a6e6811f572 Mon Sep 17 00:00:00 2001 From: Robert Newson Date: Wed, 6 Feb 2019 12:43:37 +0000 Subject: [PATCH 19/26] run formatting check before time-consuming tests --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index b2f3f04aa48..b8c1ae86879 100644 --- a/Makefile +++ b/Makefile @@ -138,9 +138,9 @@ fauxton: share/www check: all @$(MAKE) test-cluster-with-quorum @$(MAKE) test-cluster-without-quorum + @$(MAKE) python-black @$(MAKE) eunit @$(MAKE) javascript - @$(MAKE) python-black @$(MAKE) mango-test # @$(MAKE) build-test From 5ffd8b79c0ffd933589867ee77ebd1aeeb118dcc Mon Sep 17 00:00:00 2001 From: Jan Lehnardt Date: Thu, 7 Feb 2019 11:52:03 +0100 Subject: [PATCH 20/26] drop Erlang 17 from travis --- .travis.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 2a757d07042..0ce90e6a19e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -7,7 +7,6 @@ otp_release: - 20.3.8.5 - 19.3 - 18.3 - - 17.5 addons: apt: From 50bb9cc7686fb451c6db7a533caab4b97941c04f Mon Sep 17 00:00:00 2001 From: "Paul J. 
Davis" Date: Mon, 14 Jan 2019 16:33:13 -0600 Subject: [PATCH 21/26] Fix read repair in a mixed cluster environment This enables backwards compatibility with nodes still running the old version of fabric_rpc when a cluster is upgraded to master. This has no effect once all nodes are upgraded to the latest version. --- src/fabric/src/fabric_doc_open.erl | 4 ++-- src/fabric/src/fabric_doc_open_revs.erl | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/fabric/src/fabric_doc_open.erl b/src/fabric/src/fabric_doc_open.erl index 0a85346f7bc..aafdcfb796d 100644 --- a/src/fabric/src/fabric_doc_open.erl +++ b/src/fabric/src/fabric_doc_open.erl @@ -136,7 +136,7 @@ read_repair(#acc{dbname=DbName, replies=Replies, node_revs=NodeRevs}) -> [#doc{id = <>} | _] -> choose_reply(Docs); [#doc{id=Id} | _] -> - Opts = [?ADMIN_CTX, {read_repair, NodeRevs}], + Opts = [?ADMIN_CTX, replicated_changes, {read_repair, NodeRevs}], Res = fabric:update_docs(DbName, Docs, Opts), case Res of {ok, []} -> @@ -592,4 +592,4 @@ t_get_doc_info() -> ?assert(is_record(Rec2, full_doc_info)) end). --endif. \ No newline at end of file +-endif. 
diff --git a/src/fabric/src/fabric_doc_open_revs.erl b/src/fabric/src/fabric_doc_open_revs.erl index dc03f3df69a..8ac3f30dcf2 100644 --- a/src/fabric/src/fabric_doc_open_revs.erl +++ b/src/fabric/src/fabric_doc_open_revs.erl @@ -224,7 +224,7 @@ dict_repair_docs(Replies, ReplyCount) -> read_repair(Db, Docs, NodeRevs) -> - Opts = [?ADMIN_CTX, {read_repair, NodeRevs}], + Opts = [?ADMIN_CTX, replicated_changes, {read_repair, NodeRevs}], Res = fabric:update_docs(Db, Docs, Opts), case Res of {ok, []} -> From 18b59cb0774ec1292e157379b600f2aa160cd3cd Mon Sep 17 00:00:00 2001 From: Nick Vatamaniuc Date: Thu, 24 Jan 2019 11:59:49 -0500 Subject: [PATCH 22/26] Update config dependency to 2.1.5 This fixes inability to set keys with regex symbols in them --- rebar.config.script | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rebar.config.script b/rebar.config.script index b603a121bfc..c483b192afd 100644 --- a/rebar.config.script +++ b/rebar.config.script @@ -85,7 +85,7 @@ SubDirs = [ DepDescs = [ %% Independent Apps -{config, "config", {tag, "2.1.4"}}, +{config, "config", {tag, "2.1.5"}}, {b64url, "b64url", {tag, "1.0.1"}}, {ets_lru, "ets-lru", {tag, "1.0.0"}}, {khash, "khash", {tag, "1.0.1"}}, From 64d026a86e634da2d0a38c79adb8ad7c60e6c4d9 Mon Sep 17 00:00:00 2001 From: "Paul J. Davis" Date: Wed, 5 Dec 2018 13:42:47 -0600 Subject: [PATCH 23/26] Add `couch_db:get_design_doc/2` This adds an API call for looking up a single design doc regardless of whether the database is clustered or not. 
--- src/couch/src/couch_db.erl | 14 ++++++++++++++ src/couch/src/couch_util.erl | 5 +++++ 2 files changed, 19 insertions(+) diff --git a/src/couch/src/couch_db.erl b/src/couch/src/couch_db.erl index 0df04db3a13..0ae164d9ba1 100644 --- a/src/couch/src/couch_db.erl +++ b/src/couch/src/couch_db.erl @@ -77,6 +77,7 @@ get_full_doc_info/2, get_full_doc_infos/2, get_missing_revs/2, + get_design_doc/2, get_design_docs/1, get_design_doc_count/1, get_purge_infos/2, @@ -608,6 +609,19 @@ get_db_info(Db) -> ], {ok, InfoList}. +get_design_doc(#db{name = <<"shards/", _/binary>> = ShardDbName}, DDocId0) -> + DDocId = couch_util:normalize_ddoc_id(DDocId0), + DbName = mem3:dbname(ShardDbName), + {_, Ref} = spawn_monitor(fun() -> + exit(fabric:open_doc(DbName, DDocId, [])) + end), + receive {'DOWN', Ref, _, _, Response} -> + Response + end; +get_design_doc(#db{} = Db, DDocId0) -> + DDocId = couch_util:normalize_ddoc_id(DDocId0), + couch_db:open_doc_int(Db, DDocId, [ejson_body]). + get_design_docs(#db{name = <<"shards/", _/binary>> = ShardDbName}) -> DbName = mem3:dbname(ShardDbName), {_, Ref} = spawn_monitor(fun() -> exit(fabric:design_docs(DbName)) end), diff --git a/src/couch/src/couch_util.erl b/src/couch/src/couch_util.erl index 3efec84a9b2..ddd24aab000 100644 --- a/src/couch/src/couch_util.erl +++ b/src/couch/src/couch_util.erl @@ -27,6 +27,7 @@ -export([reorder_results/2]). -export([url_strip_password/1]). -export([encode_doc_id/1]). +-export([normalize_ddoc_id/1]). -export([with_db/2]). -export([rfc1123_date/0, rfc1123_date/1]). -export([integer_to_boolean/1, boolean_to_integer/1]). @@ -543,6 +544,10 @@ encode_doc_id(<<"_local/", Rest/binary>>) -> encode_doc_id(Id) -> url_encode(Id). +normalize_ddoc_id(<<"_design/", _/binary>> = DDocId) -> + DDocId; +normalize_ddoc_id(DDocId) when is_binary(DDocId) -> + <<"_design/", DDocId/binary>>. 
with_db(DbName, Fun) when is_binary(DbName) -> case couch_db:open_int(DbName, [?ADMIN_CTX]) of From fc27f75f63b050a00967f503112fddeedf796d5a Mon Sep 17 00:00:00 2001 From: "Paul J. Davis" Date: Wed, 5 Dec 2018 13:43:31 -0600 Subject: [PATCH 24/26] Avoid calls to `fabric:design_docs/1` The underlying clustered _all_docs call can cause significant extra load during compaction. --- src/couch_mrview/src/couch_mrview_index.erl | 26 +++------------------ 1 file changed, 3 insertions(+), 23 deletions(-) diff --git a/src/couch_mrview/src/couch_mrview_index.erl b/src/couch_mrview/src/couch_mrview_index.erl index 4718b562d2b..d3bcfe04bc6 100644 --- a/src/couch_mrview/src/couch_mrview_index.erl +++ b/src/couch_mrview/src/couch_mrview_index.erl @@ -226,16 +226,15 @@ verify_index_exists(DbName, Props) -> if Type =/= <<"mrview">> -> false; true -> DDocId = couch_util:get_value(<<"ddoc_id">>, Props), couch_util:with_db(DbName, fun(Db) -> - {ok, DesignDocs} = couch_db:get_design_docs(Db), - case get_ddoc(DbName, DesignDocs, DDocId) of - #doc{} = DDoc -> + case couch_db:get_design_doc(Db, DDocId) of + {ok, #doc{} = DDoc} -> {ok, IdxState} = couch_mrview_util:ddoc_to_mrst( DbName, DDoc), IdxSig = IdxState#mrst.sig, SigInLocal = couch_util:get_value( <<"signature">>, Props), couch_index_util:hexsig(IdxSig) == SigInLocal; - not_found -> + {not_found, _} -> false end end) @@ -245,25 +244,6 @@ verify_index_exists(DbName, Props) -> end. -get_ddoc(<<"shards/", _/binary>> = _DbName, DesignDocs, DDocId) -> - DDocs = [couch_doc:from_json_obj(DD) || DD <- DesignDocs], - case lists:keyfind(DDocId, #doc.id, DDocs) of - #doc{} = DDoc -> DDoc; - false -> not_found - end; -get_ddoc(DbName, DesignDocs, DDocId) -> - couch_util:with_db(DbName, fun(Db) -> - case lists:keyfind(DDocId, #full_doc_info.id, DesignDocs) of - #full_doc_info{} = DDocInfo -> - {ok, DDoc} = couch_db:open_doc_int( - Db, DDocInfo, [ejson_body]), - DDoc; - false -> - not_found - end - end). 
- - ensure_local_purge_docs(DbName, DDocs) -> couch_util:with_db(DbName, fun(Db) -> lists:foreach(fun(DDoc) -> From 8027f6a20d833ee246a53ddd916d88ce50405c2a Mon Sep 17 00:00:00 2001 From: Jan Lehnardt Date: Tue, 12 Feb 2019 12:49:16 +0100 Subject: [PATCH 25/26] update Fauxton to 1.1.20 --- rebar.config.script | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rebar.config.script b/rebar.config.script index c483b192afd..34dbdfa6bb5 100644 --- a/rebar.config.script +++ b/rebar.config.script @@ -85,7 +85,7 @@ SubDirs = [ DepDescs = [ %% Independent Apps -{config, "config", {tag, "2.1.5"}}, +{config, "config", {tag, "2.1.5"}}, {b64url, "b64url", {tag, "1.0.1"}}, {ets_lru, "ets-lru", {tag, "1.0.0"}}, {khash, "khash", {tag, "1.0.1"}}, @@ -96,7 +96,7 @@ DepDescs = [ {docs, {url, "https://github.com/apache/couchdb-documentation"}, {tag, "2.3.0"}, [raw]}, {fauxton, {url, "https://github.com/apache/couchdb-fauxton"}, -{tag, "v1.1.19"}, [raw]}, +{tag, "v1.1.20"}, [raw]}, %% Third party deps {folsom, "folsom", {tag, "CouchDB-0.8.2"}}, {hyper, "hyper", {tag, "CouchDB-2.2.0-4"}}, From bb99f305d9bf5e0781380eaee8b5f95bf73840fe Mon Sep 17 00:00:00 2001 From: Jay Doane Date: Tue, 12 Feb 2019 11:27:13 -0800 Subject: [PATCH 26/26] Sync admin password hashes at cluster setup finish This ensures that admin password hashes are the same on all nodes when passwords are set directly on each node rather than through the coordinator node.
--- src/setup/src/setup.erl | 66 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) diff --git a/src/setup/src/setup.erl b/src/setup/src/setup.erl index 3ae455f5460..9437fbc073b 100644 --- a/src/setup/src/setup.erl +++ b/src/setup/src/setup.erl @@ -198,9 +198,75 @@ setup_node(NewCredentials, NewBindAddress, NodeCount, Port) -> finish_cluster(Options) -> + ok = wait_connected(), + ok = sync_admins(), Dbs = proplists:get_value(ensure_dbs_exist, Options, cluster_system_dbs()), finish_cluster_int(Dbs, has_cluster_system_dbs(Dbs)). + +wait_connected() -> + Nodes = other_nodes(), + Result = test_util:wait(fun() -> + case disconnected(Nodes) of + [] -> ok; + _ -> wait + end + end), + case Result of + timeout -> + Reason = "Cluster setup timed out waiting for nodes to connect", + throw({setup_error, Reason}); + ok -> + ok + end. + + +other_nodes() -> + mem3:nodes() -- [node()]. + + +disconnected(Nodes) -> + lists:filter(fun(Node) -> + case net_adm:ping(Node) of + pong -> false; + pang -> true + end + end, Nodes). + + +sync_admins() -> + ok = lists:foreach(fun({User, Pass}) -> + sync_admin(User, Pass) + end, config:get("admins")). + + +sync_admin(User, Pass) -> + {Results, Errors} = rpc:multicall(other_nodes(), config, set, + ["admins", User, Pass]), + case validate_multicall(Results, Errors) of + ok -> + ok; + error -> + couch_log:error("~p sync_admin results ~p errors ~p", + [?MODULE, Results, Errors]), + Reason = "Cluster setup unable to sync admin passwords", + throw({setup_error, Reason}) + end. + + +validate_multicall(Results, Errors) -> + AllOk = lists:all(fun + (ok) -> true; + (_) -> false + end, Results), + case AllOk andalso Errors == [] of + true -> + ok; + false -> + error + end. + + +finish_cluster_int(_Dbs, true) -> {error, cluster_finished}; finish_cluster_int(Dbs, false) ->