From 6f1dfafcc257b44b513b0a1d009571916b40dcc7 Mon Sep 17 00:00:00 2001 From: zhaochangle Date: Wed, 31 Jul 2024 17:22:50 +0800 Subject: [PATCH] 1 --- be/src/cloud/cloud_tablets_channel.cpp | 23 ++++++++++++++--------- be/src/runtime/tablets_channel.cpp | 9 +++++++++ be/src/runtime/tablets_channel.h | 9 ++++----- 3 files changed, 27 insertions(+), 14 deletions(-) diff --git a/be/src/cloud/cloud_tablets_channel.cpp b/be/src/cloud/cloud_tablets_channel.cpp index e063ab68116bb2..85b8e3ea33a865 100644 --- a/be/src/cloud/cloud_tablets_channel.cpp +++ b/be/src/cloud/cloud_tablets_channel.cpp @@ -59,15 +59,20 @@ Status CloudTabletsChannel::add_batch(const PTabletWriterAddBlockRequest& reques _build_tablet_to_rowidxs(request, &tablet_to_rowidxs); std::unordered_set partition_ids; - for (auto& [tablet_id, _] : tablet_to_rowidxs) { - auto tablet_writer_it = _tablet_writers.find(tablet_id); - if (tablet_writer_it == _tablet_writers.end()) { - return Status::InternalError("unknown tablet to append data, tablet={}", tablet_id); + { + // add_batch may run concurrently with inc_open but is not under _lock, + // so we need to protect it with _tablet_writers_lock. 
+ std::lock_guard l(_tablet_writers_lock); + for (auto& [tablet_id, _] : tablet_to_rowidxs) { + auto tablet_writer_it = _tablet_writers.find(tablet_id); + if (tablet_writer_it == _tablet_writers.end()) { + return Status::InternalError("unknown tablet to append data, tablet={}", tablet_id); + } + partition_ids.insert(tablet_writer_it->second->partition_id()); + } + if (!partition_ids.empty()) { + RETURN_IF_ERROR(_init_writers_by_partition_ids(partition_ids)); } - partition_ids.insert(tablet_writer_it->second->partition_id()); - } - if (!partition_ids.empty()) { - RETURN_IF_ERROR(_init_writers_by_partition_ids(partition_ids)); - } } return _write_block_data(request, cur_seq, tablet_to_rowidxs, response); @@ -124,7 +129,7 @@ Status CloudTabletsChannel::close(LoadChannel* parent, const PTabletWriterAddBlo _state = kFinished; // All senders are closed - // 1. close all delta writers + // 1. close all delta writers. under _lock. std::vector writers_to_commit; writers_to_commit.reserve(_tablet_writers.size()); bool success = true; diff --git a/be/src/runtime/tablets_channel.cpp b/be/src/runtime/tablets_channel.cpp index 11ddf27cfcdb8e..a58ff59b6a8677 100644 --- a/be/src/runtime/tablets_channel.cpp +++ b/be/src/runtime/tablets_channel.cpp @@ -215,6 +215,7 @@ Status BaseTabletsChannel::incremental_open(const PTabletWriterOpenRequest& para ss << "LocalTabletsChannel txn_id: " << _txn_id << " load_id: " << print_id(params.id()) << " incremental open delta writer: "; + // every change is made while holding _lock, and this find is under _lock too, so there is no need for _tablet_writers_lock again. for (const auto& tablet : params.tablets()) { if (_tablet_writers.find(tablet.tablet_id()) != _tablet_writers.end()) { continue; } @@ -237,6 +238,7 @@ Status BaseTabletsChannel::incremental_open(const PTabletWriterOpenRequest& para auto delta_writer = create_delta_writer(wrequest); { + // here we modify _tablet_writers, so the lock is needed. 
std::lock_guard l(_tablet_writers_lock); _tablet_writers.emplace(tablet.tablet_id(), std::move(delta_writer)); } @@ -291,6 +293,7 @@ Status TabletsChannel::close(LoadChannel* parent, const PTabletWriterAddBlockReq // All senders are closed // 1. close all delta writers std::set need_wait_writers; + // under _lock; no need for _tablet_writers_lock again. for (auto&& [tablet_id, writer] : _tablet_writers) { if (_partition_ids.contains(writer->partition_id())) { auto st = writer->close(); @@ -492,6 +495,7 @@ Status BaseTabletsChannel::_open_all_writers(const PTabletWriterOpenRequest& req #endif int tablet_cnt = 0; + // under _lock; no need for _tablet_writers_lock again. for (const auto& tablet : request.tablets()) { if (_tablet_writers.find(tablet.tablet_id()) != _tablet_writers.end()) { continue; } @@ -574,6 +578,11 @@ Status BaseTabletsChannel::_write_block_data( std::function write_func) { google::protobuf::RepeatedPtrField* tablet_errors = response->mutable_tablet_errors(); + + // add_batch may run concurrently with inc_open but is not under _lock, + // so we need to protect it with _tablet_writers_lock. + std::lock_guard l(_tablet_writers_lock); + auto tablet_writer_it = _tablet_writers.find(tablet_id); if (tablet_writer_it == _tablet_writers.end()) { return Status::InternalError("unknown tablet to append data, tablet={}", tablet_id); diff --git a/be/src/runtime/tablets_channel.h b/be/src/runtime/tablets_channel.h index 48e987341587d7..87fbf9d06aaaa7 100644 --- a/be/src/runtime/tablets_channel.h +++ b/be/src/runtime/tablets_channel.h @@ -143,11 +143,8 @@ class BaseTabletsChannel { // id of this load channel TabletsChannelKey _key; - // make execute sequence + // protects _state changes (open and close); when add_batch has finished, also locked to change _next_seqs std::mutex _lock; - - SpinLock _tablet_writers_lock; - enum State { kInitialized, kOpened, @@ -173,8 +170,10 @@ class BaseTabletsChannel { // currently it's OK. 
Status _close_status; - // tablet_id -> TabletChannel + // tablet_id -> TabletChannel. it will only be changed in open() or inc_open() std::unordered_map> _tablet_writers; + // protect _tablet_writers + SpinLock _tablet_writers_lock; // broken tablet ids. // If a tablet write fails, it's id will be added to this set. // So that following batch will not handle this tablet anymore.