3 changes: 3 additions & 0 deletions be/src/pipeline/exec/exchange_sink_buffer.cpp
@@ -362,6 +362,9 @@ Status ExchangeSinkBuffer<Parent>::_send_rpc(InstanceLoId id) {
_set_ready_to_finish(_busy_channels.fetch_sub(1) == 1);
}

if (_is_receiver_eof(id)) {
return Status::EndOfFile("receiver eof");
}
return Status::OK();
}

2 changes: 2 additions & 0 deletions be/src/pipeline/pipeline_fragment_context.h
@@ -106,6 +106,8 @@ class PipelineFragmentContext : public TaskExecutionContext {
const PPlanFragmentCancelReason& reason = PPlanFragmentCancelReason::INTERNAL_ERROR,
const std::string& msg = "");

void set_reach_limit() { _query_ctx->set_reach_limit(); }

// TODO: Support pipeline runtime filter

QueryContext* get_query_context() { return _query_ctx.get(); }
6 changes: 5 additions & 1 deletion be/src/pipeline/task_scheduler.cpp
@@ -281,7 +281,11 @@ void TaskScheduler::_do_work(size_t index) {
auto status = Status::OK();

try {
status = task->execute(&eos);
if (task->query_context()->reach_limit()) {
eos = true;
} else {
status = task->execute(&eos);
}
} catch (const Exception& e) {
status = e.to_status();
}
16 changes: 12 additions & 4 deletions be/src/runtime/fragment_mgr.cpp
@@ -1060,17 +1060,25 @@ void FragmentMgr::cancel_instance_unlocked(const TUniqueId& instance_id,
auto itr = _pipeline_map.find(instance_id);

if (itr != _pipeline_map.end()) {
// calling PipelineFragmentContext::cancel
itr->second->cancel(reason, msg);
if (reason == PPlanFragmentCancelReason::LIMIT_REACH) {
itr->second->set_reach_limit();
} else {
// calling PipelineFragmentContext::cancel
itr->second->cancel(reason, msg);
}
} else {
LOG(WARNING) << "Could not find the pipeline instance id:" << print_id(instance_id)
<< " to cancel";
}
} else {
auto itr = _fragment_instance_map.find(instance_id);
if (itr != _fragment_instance_map.end()) {
// calling PlanFragmentExecutor::cancel
itr->second->cancel(reason, msg);
if (reason == PPlanFragmentCancelReason::LIMIT_REACH) {
itr->second->set_reach_limit();
} else {
// calling PlanFragmentExecutor::cancel
itr->second->cancel(reason, msg);
}
} else {
LOG(WARNING) << "Could not find the fragment instance id:" << print_id(instance_id)
<< " to cancel";
2 changes: 2 additions & 0 deletions be/src/runtime/plan_fragment_executor.h
@@ -117,6 +117,8 @@ class PlanFragmentExecutor : public TaskExecutionContext {
// in open()/get_next().
void close();

void set_reach_limit() { _query_ctx->set_reach_limit(); }

// Initiate cancellation. Must not be called until after prepare() returned.
void cancel(const PPlanFragmentCancelReason& reason = PPlanFragmentCancelReason::INTERNAL_ERROR,
const std::string& msg = "");
3 changes: 3 additions & 0 deletions be/src/runtime/query_context.h
@@ -105,6 +105,8 @@ class QueryContext {
void set_ready_to_execute(bool is_cancelled);

[[nodiscard]] bool is_cancelled() const { return _is_cancelled.load(); }
[[nodiscard]] bool reach_limit() const { return _reach_limit.load(); }
void set_reach_limit() { _reach_limit = true; }
bool cancel(bool v, std::string msg, Status new_status, int fragment_id = -1);

void set_exec_status(Status new_status) {
@@ -253,6 +255,7 @@ class QueryContext {
// And all fragments of this query will start execution when this is set to true.
std::atomic<bool> _ready_to_execute {false};
std::atomic<bool> _is_cancelled {false};
std::atomic<bool> _reach_limit {false};

std::shared_ptr<vectorized::SharedHashTableController> _shared_hash_table_controller;
std::shared_ptr<vectorized::SharedScannerController> _shared_scanner_controller;
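
A minimal standalone sketch of the pattern introduced here (names such as FakeQueryContext and FakeTask are illustrative, not Doris classes): once the atomic reach-limit flag is set, the scheduler marks a task as finished instead of executing it or cancelling the query, mirroring the task_scheduler.cpp change above.

    // Standalone illustration only; class and member names are invented, not Doris code.
    #include <atomic>
    #include <iostream>

    struct FakeQueryContext {
        std::atomic<bool> reach_limit {false};
    };

    struct FakeTask {
        FakeQueryContext* ctx;
        // Pretend to produce one batch per call; eos stays false while data remains.
        bool execute(bool* eos) {
            *eos = false;
            return true;
        }
    };

    int main() {
        FakeQueryContext ctx;
        FakeTask task {&ctx};
        bool eos = false;

        // The FE side flips the flag (through the LIMIT_REACH path shown in fragment_mgr.cpp above).
        ctx.reach_limit = true;

        // Scheduler loop body: finish the task gracefully instead of executing it
        // or cancelling the whole query.
        if (ctx.reach_limit.load()) {
            eos = true;
        } else {
            task.execute(&eos);
        }
        std::cout << "eos=" << std::boolalpha << eos << std::endl;
        return 0;
    }
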
5 changes: 5 additions & 0 deletions be/src/vec/exec/format/orc/vorc_reader.cpp
@@ -1403,6 +1403,11 @@ std::string OrcReader::_get_field_name_lower_case(const orc::Type* orc_type, int
}

Status OrcReader::get_next_block(Block* block, size_t* read_rows, bool* eof) {
if (_io_ctx && _io_ctx->should_stop) {
*eof = true;
*read_rows = 0;
return Status::OK();
}
if (_push_down_agg_type == TPushAggOp::type::COUNT) {
auto rows = std::min(get_remaining_rows(), (int64_t)_batch_size);

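
A standalone sketch of the early exit added to the ORC reader (FakeIOContext and FakeReader are hypothetical stand-ins, not the Doris API): get_next_block reports eof with zero rows as soon as the shared should_stop flag is set, so scanning stops promptly once the limit has been reached.

    // Standalone illustration only; types are invented, not Doris code.
    #include <atomic>
    #include <cstddef>
    #include <iostream>

    struct FakeIOContext {
        std::atomic<bool> should_stop {false};
    };

    struct FakeReader {
        FakeIOContext* io_ctx;
        // Fills read_rows/eof and returns immediately once the stop flag is set.
        void get_next_block(std::size_t* read_rows, bool* eof) {
            if (io_ctx != nullptr && io_ctx->should_stop.load()) {
                *eof = true;
                *read_rows = 0;
                return;
            }
            *read_rows = 1024;  // pretend a batch was decoded
            *eof = false;
        }
    };

    int main() {
        FakeIOContext io;
        FakeReader reader {&io};
        std::size_t rows = 0;
        bool eof = false;
        io.should_stop = true;  // set once the query's limit has been reached
        reader.get_next_block(&rows, &eof);
        std::cout << "rows=" << rows << " eof=" << std::boolalpha << eof << std::endl;
        return 0;
    }
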
2 changes: 1 addition & 1 deletion be/src/vec/exec/scan/pip_scanner_context.h
@@ -68,7 +68,7 @@ class PipScannerContext : public vectorized::ScannerContext {
{
std::unique_lock<std::mutex> l(*_queue_mutexs[id]);
if (_blocks_queues[id].empty()) {
*eos = _is_finished || _should_stop;
*eos = done();
return Status::OK();
}
if (_process_status.is<ErrorCode::CANCELLED>()) {
15 changes: 8 additions & 7 deletions be/src/vec/exec/scan/scanner_context.cpp
@@ -273,7 +273,7 @@ Status ScannerContext::get_block_from_queue(RuntimeState* state, vectorized::Blo
int num_running_scanners = _num_running_scanners;

bool is_scheduled = false;
if (to_be_schedule && _num_running_scanners == 0) {
if (!done() && to_be_schedule && _num_running_scanners == 0) {
is_scheduled = true;
auto state = _scanner_scheduler->submit(shared_from_this());
if (state.ok()) {
@@ -287,8 +287,7 @@ Status ScannerContext::get_block_from_queue(RuntimeState* state, vectorized::Blo
if (wait) {
// scanner batch wait time
SCOPED_TIMER(_scanner_wait_batch_timer);
while (!(!_blocks_queue.empty() || _is_finished || !status().ok() ||
state->is_cancelled())) {
while (!(!_blocks_queue.empty() || done() || !status().ok() || state->is_cancelled())) {
if (!is_scheduled && _num_running_scanners == 0 && should_be_scheduled()) {
LOG(INFO) << "fatal, cur_bytes_in_queue " << cur_bytes_in_queue
<< ", serving_blocks_num " << serving_blocks_num
@@ -330,7 +329,7 @@ Status ScannerContext::get_block_from_queue(RuntimeState* state, vectorized::Blo
}
}
} else {
*eos = _is_finished;
*eos = done();
}
}

@@ -400,8 +399,7 @@ void ScannerContext::dec_num_scheduling_ctx() {

void ScannerContext::set_ready_to_finish() {
// `_should_stop == true` means this task has already ended and wait for pending finish now.
if (_finish_dependency && _should_stop && _num_running_scanners == 0 &&
_num_scheduling_ctx == 0) {
if (_finish_dependency && done() && _num_running_scanners == 0 && _num_scheduling_ctx == 0) {
_finish_dependency->set_ready();
}
}
@@ -524,6 +522,9 @@ std::string ScannerContext::debug_string() {

void ScannerContext::reschedule_scanner_ctx() {
std::lock_guard l(_transfer_lock);
if (done()) {
return;
}
auto state = _scanner_scheduler->submit(shared_from_this());
//todo(wb) rethinking is it better to mark current scan_context failed when submit failed many times?
if (state.ok()) {
@@ -546,7 +547,7 @@ void ScannerContext::push_back_scanner_and_reschedule(VScannerSPtr scanner) {
_num_running_scanners--;
set_ready_to_finish();

if (should_be_scheduled()) {
if (!done() && should_be_scheduled()) {
auto state = _scanner_scheduler->submit(shared_from_this());
if (state.ok()) {
_num_scheduling_ctx++;
6 changes: 2 additions & 4 deletions be/src/vec/exec/scan/vscanner.cpp
@@ -113,12 +113,10 @@ Status VScanner::get_block(RuntimeState* state, Block* block, bool* eof) {
if (state->is_cancelled()) {
return Status::Cancelled("cancelled");
}

*eof = *eof || _should_stop;
// set eof to true if per scanner limit is reached
// currently for query: ORDER BY key LIMIT n
if (_limit > 0 && _num_rows_return >= _limit) {
*eof = true;
}
*eof = *eof || (_limit > 0 && _num_rows_return >= _limit);

return Status::OK();
}
7 changes: 6 additions & 1 deletion be/src/vec/sink/vdata_stream_sender.cpp
@@ -639,7 +639,12 @@ Status VDataStreamSender::send(RuntimeState* state, Block* block, bool eos) {
// 1. calculate range
// 2. dispatch rows to channel
}
return Status::OK();
for (auto channel : _channels) {
if (!channel->is_receiver_eof()) {
return Status::OK();
}
}
return Status::EndOfFile("all data stream channels EOF");
}

Status VDataStreamSender::try_close(RuntimeState* state, Status exec_status) {
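
A standalone sketch of the new return path in send() (FakeChannel is a hypothetical stand-in for the Doris Channel class): the sender keeps returning OK while at least one receiver is still alive, and reports end-of-file only after every downstream channel has acknowledged EOF.

    // Standalone illustration only; FakeChannel is invented, not Doris code.
    #include <iostream>
    #include <vector>

    struct FakeChannel {
        bool receiver_eof;
        bool is_receiver_eof() const { return receiver_eof; }
    };

    // Returns true only when every downstream channel has reported EOF, which is
    // the condition under which the patched send() returns Status::EndOfFile.
    bool all_receivers_eof(const std::vector<FakeChannel>& channels) {
        for (const auto& channel : channels) {
            if (!channel.is_receiver_eof()) {
                return false;  // at least one receiver still wants data -> keep sending
            }
        }
        return true;
    }

    int main() {
        std::vector<FakeChannel> channels {{true}, {false}, {true}};
        std::cout << std::boolalpha << all_receivers_eof(channels) << "\n";  // false
        channels[1].receiver_eof = true;
        std::cout << std::boolalpha << all_receivers_eof(channels) << "\n";  // true
        return 0;
    }
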
48 changes: 47 additions & 1 deletion fe/fe-core/src/main/java/org/apache/doris/qe/Coordinator.java
@@ -1346,7 +1346,7 @@ public RowBatch getNext() throws Exception {
this.returnedAllResults = true;

// if this query is a block query do not cancel.
Long numLimitRows = fragments.get(0).getPlanRoot().getLimit();
long numLimitRows = fragments.get(0).getPlanRoot().getLimit();
boolean hasLimit = numLimitRows > 0;
if (!isBlockQuery && instanceIds.size() > 1 && hasLimit && numReceivedRows >= numLimitRows) {
LOG.debug("no block query, return num >= limit rows, need cancel");
@@ -1359,6 +1359,12 @@ public RowBatch getNext() throws Exception {
} else if (resultBatch.getBatch() != null) {
numReceivedRows += resultBatch.getBatch().getRowsSize();
}
long numLimitRows = fragments.get(0).getPlanRoot().getLimit();
if (numLimitRows > 0) {
if (numReceivedRows >= numLimitRows) {
cleanRemoteFragmentsAsync(Types.PPlanFragmentCancelReason.LIMIT_REACH);
}
}

return resultBatch;
}
@@ -1475,6 +1481,18 @@ private void cancelInternal(Types.PPlanFragmentCancelReason cancelReason, long b
executionProfile.onCancel();
}

private void cleanRemoteFragmentsAsync(Types.PPlanFragmentCancelReason cleanReason) {
if (enablePipelineEngine) {
for (PipelineExecContext ctx : pipelineExecContexts.values()) {
ctx.cleanFragmentInstance(cleanReason);
}
} else {
for (BackendExecState backendExecState : backendExecStates) {
backendExecState.cleanFragmentInstance(cleanReason);
}
}
}

private void cancelRemoteFragmentsAsync(Types.PPlanFragmentCancelReason cancelReason) {
if (enablePipelineEngine) {
for (PipelineExecContext ctx : pipelineExecContexts.values()) {
@@ -3014,6 +3032,18 @@ public synchronized void printProfile(StringBuilder builder) {
this.instanceProfile.prettyPrint(builder, "");
}

public synchronized void cleanFragmentInstance(Types.PPlanFragmentCancelReason cleanReason) {
if (!initiated || done || hasCanceled) {
return;
}
try {
BackendServiceProxy.getInstance().cancelPlanFragmentAsync(brpcAddress,
fragmentInstanceId(), cleanReason);
} catch (RpcException ignored) {
// do nothing
}
}

// cancel the fragment instance.
// return true if cancel success. Otherwise, return false
public synchronized boolean cancelFragmentInstance(Types.PPlanFragmentCancelReason cancelReason) {
@@ -3213,6 +3243,22 @@ public synchronized void printProfile(StringBuilder builder) {
});
}

// clean all fragment instances, in order to stop the running instances once the query has finished.
// used for queries with a limit statement.
public synchronized void cleanFragmentInstance(Types.PPlanFragmentCancelReason cleanReason) {
if (!initiated || done || hasCanceled) {
return;
}
for (TPipelineInstanceParams localParam : rpcParams.local_params) {
try {
BackendServiceProxy.getInstance().cancelPlanFragmentAsync(brpcAddress,
localParam.fragment_instance_id, cleanReason);
} catch (RpcException ignored) {
// do nothing
}
}
}

// cancel all fragment instances.
// return true if cancel success. Otherwise, return false
