15 changes: 7 additions & 8 deletions be/src/runtime/load_stream_writer.cpp
@@ -187,13 +187,6 @@ Status LoadStreamWriter::add_segment(uint32_t segid, const SegmentStatistics& st
    if (!_is_init) {
        return Status::Corruption("add_segment failed, LoadStreamWriter is not inited");
    }
    if (_inverted_file_writers.size() > 0 &&
        _inverted_file_writers.size() != _segment_file_writers.size()) {
        return Status::Corruption(
                "add_segment failed, inverted file writer size is {},"
                "segment file writer size is {}",
                _inverted_file_writers.size(), _segment_file_writers.size());
    }
    DBUG_EXECUTE_IF("LoadStreamWriter.add_segment.bad_segid",
                    { segid = _segment_file_writers.size(); });
    RETURN_IF_ERROR(_calc_file_size(segid, FileType::SEGMENT_FILE, &segment_file_size));
@@ -255,7 +248,13 @@ Status LoadStreamWriter::close() {
    if (_is_canceled) {
        return Status::InternalError("flush segment failed");
    }

    if (_inverted_file_writers.size() > 0 &&
        _inverted_file_writers.size() != _segment_file_writers.size()) {
        return Status::Corruption(
                "LoadStreamWriter close failed, inverted file writer size is {},"
                "segment file writer size is {}",
                _inverted_file_writers.size(), _segment_file_writers.size());
    }
    for (const auto& writer : _segment_file_writers) {
        if (writer->state() != io::FileWriter::State::CLOSED) {
            return Status::Corruption("LoadStreamWriter close failed, segment {} is not closed",
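For reference, a minimal standalone sketch of the invariant that the relocated check enforces at close time: when any inverted-index file writers exist, there must be exactly one per segment file writer. The struct and function names below are illustrative placeholders, not the actual Doris types.

#include <cstdio>
#include <vector>

struct FileWriter {}; // stand-in for the real BE writer types

// Returns true when the writer counts are consistent: either no inverted-index
// writers at all, or exactly one per segment file writer.
static bool writers_consistent(const std::vector<FileWriter>& inverted_writers,
                               const std::vector<FileWriter>& segment_writers) {
    if (inverted_writers.empty()) {
        return true; // the table has no inverted indexes, nothing to cross-check
    }
    return inverted_writers.size() == segment_writers.size();
}

int main() {
    std::vector<FileWriter> inverted(2);
    std::vector<FileWriter> segments(3);
    if (!writers_consistent(inverted, segments)) {
        std::printf("inverted writer size %zu != segment writer size %zu\n",
                    inverted.size(), segments.size());
    }
    return 0;
}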
@@ -0,0 +1,4 @@
-- This file is automatically generated. You should know what you did if you want to edit this
-- !sql_select_count --
67843

@@ -0,0 +1,100 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

suite("test_move_memtable_multi_segment_index", "nonConcurrent"){
def backendId_to_backendIP = [:]
def backendId_to_backendHttpPort = [:]
getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
def set_be_config = { key, value ->
for (String backend_id: backendId_to_backendIP.keySet()) {
def (code, out, err) = update_be_config(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), key, value)
logger.info("update config: code=" + code + ", out=" + out + ", err=" + err)
}
}
def load_json_data = {table_name, file_name ->
// load the json data
streamLoad {
table "${table_name}"

// set http request header params
set 'read_json_by_line', 'true'
set 'format', 'json'
set 'max_filter_ratio', '0.1'
set 'memtable_on_sink_node', 'true'
file file_name // import json file
time 10000 // limit inflight 10s

        // if a check callback is declared, the default check condition is ignored,
        // so all of the conditions must be verified here

check { result, exception, startTime, endTime ->
if (exception != null) {
throw exception
}
logger.info("Stream load ${file_name} result: ${result}".toString())
def json = parseJson(result)
assertEquals("success", json.Status.toLowerCase())
// assertEquals(json.NumberTotalRows, json.NumberLoadedRows + json.NumberUnselectedRows)
assertTrue(json.NumberLoadedRows > 0 && json.LoadBytes > 0)
}
}
}
try {
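        // shrink write_buffer_size to 2MB so memtables flush early and each load
        // produces multiple segments (the original value is restored in the finally block)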
set_be_config("write_buffer_size", "2097152")
def table_name = "github_events"
sql """DROP TABLE IF EXISTS ${table_name}"""
table_name = "github_events"
sql """
CREATE TABLE IF NOT EXISTS ${table_name} (
k bigint,
v variant,
INDEX idx_var(v) USING INVERTED PROPERTIES("parser" = "english") COMMENT ''
)
DUPLICATE KEY(`k`)
DISTRIBUTED BY HASH(k) BUCKETS 1
properties("replication_num" = "1", "disable_auto_compaction" = "true");
"""

load_json_data.call(table_name, """${getS3Url() + '/regression/gharchive.m/2015-01-01-0.json'}""")
load_json_data.call(table_name, """${getS3Url() + '/regression/gharchive.m/2015-01-01-1.json'}""")
load_json_data.call(table_name, """${getS3Url() + '/regression/gharchive.m/2015-01-01-2.json'}""")
load_json_data.call(table_name, """${getS3Url() + '/regression/gharchive.m/2015-01-01-3.json'}""")
load_json_data.call(table_name, """${getS3Url() + '/regression/gharchive.m/2022-11-07-16.json'}""")
load_json_data.call(table_name, """${getS3Url() + '/regression/gharchive.m/2022-11-07-10.json'}""")
load_json_data.call(table_name, """${getS3Url() + '/regression/gharchive.m/2022-11-07-22.json'}""")
load_json_data.call(table_name, """${getS3Url() + '/regression/gharchive.m/2022-11-07-23.json'}""")

sql """DROP TABLE IF EXISTS github_events_2"""
sql """
CREATE TABLE IF NOT EXISTS `github_events_2` (
`k` BIGINT NULL,
`v` text NULL,
INDEX idx_var (`v`) USING INVERTED PROPERTIES("parser" = "english") COMMENT ''
) ENGINE = OLAP DUPLICATE KEY(`k`) COMMENT 'OLAP' DISTRIBUTED BY HASH(`k`) BUCKETS 1 PROPERTIES (
"replication_allocation" = "tag.location.default: 1"
);
"""

sql """
insert into github_events_2 select 1, cast(v["repo"]["name"] as string) FROM github_events;
"""
qt_sql_select_count """ select count(*) from github_events_2; """
} finally {
set_be_config("write_buffer_size", "209715200")
}

}
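The qt_sql_select_count statement above is what the regression framework matches against the "-- !sql_select_count --" block in the new .out file shown earlier. A minimal sketch of that pattern, using a hypothetical suite, tag, and table name:

// Sketch only: a qt_-prefixed statement runs the SQL and diffs the result
// against the block tagged "-- !my_count --" in the suite's .out data file.
// The suite name, tag, and table below are hypothetical.
suite("sketch_expected_output_usage") {
    qt_my_count """ select count(*) from some_table; """
}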
@@ -127,6 +127,7 @@ suite("regression_test_variant_github_events_p0", "nonConcurrent"){
set 'read_json_by_line', 'true'
set 'format', 'json'
set 'max_filter_ratio', '0.1'
set 'memtable_on_sink_node', 'true'
file file_name // import json file
time 10000 // limit inflight 10s

@@ -34,6 +34,7 @@ suite("regression_test_variant_github_events_p0", "nonConcurrent"){
set 'read_json_by_line', 'true'
set 'format', 'json'
set 'max_filter_ratio', '0.1'
set 'memtable_on_sink_node', 'true'
file file_name // import json file
time 10000 // limit inflight 10s

1 change: 1 addition & 0 deletions regression-test/suites/variant_p0/load.groovy
@@ -26,6 +26,7 @@ suite("regression_test_variant", "nonConcurrent"){
set 'read_json_by_line', 'true'
set 'format', 'json'
set 'max_filter_ratio', '0.1'
set 'memtable_on_sink_node', 'true'
file file_name // import json file
time 10000 // limit inflight 10s
