1 change: 1 addition & 0 deletions .licenserc.yaml
@@ -102,4 +102,5 @@ header:
     - "pytest/deploy/*.conf"
     - "tools/jeprof"
     - "tools/FlameGraph/*"
+    - "fs_brokers/cdc_client/src/main/java/io/debezium/**"
   comment: on-failure
@@ -785,6 +785,9 @@ private String getShowSQL() {
         sb.append("FROM ").append(dataSourceType.name());
         sb.append("(");
         for (Map.Entry<String, String> entry : sourceProperties.entrySet()) {
+            if (entry.getKey().equalsIgnoreCase("password")) {
+                continue;
+            }
             sb.append("'").append(entry.getKey())
                    .append("'='").append(entry.getValue()).append("',");
         }
@@ -1116,6 +1119,8 @@ public void commitOffset(CommitOffsetRequest offsetRequest) throws JobException
         }
 
         persistOffsetProviderIfNeed();
+        log.info("Streaming multi table job {} task {} commit offset successfully, offset: {}",
+                getJobId(), offsetRequest.getTaskId(), offsetRequest.getOffset());
         ((StreamingMultiTblTask) this.runningStreamTask).successCallback(offsetRequest);
     }
 
@@ -306,7 +306,7 @@ public String getTimeoutReason() {
                 log.warn("Failed to get task timeout reason, response: {}", response);
             }
         } catch (ExecutionException | InterruptedException ex) {
-            log.error("Send get task fail reason request failed: ", ex);
+            log.error("Send get fail reason request failed: ", ex);
         }
         return "";
     }
3 changes: 3 additions & 0 deletions fs_brokers/cdc_client/pom.xml
@@ -209,6 +209,9 @@ under the License.
                 <version>${spotless.version}</version>
                 <configuration>
                     <java>
+                        <includes>
+                            <include>src/main/java/org/apache/doris/*.java</include>
+                        </includes>
                         <googleJavaFormat>
                             <version>1.17.0</version>
                             <style>AOSP</style>

Large diffs are not rendered by default.

@@ -22,8 +22,5 @@ public class Constants {
     public static final long POLL_SPLIT_RECORDS_TIMEOUTS = 15000L;
 
     // Debezium default properties
-    public static final long DEBEZIUM_HEARTBEAT_INTERVAL_MS = 10000L;
-    public static final String DEBEZIUM_MAX_QUEUE_SIZE = "162580";
-    public static final String DEBEZIUM_MAX_BATCH_SIZE = "40960";
-    public static final String DEBEZIUM_POLL_INTERVAL_MS = "50";
+    public static final long DEBEZIUM_HEARTBEAT_INTERVAL_MS = 3000L;
 }
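
Note on the Constants change above: the queue, batch, and poll overrides are removed, so Debezium's own defaults apply to those settings, and only the heartbeat interval stays pinned (now 3s instead of 10s). A hedged sketch of where such a constant typically lands, assuming the client feeds it to Debezium's standard heartbeat.interval.ms connector property (the wiring below is ours, not from this PR):

import java.util.Properties;

public class DebeziumPropsSketch {

    // Mirrors the constant kept by the patch above.
    public static final long DEBEZIUM_HEARTBEAT_INTERVAL_MS = 3000L;

    static Properties baseProperties() {
        Properties props = new Properties();
        // heartbeat.interval.ms is a standard Debezium connector setting;
        // a shorter interval surfaces source liveness (and offsets) sooner.
        props.setProperty(
                "heartbeat.interval.ms",
                String.valueOf(DEBEZIUM_HEARTBEAT_INTERVAL_MS));
        return props;
    }

    public static void main(String[] args) {
        System.out.println(baseProperties());
    }
}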

Large diffs are not rendered by default.

@@ -56,6 +56,7 @@
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import com.fasterxml.jackson.databind.ObjectMapper;
+import lombok.Getter;
 import lombok.Setter;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -88,7 +89,7 @@ public class DorisBatchStreamLoad implements Serializable {
     private final Lock lock = new ReentrantLock();
     private final Condition block = lock.newCondition();
     private final Map<String, ReadWriteLock> bufferMapLock = new ConcurrentHashMap<>();
-    @Setter private String currentTaskId;
+    @Setter @Getter private String currentTaskId;
     private String targetDb;
     private long jobId;
     @Setter private String token;
@@ -162,42 +163,54 @@ public void writeRecord(String database, String table, byte[] record) {
                 lock.unlock();
             }
         }
+
+        // Single table flush according to the STREAM_LOAD_MAX_BYTES
+        if (buffer.getBufferSizeBytes() >= STREAM_LOAD_MAX_BYTES) {
+            bufferFullFlush(bufferKey);
+        }
     }
 
+    public void bufferFullFlush(String bufferKey) {
+        doFlush(bufferKey, false, true);
+    }
+
-    public boolean cacheFullFlush() {
-        return doFlush(true, true);
+    public void forceFlush() {
+        doFlush(null, true, false);
     }
 
-    public boolean forceFlush() {
-        return doFlush(true, false);
+    public void cacheFullFlush() {
+        doFlush(null, true, true);
     }
 
-    private synchronized boolean doFlush(boolean waitUtilDone, boolean cacheFull) {
+
+    private synchronized void doFlush(String bufferKey, boolean waitUtilDone, boolean bufferFull) {
         checkFlushException();
-        if (waitUtilDone || cacheFull) {
-            return flush(waitUtilDone);
+        if (waitUtilDone || bufferFull) {
+            flush(bufferKey, waitUtilDone);
         }
-        return false;
     }
 
-    private synchronized boolean flush(boolean waitUtilDone) {
+    private synchronized void flush(String bufferKey, boolean waitUtilDone) {
         if (!waitUtilDone && bufferMap.isEmpty()) {
             // bufferMap may have been flushed by other threads
             LOG.info("bufferMap is empty, no need to flush");
-            return false;
+            return;
         }
-        for (String key : bufferMap.keySet()) {
-            if (waitUtilDone) {
-                // Ensure that the interval satisfies intervalMS
-                flushBuffer(key);
+
+        if (null == bufferKey) {
+            for (String key : bufferMap.keySet()) {
+                if (waitUtilDone) {
+                    flushBuffer(key);
+                }
             }
-        }
-        if (!waitUtilDone) {
-            return false;
+        } else if (bufferMap.containsKey(bufferKey)) {
+            flushBuffer(bufferKey);
+        } else {
+            LOG.warn("buffer not found for key: {}, may be already flushed.", bufferKey);
         }
         if (waitUtilDone) {
             waitAsyncLoadFinish();
         }
-        return true;
     }
 
     private synchronized void flushBuffer(String bufferKey) {
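
Note on the flush refactor above: the three public entry points now funnel into one doFlush(bufferKey, waitUtilDone, bufferFull), where a null bufferKey means flush every table and a non-null key flushes only that table's buffer. A standalone sketch of the resulting routing contract (buffer contents and side effects are stand-ins; the method names and the null-key convention come from the diff):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class FlushRoutingSketch {

    private final Map<String, StringBuilder> bufferMap = new ConcurrentHashMap<>();

    // Single-table flush, taken when one buffer crosses STREAM_LOAD_MAX_BYTES.
    public void bufferFullFlush(String bufferKey) {
        doFlush(bufferKey, false, true);
    }

    // Flush everything and wait, e.g. when a task finishes.
    public void forceFlush() {
        doFlush(null, true, false);
    }

    // Flush everything when the shared cache is under memory pressure.
    public void cacheFullFlush() {
        doFlush(null, true, true);
    }

    private synchronized void doFlush(String bufferKey, boolean waitUtilDone, boolean bufferFull) {
        if (!waitUtilDone && !bufferFull) {
            return; // nothing requested
        }
        if (bufferKey == null) {
            bufferMap.keySet().forEach(this::flushBuffer); // all tables
        } else if (bufferMap.containsKey(bufferKey)) {
            flushBuffer(bufferKey); // just the hot table
        }
    }

    private void flushBuffer(String key) {
        StringBuilder buf = bufferMap.remove(key);
        System.out.println("flushed " + key + " -> " + buf);
    }

    public static void main(String[] args) {
        FlushRoutingSketch sketch = new FlushRoutingSketch();
        sketch.bufferMap.put("db.t1", new StringBuilder("rows..."));
        sketch.bufferFullFlush("db.t1"); // single-table path
        sketch.forceFlush();             // all-tables path (now a no-op)
    }
}

The per-key path also explains the new warn log: a buffer can legitimately be missing if another thread flushed it between the size check and doFlush.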
@@ -481,13 +494,14 @@ public void resetTaskId() {
     }
 
     /** commit offset to frontends. */
-    public void commitOffset(Map<String, String> meta, long scannedRows, long scannedBytes) {
+    public void commitOffset(
+            String taskId, Map<String, String> meta, long scannedRows, long scannedBytes) {
         try {
             String url = String.format(COMMIT_URL_PATTERN, frontendAddress, targetDb);
             Map<String, Object> commitParams = new HashMap<>();
             commitParams.put("offset", OBJECT_MAPPER.writeValueAsString(meta));
             commitParams.put("jobId", jobId);
-            commitParams.put("taskId", currentTaskId);
+            commitParams.put("taskId", taskId);
             commitParams.put("scannedRows", scannedRows);
             commitParams.put("scannedBytes", scannedBytes);
             String param = OBJECT_MAPPER.writeValueAsString(commitParams);
@@ -501,8 +515,7 @@ public void commitOffset(Map<String, String> meta, long scannedRows, long scannedBytes) {
                     .commit()
                     .setEntity(new StringEntity(param));
 
-            LOG.info(
-                    "commit offset for jobId {} taskId {}, params {}", jobId, currentTaskId, param);
+            LOG.info("commit offset for jobId {} taskId {}, params {}", jobId, taskId, param);
             Throwable resEx = null;
             int retry = 0;
             while (retry <= RETRY) {
@@ -516,7 +529,7 @@ public void commitOffset(Map<String, String> meta, long scannedRows, long scannedBytes) {
                             : "";
                 LOG.info("commit result {}", responseBody);
                 if (statusCode == 200) {
-                    LOG.info("commit offset for jobId {} taskId {}", jobId, currentTaskId);
+                    LOG.info("commit offset for jobId {} taskId {}", jobId, taskId);
                     // A 200 response indicates that the request was successful, and
                     // information such as offset and statistics may have already been
                     // updated. Retrying may result in repeated updates.
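
Note on the commitOffset change above: the task id is now a parameter captured by the caller rather than a read of the mutable currentTaskId field (which also gained a @Getter earlier in this file). Our reading is that this pins the commit to the task that produced the batch even if the job rotates tasks mid-flight; the diff itself only shows the parameter being threaded through. A minimal sketch of that snapshot-versus-field distinction (all names here are illustrative):

import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;

public class TaskIdSnapshotSketch {

    private final AtomicReference<String> currentTaskId = new AtomicReference<>("task-1");

    // Field read at commit time: if the task rotated since the load started,
    // the offset is attributed to the wrong task.
    void commitWithField(Map<String, String> meta) {
        send(currentTaskId.get(), meta);
    }

    // The diff's shape: the caller snapshots the task id when the load begins
    // and threads it through to the commit.
    void commitWithSnapshot(String taskId, Map<String, String> meta) {
        send(taskId, meta);
    }

    private void send(String taskId, Map<String, String> meta) {
        System.out.println("commit offset for task " + taskId + ": " + meta);
    }

    public static void main(String[] args) {
        TaskIdSnapshotSketch s = new TaskIdSnapshotSketch();
        String snapshot = s.currentTaskId.get(); // captured before the load
        s.currentTaskId.set("task-2");           // task rotates meanwhile
        s.commitWithSnapshot(snapshot, Map.of("binlog", "pos=42"));
    }
}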