From d08cdd05b1df5f1e391d8ba7e6791a0aa9b0377b Mon Sep 17 00:00:00 2001 From: Socrates Date: Thu, 26 Sep 2024 13:21:20 +0800 Subject: [PATCH 1/8] parrallel run docker compose --- .../thirdparties/run-thirdparties-docker.sh | 142 +++++++++++++----- 1 file changed, 107 insertions(+), 35 deletions(-) diff --git a/docker/thirdparties/run-thirdparties-docker.sh b/docker/thirdparties/run-thirdparties-docker.sh index 36d2486a1c32be..3ae50e4c092588 100755 --- a/docker/thirdparties/run-thirdparties-docker.sh +++ b/docker/thirdparties/run-thirdparties-docker.sh @@ -182,7 +182,7 @@ for element in "${COMPONENTS_ARR[@]}"; do fi done -if [[ "${RUN_ES}" -eq 1 ]]; then +start_es() { # elasticsearch cp "${ROOT}"/docker-compose/elasticsearch/es.yaml.tpl "${ROOT}"/docker-compose/elasticsearch/es.yaml sed -i "s/doris--/${CONTAINER_UID}/g" "${ROOT}"/docker-compose/elasticsearch/es.yaml @@ -205,9 +205,9 @@ if [[ "${RUN_ES}" -eq 1 ]]; then sudo chmod -R 777 "${ROOT}"/docker-compose/elasticsearch/config sudo docker compose -f "${ROOT}"/docker-compose/elasticsearch/es.yaml --env-file "${ROOT}"/docker-compose/elasticsearch/es.env up -d --remove-orphans fi -fi +} -if [[ "${RUN_MYSQL}" -eq 1 ]]; then +start_mysql() { # mysql 5.7 cp "${ROOT}"/docker-compose/mysql/mysql-5.7.yaml.tpl "${ROOT}"/docker-compose/mysql/mysql-5.7.yaml sed -i "s/doris--/${CONTAINER_UID}/g" "${ROOT}"/docker-compose/mysql/mysql-5.7.yaml @@ -217,9 +217,9 @@ if [[ "${RUN_MYSQL}" -eq 1 ]]; then sudo mkdir -p "${ROOT}"/docker-compose/mysql/data/ sudo docker compose -f "${ROOT}"/docker-compose/mysql/mysql-5.7.yaml --env-file "${ROOT}"/docker-compose/mysql/mysql-5.7.env up -d fi -fi +} -if [[ "${RUN_PG}" -eq 1 ]]; then +start_pg() { # pg 14 cp "${ROOT}"/docker-compose/postgresql/postgresql-14.yaml.tpl "${ROOT}"/docker-compose/postgresql/postgresql-14.yaml sed -i "s/doris--/${CONTAINER_UID}/g" "${ROOT}"/docker-compose/postgresql/postgresql-14.yaml @@ -229,9 +229,9 @@ if [[ "${RUN_PG}" -eq 1 ]]; then sudo mkdir -p 
"${ROOT}"/docker-compose/postgresql/data/data sudo docker compose -f "${ROOT}"/docker-compose/postgresql/postgresql-14.yaml --env-file "${ROOT}"/docker-compose/postgresql/postgresql-14.env up -d fi -fi +} -if [[ "${RUN_ORACLE}" -eq 1 ]]; then +start_oracle() { # oracle cp "${ROOT}"/docker-compose/oracle/oracle-11.yaml.tpl "${ROOT}"/docker-compose/oracle/oracle-11.yaml sed -i "s/doris--/${CONTAINER_UID}/g" "${ROOT}"/docker-compose/oracle/oracle-11.yaml @@ -241,9 +241,9 @@ if [[ "${RUN_ORACLE}" -eq 1 ]]; then sudo mkdir -p "${ROOT}"/docker-compose/oracle/data/ sudo docker compose -f "${ROOT}"/docker-compose/oracle/oracle-11.yaml --env-file "${ROOT}"/docker-compose/oracle/oracle-11.env up -d fi -fi +} -if [[ "${RUN_DB2}" -eq 1 ]]; then +start_db2() { # db2 cp "${ROOT}"/docker-compose/db2/db2.yaml.tpl "${ROOT}"/docker-compose/db2/db2.yaml sed -i "s/doris--/${CONTAINER_UID}/g" "${ROOT}"/docker-compose/db2/db2.yaml @@ -253,9 +253,9 @@ if [[ "${RUN_DB2}" -eq 1 ]]; then sudo mkdir -p "${ROOT}"/docker-compose/db2/data/ sudo docker compose -f "${ROOT}"/docker-compose/db2/db2.yaml --env-file "${ROOT}"/docker-compose/db2/db2.env up -d fi -fi +} -if [[ "${RUN_OCEANBASE}" -eq 1 ]]; then +start_oceanbase() { # oceanbase cp "${ROOT}"/docker-compose/oceanbase/oceanbase.yaml.tpl "${ROOT}"/docker-compose/oceanbase/oceanbase.yaml sed -i "s/doris--/${CONTAINER_UID}/g" "${ROOT}"/docker-compose/oceanbase/oceanbase.yaml @@ -265,9 +265,9 @@ if [[ "${RUN_OCEANBASE}" -eq 1 ]]; then sudo mkdir -p "${ROOT}"/docker-compose/oceanbase/data/ sudo docker compose -f "${ROOT}"/docker-compose/oceanbase/oceanbase.yaml --env-file "${ROOT}"/docker-compose/oceanbase/oceanbase.env up -d fi -fi +} -if [[ "${RUN_SQLSERVER}" -eq 1 ]]; then +start_sqlserver() { # sqlserver cp "${ROOT}"/docker-compose/sqlserver/sqlserver.yaml.tpl "${ROOT}"/docker-compose/sqlserver/sqlserver.yaml sed -i "s/doris--/${CONTAINER_UID}/g" "${ROOT}"/docker-compose/sqlserver/sqlserver.yaml @@ -277,9 +277,9 @@ if [[ "${RUN_SQLSERVER}" 
-eq 1 ]]; then sudo mkdir -p "${ROOT}"/docker-compose/sqlserver/data/ sudo docker compose -f "${ROOT}"/docker-compose/sqlserver/sqlserver.yaml --env-file "${ROOT}"/docker-compose/sqlserver/sqlserver.env up -d fi -fi +} -if [[ "${RUN_CLICKHOUSE}" -eq 1 ]]; then +start_clickhouse() { # clickhouse cp "${ROOT}"/docker-compose/clickhouse/clickhouse.yaml.tpl "${ROOT}"/docker-compose/clickhouse/clickhouse.yaml sed -i "s/doris--/${CONTAINER_UID}/g" "${ROOT}"/docker-compose/clickhouse/clickhouse.yaml @@ -289,9 +289,9 @@ if [[ "${RUN_CLICKHOUSE}" -eq 1 ]]; then sudo mkdir -p "${ROOT}"/docker-compose/clickhouse/data/ sudo docker compose -f "${ROOT}"/docker-compose/clickhouse/clickhouse.yaml --env-file "${ROOT}"/docker-compose/clickhouse/clickhouse.env up -d fi -fi +} -if [[ "${RUN_KAFKA}" -eq 1 ]]; then +start_kafka() { # kafka KAFKA_CONTAINER_ID="${CONTAINER_UID}kafka" eth_name=$(ifconfig -a | grep -E "^eth[0-9]" | sort -k1.4n | awk -F ':' '{print $1}' | head -n 1) @@ -320,9 +320,9 @@ if [[ "${RUN_KAFKA}" -eq 1 ]]; then sleep 10s create_kafka_topics "${KAFKA_CONTAINER_ID}" "${IP_HOST}" fi -fi +} -if [[ "${RUN_HIVE2}" -eq 1 ]]; then +start_hive2() { # hive2 # If the doris cluster you need to test is single-node, you can use the default values; If the doris cluster you need to test is composed of multiple nodes, then you need to set the IP_HOST according to the actual situation of your machine #default value @@ -360,9 +360,9 @@ if [[ "${RUN_HIVE2}" -eq 1 ]]; then if [[ "${STOP}" -ne 1 ]]; then sudo docker compose -p ${CONTAINER_UID}hive2 -f "${ROOT}"/docker-compose/hive/hive-2x.yaml --env-file "${ROOT}"/docker-compose/hive/hadoop-hive-2x.env up --build --remove-orphans -d fi -fi +} -if [[ "${RUN_HIVE3}" -eq 1 ]]; then +start_hive3() { # hive3 # If the doris cluster you need to test is single-node, you can use the default values; If the doris cluster you need to test is composed of multiple nodes, then you need to set the IP_HOST according to the actual situation of your 
machine #default value @@ -399,16 +399,16 @@ if [[ "${RUN_HIVE3}" -eq 1 ]]; then if [[ "${STOP}" -ne 1 ]]; then sudo docker compose -p ${CONTAINER_UID}hive3 -f "${ROOT}"/docker-compose/hive/hive-3x.yaml --env-file "${ROOT}"/docker-compose/hive/hadoop-hive-3x.env up --build --remove-orphans -d fi -fi +} -if [[ "${RUN_SPARK}" -eq 1 ]]; then +start_spark() { sudo docker compose -f "${ROOT}"/docker-compose/spark/spark.yaml down if [[ "${STOP}" -ne 1 ]]; then sudo docker compose -f "${ROOT}"/docker-compose/spark/spark.yaml up --build --remove-orphans -d fi -fi +} -if [[ "${RUN_ICEBERG}" -eq 1 ]]; then +start_iceberg() { # iceberg ICEBERG_DIR=${ROOT}/docker-compose/iceberg cp "${ROOT}"/docker-compose/iceberg/iceberg.yaml.tpl "${ROOT}"/docker-compose/iceberg/iceberg.yaml @@ -433,9 +433,9 @@ if [[ "${RUN_ICEBERG}" -eq 1 ]]; then sudo docker compose -f "${ROOT}"/docker-compose/iceberg/iceberg.yaml --env-file "${ROOT}"/docker-compose/iceberg/iceberg.env up -d fi -fi +} -if [[ "${RUN_HUDI}" -eq 1 ]]; then +start_hudi() { # hudi cp "${ROOT}"/docker-compose/hudi/hudi.yaml.tpl "${ROOT}"/docker-compose/hudi/hudi.yaml sed -i "s/doris--/${CONTAINER_UID}/g" "${ROOT}"/docker-compose/hudi/hudi.yaml @@ -455,9 +455,9 @@ if [[ "${RUN_HUDI}" -eq 1 ]]; then docker exec -it adhoc-1 /bin/bash /var/scripts/setup_demo_container_adhoc_1.sh docker exec -it adhoc-2 /bin/bash /var/scripts/setup_demo_container_adhoc_2.sh fi -fi +} -if [[ "${RUN_TRINO}" -eq 1 ]]; then +start_trino() { # trino trino_docker="${ROOT}"/docker-compose/trino TRINO_CONTAINER_ID="${CONTAINER_UID}trino" @@ -530,9 +530,9 @@ if [[ "${RUN_TRINO}" -eq 1 ]]; then # execute create table sql docker exec -it ${TRINO_CONTAINER_ID} /bin/bash -c 'trino -f /scripts/create_trino_table.sql' fi -fi +} -if [[ "${RUN_MARIADB}" -eq 1 ]]; then +start_mariadb() { # mariadb cp "${ROOT}"/docker-compose/mariadb/mariadb-10.yaml.tpl "${ROOT}"/docker-compose/mariadb/mariadb-10.yaml sed -i "s/doris--/${CONTAINER_UID}/g" 
"${ROOT}"/docker-compose/mariadb/mariadb-10.yaml @@ -542,9 +542,9 @@ if [[ "${RUN_MARIADB}" -eq 1 ]]; then sudo rm "${ROOT}"/docker-compose/mariadb/data/* -rf sudo docker compose -f "${ROOT}"/docker-compose/mariadb/mariadb-10.yaml --env-file "${ROOT}"/docker-compose/mariadb/mariadb-10.env up -d fi -fi +} -if [[ "${RUN_LAKESOUL}" -eq 1 ]]; then +start_lakesoul() { echo "RUN_LAKESOUL" cp "${ROOT}"/docker-compose/lakesoul/lakesoul.yaml.tpl "${ROOT}"/docker-compose/lakesoul/lakesoul.yaml sed -i "s/doris--/${CONTAINER_UID}/g" "${ROOT}"/docker-compose/lakesoul/lakesoul.yaml @@ -575,9 +575,9 @@ if [[ "${RUN_LAKESOUL}" -eq 1 ]]; then cd LakeSoul/rust cargo test load_tpch_data --package lakesoul-datafusion --features=ci -- --nocapture fi -fi +} -if [[ "${RUN_KERBEROS}" -eq 1 ]]; then +start_kerberos() { echo "RUN_KERBEROS" cp "${ROOT}"/docker-compose/kerberos/kerberos.yaml.tpl "${ROOT}"/docker-compose/kerberos/kerberos.yaml sed -i "s/doris--/${CONTAINER_UID}/g" "${ROOT}"/docker-compose/kerberos/kerberos.yaml @@ -599,4 +599,76 @@ if [[ "${RUN_KERBEROS}" -eq 1 ]]; then echo '172.31.71.26 hadoop-master-2' >> /etc/hosts sleep 2 fi +} + +if [[ "${RUN_ES}" -eq 1 ]]; then + start_es > /dev/null 2>&1 & +fi + +if [[ "${RUN_MYSQL}" -eq 1 ]]; then + start_mysql > /dev/null 2>&1 & +fi + +if [[ "${RUN_PG}" -eq 1 ]]; then + start_pg > /dev/null 2>&1 & +fi + +if [[ "${RUN_ORACLE}" -eq 1 ]]; then + start_oracle > /dev/null 2>&1 & +fi + +if [[ "${RUN_DB2}" -eq 1 ]]; then + start_db2 > /dev/null 2>&1 & +fi + +if [[ "${RUN_OCEANBASE}" -eq 1 ]]; then + start_oceanbase > /dev/null 2>&1 & +fi + +if [[ "${RUN_SQLSERVER}" -eq 1 ]]; then + start_sqlserver > /dev/null 2>&1 & +fi + +if [[ "${RUN_CLICKHOUSE}" -eq 1 ]]; then + start_clickhouse > /dev/null 2>&1 & +fi + +if [[ "${RUN_KAFKA}" -eq 1 ]]; then + start_kafka > /dev/null 2>&1 & +fi + +if [[ "${RUN_HIVE2}" -eq 1 ]]; then + start_hive2 > /dev/null 2>&1 & +fi + +if [[ "${RUN_HIVE3}" -eq 1 ]]; then + start_hive3 > /dev/null 2>&1 & +fi + +if [[ 
"${RUN_SPARK}" -eq 1 ]]; then + start_spark > /dev/null 2>&1 & +fi + +if [[ "${RUN_ICEBERG}" -eq 1 ]]; then + start_iceberg > /dev/null 2>&1 & +fi + +if [[ "${RUN_HUDI}" -eq 1 ]]; then + start_hudi > /dev/null 2>&1 & +fi + +if [[ "${RUN_TRINO}" -eq 1 ]]; then + start_trino > /dev/null 2>&1 & +fi + +if [[ "${RUN_MARIADB}" -eq 1 ]]; then + start_mariadb > /dev/null 2>&1 & +fi + +if [[ "${RUN_LAKESOUL}" -eq 1 ]]; then + start_lakesoul > /dev/null 2>&1 & +fi + +if [[ "${RUN_KERBEROS}" -eq 1 ]]; then + start_kerberos > /dev/null 2>&1 & fi From 329728a90da0cae1cad92ac07f395c0f009abfe8 Mon Sep 17 00:00:00 2001 From: Socrates Date: Thu, 26 Sep 2024 13:36:27 +0800 Subject: [PATCH 2/8] catch log --- .../thirdparties/run-thirdparties-docker.sh | 41 +++++++++++-------- 1 file changed, 23 insertions(+), 18 deletions(-) diff --git a/docker/thirdparties/run-thirdparties-docker.sh b/docker/thirdparties/run-thirdparties-docker.sh index 3ae50e4c092588..fb0de7907c4496 100755 --- a/docker/thirdparties/run-thirdparties-docker.sh +++ b/docker/thirdparties/run-thirdparties-docker.sh @@ -601,74 +601,79 @@ start_kerberos() { fi } +echo "starting dockers in parrallel" + if [[ "${RUN_ES}" -eq 1 ]]; then - start_es > /dev/null 2>&1 & + start_es > start_es.log 2>&1 & fi if [[ "${RUN_MYSQL}" -eq 1 ]]; then - start_mysql > /dev/null 2>&1 & + start_mysql > start_mysql.log 2>&1 & fi if [[ "${RUN_PG}" -eq 1 ]]; then - start_pg > /dev/null 2>&1 & + start_pg > start_pg.log 2>&1 & fi if [[ "${RUN_ORACLE}" -eq 1 ]]; then - start_oracle > /dev/null 2>&1 & + start_oracle > start_oracle.log 2>&1 & fi if [[ "${RUN_DB2}" -eq 1 ]]; then - start_db2 > /dev/null 2>&1 & + start_db2 > start_db2.log 2>&1 & fi if [[ "${RUN_OCEANBASE}" -eq 1 ]]; then - start_oceanbase > /dev/null 2>&1 & + start_oceanbase > start_oceanbase.log 2>&1 & fi if [[ "${RUN_SQLSERVER}" -eq 1 ]]; then - start_sqlserver > /dev/null 2>&1 & + start_sqlserver > start_sqlserver.log 2>&1 & fi if [[ "${RUN_CLICKHOUSE}" -eq 1 ]]; then - 
start_clickhouse > /dev/null 2>&1 & + start_clickhouse > start_clickhouse.log 2>&1 & fi if [[ "${RUN_KAFKA}" -eq 1 ]]; then - start_kafka > /dev/null 2>&1 & + start_kafka > start_kafka.log 2>&1 & fi if [[ "${RUN_HIVE2}" -eq 1 ]]; then - start_hive2 > /dev/null 2>&1 & + start_hive2 > start_hive2.log 2>&1 & fi if [[ "${RUN_HIVE3}" -eq 1 ]]; then - start_hive3 > /dev/null 2>&1 & + start_hive3 > start_hive3.log 2>&1 & fi if [[ "${RUN_SPARK}" -eq 1 ]]; then - start_spark > /dev/null 2>&1 & + start_spark > start_spark.log 2>&1 & fi if [[ "${RUN_ICEBERG}" -eq 1 ]]; then - start_iceberg > /dev/null 2>&1 & + start_iceberg > start_icerberg.log 2>&1 & fi if [[ "${RUN_HUDI}" -eq 1 ]]; then - start_hudi > /dev/null 2>&1 & + start_hudi > start_hudi.log 2>&1 & fi if [[ "${RUN_TRINO}" -eq 1 ]]; then - start_trino > /dev/null 2>&1 & + start_trino > start_trino.log 2>&1 & fi if [[ "${RUN_MARIADB}" -eq 1 ]]; then - start_mariadb > /dev/null 2>&1 & + start_mariadb > start_mariadb.log 2>&1 & fi if [[ "${RUN_LAKESOUL}" -eq 1 ]]; then - start_lakesoul > /dev/null 2>&1 & + start_lakesoul > start_lakesoule.log 2>&1 & fi if [[ "${RUN_KERBEROS}" -eq 1 ]]; then - start_kerberos > /dev/null 2>&1 & + start_kerberos > start_kerberos.log 2>&1 & fi + +echo "waiting all dockers starting done" +wait \ No newline at end of file From e7cb1646c8bc638aec4e800d12676be78db7fe7f Mon Sep 17 00:00:00 2001 From: Socrates Date: Thu, 26 Sep 2024 17:01:36 +0800 Subject: [PATCH 3/8] handle the error of subprocess --- .../thirdparties/run-thirdparties-docker.sh | 31 ++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/docker/thirdparties/run-thirdparties-docker.sh b/docker/thirdparties/run-thirdparties-docker.sh index fb0de7907c4496..54dbe00753ec76 100755 --- a/docker/thirdparties/run-thirdparties-docker.sh +++ b/docker/thirdparties/run-thirdparties-docker.sh @@ -603,77 +603,106 @@ start_kerberos() { echo "starting dockers in parrallel" +pids=() + if [[ "${RUN_ES}" -eq 1 ]]; then 
start_es > start_es.log 2>&1 & + pids+=($!) fi if [[ "${RUN_MYSQL}" -eq 1 ]]; then start_mysql > start_mysql.log 2>&1 & + pids+=($!) fi if [[ "${RUN_PG}" -eq 1 ]]; then start_pg > start_pg.log 2>&1 & + pids+=($!) fi if [[ "${RUN_ORACLE}" -eq 1 ]]; then start_oracle > start_oracle.log 2>&1 & + pids+=($!) fi if [[ "${RUN_DB2}" -eq 1 ]]; then start_db2 > start_db2.log 2>&1 & + pids+=($!) fi if [[ "${RUN_OCEANBASE}" -eq 1 ]]; then start_oceanbase > start_oceanbase.log 2>&1 & + pids+=($!) fi if [[ "${RUN_SQLSERVER}" -eq 1 ]]; then start_sqlserver > start_sqlserver.log 2>&1 & + pids+=($!) fi if [[ "${RUN_CLICKHOUSE}" -eq 1 ]]; then start_clickhouse > start_clickhouse.log 2>&1 & + pids+=($!) fi if [[ "${RUN_KAFKA}" -eq 1 ]]; then start_kafka > start_kafka.log 2>&1 & + pids+=($!) fi if [[ "${RUN_HIVE2}" -eq 1 ]]; then start_hive2 > start_hive2.log 2>&1 & + pids+=($!) fi if [[ "${RUN_HIVE3}" -eq 1 ]]; then start_hive3 > start_hive3.log 2>&1 & + pids+=($!) fi if [[ "${RUN_SPARK}" -eq 1 ]]; then start_spark > start_spark.log 2>&1 & + pids+=($!) fi if [[ "${RUN_ICEBERG}" -eq 1 ]]; then start_iceberg > start_icerberg.log 2>&1 & + pids+=($!) fi if [[ "${RUN_HUDI}" -eq 1 ]]; then start_hudi > start_hudi.log 2>&1 & + pids+=($!) fi if [[ "${RUN_TRINO}" -eq 1 ]]; then start_trino > start_trino.log 2>&1 & + pids+=($!) fi if [[ "${RUN_MARIADB}" -eq 1 ]]; then start_mariadb > start_mariadb.log 2>&1 & + pids+=($!) fi if [[ "${RUN_LAKESOUL}" -eq 1 ]]; then start_lakesoul > start_lakesoule.log 2>&1 & + pids+=($!) fi if [[ "${RUN_KERBEROS}" -eq 1 ]]; then start_kerberos > start_kerberos.log 2>&1 & + pids+=($!) fi echo "waiting all dockers starting done" -wait \ No newline at end of file + +for pid in "${pids[@]}"; do + wait "${pid}" + if [ $? 
-ne 0 ]; then + echo "one of the dockers started failed, exiting" + exit 1 + fi +done + +echo "all dockers started successfully" From f81f5199bd68ac04f08cd327e22ae33e432b81c0 Mon Sep 17 00:00:00 2001 From: Socrates Date: Fri, 27 Sep 2024 00:59:53 +0800 Subject: [PATCH 4/8] fix --- .../thirdparties/run-thirdparties-docker.sh | 48 ++++++++++--------- 1 file changed, 25 insertions(+), 23 deletions(-) diff --git a/docker/thirdparties/run-thirdparties-docker.sh b/docker/thirdparties/run-thirdparties-docker.sh index 54dbe00753ec76..03aa912272a4f8 100755 --- a/docker/thirdparties/run-thirdparties-docker.sh +++ b/docker/thirdparties/run-thirdparties-docker.sh @@ -603,104 +603,106 @@ start_kerberos() { echo "starting dockers in parrallel" -pids=() +declare -A pids if [[ "${RUN_ES}" -eq 1 ]]; then start_es > start_es.log 2>&1 & - pids+=($!) + pids["es"]=$! fi if [[ "${RUN_MYSQL}" -eq 1 ]]; then start_mysql > start_mysql.log 2>&1 & - pids+=($!) + pids["mysql"]=$! fi if [[ "${RUN_PG}" -eq 1 ]]; then start_pg > start_pg.log 2>&1 & - pids+=($!) + pids["pg"]=$! fi if [[ "${RUN_ORACLE}" -eq 1 ]]; then start_oracle > start_oracle.log 2>&1 & - pids+=($!) + pids["oracle"]=$! fi if [[ "${RUN_DB2}" -eq 1 ]]; then start_db2 > start_db2.log 2>&1 & - pids+=($!) + pids["db2"]=$! fi if [[ "${RUN_OCEANBASE}" -eq 1 ]]; then start_oceanbase > start_oceanbase.log 2>&1 & - pids+=($!) + pids["oceanbase"]=$! fi if [[ "${RUN_SQLSERVER}" -eq 1 ]]; then start_sqlserver > start_sqlserver.log 2>&1 & - pids+=($!) + pids["sqlserver"]=$! fi if [[ "${RUN_CLICKHOUSE}" -eq 1 ]]; then start_clickhouse > start_clickhouse.log 2>&1 & - pids+=($!) + pids["clickhouse"]=$! fi if [[ "${RUN_KAFKA}" -eq 1 ]]; then start_kafka > start_kafka.log 2>&1 & - pids+=($!) + pids["kafka"]=$! fi if [[ "${RUN_HIVE2}" -eq 1 ]]; then start_hive2 > start_hive2.log 2>&1 & - pids+=($!) + pids["hive2"]=$! fi if [[ "${RUN_HIVE3}" -eq 1 ]]; then start_hive3 > start_hive3.log 2>&1 & - pids+=($!) + pids["hive3"]=$! 
fi if [[ "${RUN_SPARK}" -eq 1 ]]; then start_spark > start_spark.log 2>&1 & - pids+=($!) + pids["spark"]=$! fi if [[ "${RUN_ICEBERG}" -eq 1 ]]; then start_iceberg > start_icerberg.log 2>&1 & - pids+=($!) + pids["iceberg"]=$! fi if [[ "${RUN_HUDI}" -eq 1 ]]; then start_hudi > start_hudi.log 2>&1 & - pids+=($!) + pids["hudi"]=$! fi if [[ "${RUN_TRINO}" -eq 1 ]]; then start_trino > start_trino.log 2>&1 & - pids+=($!) + pids["trino"]=$! fi if [[ "${RUN_MARIADB}" -eq 1 ]]; then start_mariadb > start_mariadb.log 2>&1 & - pids+=($!) + pids["mariadb"]=$! fi if [[ "${RUN_LAKESOUL}" -eq 1 ]]; then start_lakesoul > start_lakesoule.log 2>&1 & - pids+=($!) + pids["lakesoul"]=$! fi if [[ "${RUN_KERBEROS}" -eq 1 ]]; then start_kerberos > start_kerberos.log 2>&1 & - pids+=($!) + pids["kerberos"]=$! fi echo "waiting all dockers starting done" -for pid in "${pids[@]}"; do - wait "${pid}" - if [ $? -ne 0 ]; then - echo "one of the dockers started failed, exiting" +for compose in "${!pids[@]}"; do + # prevent wait return 1 make the script exit + status=0 + wait "${pids[$compose]}" || status=$? + if [ $status -ne 0 ]; then + echo "docker $compose started failed with status $status" exit 1 fi done From fec92ea9afa20ba3d4adc3273a04607d36f32e18 Mon Sep 17 00:00:00 2001 From: Socrates Date: Fri, 27 Sep 2024 10:52:35 +0800 Subject: [PATCH 5/8] echo log when error --- docker/thirdparties/run-thirdparties-docker.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docker/thirdparties/run-thirdparties-docker.sh b/docker/thirdparties/run-thirdparties-docker.sh index 03aa912272a4f8..ca7a4eec75c453 100755 --- a/docker/thirdparties/run-thirdparties-docker.sh +++ b/docker/thirdparties/run-thirdparties-docker.sh @@ -703,6 +703,8 @@ for compose in "${!pids[@]}"; do wait "${pids[$compose]}" || status=$? 
if [ $status -ne 0 ]; then echo "docker $compose started failed with status $status" + echo "print start_${compose}.log" + cat start_${compose}.log exit 1 fi done From 5562025809f0a0b0d180b6be65782102e8547c68 Mon Sep 17 00:00:00 2001 From: Socrates Date: Fri, 27 Sep 2024 15:21:53 +0800 Subject: [PATCH 6/8] add lock to async hive2 and hive3 init --- .../hive/scripts/hive-metastore.sh | 32 ++++++++++++++++--- 1 file changed, 27 insertions(+), 5 deletions(-) diff --git a/docker/thirdparties/docker-compose/hive/scripts/hive-metastore.sh b/docker/thirdparties/docker-compose/hive/scripts/hive-metastore.sh index 5cf08f1f429bf5..154550abd9b577 100755 --- a/docker/thirdparties/docker-compose/hive/scripts/hive-metastore.sh +++ b/docker/thirdparties/docker-compose/hive/scripts/hive-metastore.sh @@ -27,6 +27,15 @@ sleep 10s # new cases should use separate dir hadoop fs -mkdir -p /user/doris/suites/ +lockfile1="/mnt/scripts/run-data.lock" + +# wait lockfile +while [ -f "$lockfile1" ]; do + sleep 10 +done + +touch "$lockfile1" + DATA_DIR="/mnt/scripts/data/" find "${DATA_DIR}" -type f -name "run.sh" -print0 | xargs -0 -n 1 -P 10 -I {} sh -c ' START_TIME=$(date +%s) @@ -36,6 +45,17 @@ find "${DATA_DIR}" -type f -name "run.sh" -print0 | xargs -0 -n 1 -P 10 -I {} sh echo "Script: {} executed in $EXECUTION_TIME seconds" ' +rm -f "$lockfile1" + +lockfile2="/mnt/scripts/download-data.lock" + +# wait lockfile +while [ -f "$lockfile2" ]; do + sleep 10 +done + +touch "$lockfile2" + # if you test in your local,better use # to annotation section about tpch1.db if [[ ! -d "/mnt/scripts/tpch1.db" ]]; then echo "/mnt/scripts/tpch1.db does not exist" @@ -48,11 +68,6 @@ else echo "/mnt/scripts/tpch1.db exist, continue !"
fi -# put data file -## put tpch1 -hadoop fs -mkdir -p /user/doris/ -hadoop fs -put /mnt/scripts/tpch1.db /user/doris/ - # paimon data file is small and update frequently, so we download it every time rm -rf "/mnt/scripts/paimon1" echo "/mnt/scripts/paimon1 does not exist" @@ -62,6 +77,13 @@ tar -zxf paimon1.tar.gz rm -rf paimon1.tar.gz cd - +rm -f "$lockfile2" + +# put data file +## put tpch1 +hadoop fs -mkdir -p /user/doris/ +hadoop fs -put /mnt/scripts/tpch1.db /user/doris/ + ## put paimon1 hadoop fs -put /mnt/scripts/paimon1 /user/doris/ From f5406e9d355f667a1b5a5c043ea164421c24311d Mon Sep 17 00:00:00 2001 From: Socrates Date: Fri, 27 Sep 2024 15:28:26 +0800 Subject: [PATCH 7/8] ignore db2 failed --- docker/thirdparties/run-thirdparties-docker.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/thirdparties/run-thirdparties-docker.sh b/docker/thirdparties/run-thirdparties-docker.sh index ca7a4eec75c453..e6a5bb43771e16 100755 --- a/docker/thirdparties/run-thirdparties-docker.sh +++ b/docker/thirdparties/run-thirdparties-docker.sh @@ -701,7 +701,7 @@ for compose in "${!pids[@]}"; do # prevent wait return 1 make the script exit status=0 wait "${pids[$compose]}" || status=$? 
- if [ $status -ne 0 ]; then + if [ $status -ne 0 ] && [ "$compose" != "db2" ]; then echo "docker $compose started failed with status $status" echo "print start_${compose}.log" cat start_${compose}.log exit 1 fi done From 48ce73d677e6418140d02ccdb854c42857a65cad Mon Sep 17 00:00:00 2001 From: Socrates Date: Fri, 27 Sep 2024 17:00:26 +0800 Subject: [PATCH 8/8] lock when downloading tvf_data --- .../hive/scripts/hive-metastore.sh | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/docker/thirdparties/docker-compose/hive/scripts/hive-metastore.sh b/docker/thirdparties/docker-compose/hive/scripts/hive-metastore.sh index 154550abd9b577..46d1437db29feb 100755 --- a/docker/thirdparties/docker-compose/hive/scripts/hive-metastore.sh +++ b/docker/thirdparties/docker-compose/hive/scripts/hive-metastore.sh @@ -77,16 +77,6 @@ tar -zxf paimon1.tar.gz rm -rf paimon1.tar.gz cd - -rm -f "$lockfile2" - -# put data file -## put tpch1 -hadoop fs -mkdir -p /user/doris/ -hadoop fs -put /mnt/scripts/tpch1.db /user/doris/ - -## put paimon1 -hadoop fs -put /mnt/scripts/paimon1 /user/doris/ - # download tvf_data if [[ ! -d "/mnt/scripts/tvf_data" ]]; then echo "/mnt/scripts/tvf_data does not exist" @@ -99,6 +89,16 @@ else echo "/mnt/scripts/tvf_data exist, continue !" fi +rm -f "$lockfile2" + +# put data file +## put tpch1 +hadoop fs -mkdir -p /user/doris/ +hadoop fs -put /mnt/scripts/tpch1.db /user/doris/ + +## put paimon1 +hadoop fs -put /mnt/scripts/paimon1 /user/doris/ + ## put tvf_data hadoop fs -put /mnt/scripts/tvf_data /user/doris/