From f041bacfba546ec74424432efac8b89c084c178c Mon Sep 17 00:00:00 2001
From: Wenqi Li
Date: Sat, 10 Apr 2021 14:29:51 +0100
Subject: [PATCH] test runner

Signed-off-by: Wenqi Li
---
 .github/workflows/cron.yml        | 22 ++++++++--------------
 .github/workflows/docker.yml      |  8 ++++++++
 .github/workflows/integration.yml |  3 +++
 .github/workflows/pythonapp.yml   |  7 +++----
 .github/workflows/setupapp.yml    |  9 +++------
 5 files changed, 25 insertions(+), 24 deletions(-)

diff --git a/.github/workflows/cron.yml b/.github/workflows/cron.yml
index 761b1f7ebc..273eec3763 100644
--- a/.github/workflows/cron.yml
+++ b/.github/workflows/cron.yml
@@ -40,17 +40,13 @@ jobs:
         nvidia-smi
         export CUDA_VISIBLE_DEVICES=$(python -m tests.utils)
         echo $CUDA_VISIBLE_DEVICES
-        stop_time=$((LAUNCH_DELAY + $(date +%s)))
-        while [ $(date +%s) -lt $stop_time ]; do
-          python -c 'import torch; torch.rand(5, 3, device=torch.device("cuda:0"))';
-        done
+        python -c $'import torch\na,b=torch.zeros(1,device="cuda:0"),torch.zeros(1,device="cuda:1");\nwhile True:print(a,b)' > /dev/null &
         python -c "import torch; print(torch.__version__); print('{} of GPUs available'.format(torch.cuda.device_count()))"
         python -c 'import torch; print(torch.rand(5, 3, device=torch.device("cuda:0")))'
         BUILD_MONAI=1 ./runtests.sh --coverage --unittests # unit tests with coverage report
-        export CUDA_VISIBLE_DEVICES=$(python -m tests.utils)
-        echo $CUDA_VISIBLE_DEVICES
         BUILD_MONAI=1 ./runtests.sh --coverage --net # integration tests with coverage report
         coverage xml
+        if pgrep python; then pkill python; fi
     - name: Upload coverage
       uses: codecov/codecov-action@v1
       with:
@@ -83,17 +79,13 @@ jobs:
         nvidia-smi
         export CUDA_VISIBLE_DEVICES=$(python -m tests.utils)
         echo $CUDA_VISIBLE_DEVICES
-        stop_time=$((LAUNCH_DELAY + $(date +%s)))
-        while [ $(date +%s) -lt $stop_time ]; do
-          python -c 'import torch; torch.rand(5, 3, device=torch.device("cuda:0"))';
-        done
+        python -c $'import torch\na,b=torch.zeros(1,device="cuda:0"),torch.zeros(1,device="cuda:1");\nwhile True:print(a,b)' > /dev/null &
         python -c "import torch; print(torch.__version__); print('{} of GPUs available'.format(torch.cuda.device_count()))"
         python -c 'import torch; print(torch.rand(5, 3, device=torch.device("cuda:0")))'
         BUILD_MONAI=1 ./runtests.sh --coverage --unittests # unit tests with coverage report
-        export CUDA_VISIBLE_DEVICES=$(python -m tests.utils)
-        echo $CUDA_VISIBLE_DEVICES
         BUILD_MONAI=1 ./runtests.sh --coverage --net # integration tests with coverage report
         coverage xml
+        if pgrep python; then pkill python; fi
     - name: Upload coverage
       uses: codecov/codecov-action@v1
       with:
@@ -115,14 +107,14 @@ jobs:
         nvidia-smi
         export CUDA_VISIBLE_DEVICES=$(python -m tests.utils)
         echo $CUDA_VISIBLE_DEVICES
+        python -c $'import torch\na,b=torch.zeros(1,device="cuda:0"),torch.zeros(1,device="cuda:1");\nwhile True:print(a,b)' > /dev/null &
         python -c "import torch; print(torch.__version__); print('{} of GPUs available'.format(torch.cuda.device_count()))"
         python -c 'import torch; print(torch.rand(5,3, device=torch.device("cuda:0")))'
         ngc --version
         BUILD_MONAI=1 ./runtests.sh --coverage --pytype --unittests # unit tests with pytype checks, coverage report
-        export CUDA_VISIBLE_DEVICES=$(python -m tests.utils)
-        echo $CUDA_VISIBLE_DEVICES
         BUILD_MONAI=1 ./runtests.sh --coverage --net # integration tests with coverage report
         coverage xml
+        if pgrep python; then pkill python; fi
     - name: Upload coverage
       uses: codecov/codecov-action@v1
       with:
@@ -159,5 +151,7 @@ jobs:
       run: |
         export CUDA_VISIBLE_DEVICES=${{ steps.monai-install.outputs.devices }}
         echo $CUDA_VISIBLE_DEVICES
+        python -c $'import torch\na,b=torch.zeros(1,device="cuda:0"),torch.zeros(1,device="cuda:1");\nwhile True:print(a,b)' > /dev/null &
         cd /opt/tutorials
         $(pwd)/runner.sh
+        if pgrep python; then pkill python; fi
diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index 54449a8dba..abd3a2fc7e 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -87,11 +87,15 @@ jobs:
     steps:
     - name: Import
       run: |
+        export CUDA_VISIBLE_DEVICES=$(python -m tests.utils)
+        echo $CUDA_VISIBLE_DEVICES
+        python -c $'import torch\na,b=torch.zeros(1,device="cuda:0"),torch.zeros(1,device="cuda:1");\nwhile True:print(a,b)' > /dev/null &
         python -c 'import monai; monai.config.print_config()'
         cd /opt/monai
         ls -al
         ngc --version
         python -m tests.min_tests
+        if pgrep python; then pkill python; fi
       env:
         QUICKTEST: True
 
@@ -104,10 +108,14 @@ jobs:
     steps:
     - name: Import
       run: |
+        export CUDA_VISIBLE_DEVICES=$(python -m tests.utils)
+        echo $CUDA_VISIBLE_DEVICES
+        python -c $'import torch\na,b=torch.zeros(1,device="cuda:0"),torch.zeros(1,device="cuda:1");\nwhile True:print(a,b)' > /dev/null &
         python -c 'import monai; monai.config.print_config()'
         cd /opt/monai
         ls -al
         ngc --version
         python -m tests.min_tests
+        if pgrep python; then pkill python; fi
       env:
         QUICKTEST: True
diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml
index 5f160e6e8e..227e0b3b71 100644
--- a/.github/workflows/integration.yml
+++ b/.github/workflows/integration.yml
@@ -42,9 +42,12 @@ jobs:
         nvidia-smi
         export CUDA_VISIBLE_DEVICES=$(python -m tests.utils)
         echo $CUDA_VISIBLE_DEVICES
+        python -c $'import torch\na,b=torch.zeros(1,device="cuda:0"),torch.zeros(1,device="cuda:1");\nwhile True:print(a,b)' > /dev/null &
         python -c "import torch; print(torch.__version__); print('{} of GPUs available'.format(torch.cuda.device_count()))"
         python -c 'import torch; print(torch.rand(5,3, device=torch.device("cuda:0")))'
         BUILD_MONAI=1 ./runtests.sh --unittests --net
+        if pgrep python; then pkill python; fi
+      shell: bash
     - name: Add reaction
       uses: peter-evans/create-or-update-comment@v1
       with:
diff --git a/.github/workflows/pythonapp.yml b/.github/workflows/pythonapp.yml
index 30e6102965..bdf35be1c9 100644
--- a/.github/workflows/pythonapp.yml
+++ b/.github/workflows/pythonapp.yml
@@ -290,10 +290,7 @@ jobs:
         sleep $LAUNCH_DELAY
         export CUDA_VISIBLE_DEVICES=$(coverage run -m tests.utils)
         echo $CUDA_VISIBLE_DEVICES
-        stop_time=$((LAUNCH_DELAY + $(date +%s)))
-        while [ $(date +%s) -lt $stop_time ]; do
-          python -c 'import torch; torch.rand(5, 3, device=torch.device("cuda:0"))';
-        done
+        python -c $'import torch\na,b=torch.zeros(1,device="cuda:0"),torch.zeros(1,device="cuda:1");\nwhile True:print(a,b)' > /dev/null &
         python -c "import torch; print(torch.__version__); print('{} of GPUs available'.format(torch.cuda.device_count()))"
         python -c 'import torch; print(torch.rand(5, 3, device=torch.device("cuda:0")))'
         python -c "import monai; monai.config.print_config()"
@@ -303,6 +300,8 @@ jobs:
           coverage run -m tests.clang_format_utils
         fi
         coverage xml
+        if pgrep python; then pkill python; fi
+      shell: bash
     - name: Upload coverage
       uses: codecov/codecov-action@v1
       with:
diff --git a/.github/workflows/setupapp.yml b/.github/workflows/setupapp.yml
index dc65141fe8..8b3292c4a4 100644
--- a/.github/workflows/setupapp.yml
+++ b/.github/workflows/setupapp.yml
@@ -47,17 +47,14 @@ jobs:
         nvidia-smi
         export CUDA_VISIBLE_DEVICES=$(python -m tests.utils)
         echo $CUDA_VISIBLE_DEVICES
-        stop_time=$((LAUNCH_DELAY + $(date +%s)))
-        while [ $(date +%s) -lt $stop_time ]; do
-          python -c 'import torch; torch.rand(5, 3, device=torch.device("cuda:0"))';
-        done
+        python -c $'import torch\na,b=torch.zeros(1,device="cuda:0"),torch.zeros(1,device="cuda:1");\nwhile True:print(a,b)' > /dev/null &
         python -c "import torch; print(torch.__version__); print('{} of GPUs available'.format(torch.cuda.device_count()))"
         python -c 'import torch; print(torch.rand(5, 3, device=torch.device("cuda:0")))'
         BUILD_MONAI=1 ./runtests.sh --coverage --unittests # unit tests with coverage report
-        export CUDA_VISIBLE_DEVICES=$(python -m tests.utils)
-        echo $CUDA_VISIBLE_DEVICES
         BUILD_MONAI=1 ./runtests.sh --coverage --net # integration tests with coverage report
         coverage xml
+        if pgrep python; then pkill python; fi
+      shell: bash
     - name: Upload coverage
       uses: codecov/codecov-action@v1
       with:
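
Note on the recurring snippet: each "run:" step above now starts the same bash
$'...' one-liner in the background (stdout redirected to /dev/null) and stops it
at the end of the step with "if pgrep python; then pkill python; fi". Expanded
into ordinary Python, the one-liner is equivalent to the sketch below; it assumes
the devices selected via CUDA_VISIBLE_DEVICES appear as cuda:0 and cuda:1, and it
appears intended to keep both GPUs occupied for the duration of the step:

    # Readable expansion of:
    #   python -c $'import torch\na,b=torch.zeros(1,device="cuda:0"),torch.zeros(1,device="cuda:1");\nwhile True:print(a,b)'
    import torch

    a = torch.zeros(1, device="cuda:0")  # pin a small tensor on the first visible GPU
    b = torch.zeros(1, device="cuda:1")  # and on the second visible GPU (assumes two devices are exposed)
    while True:
        print(a, b)  # loops until the step's pkill cleanup terminates the process

Run in the background, this replaces the timed loop removed by the patch
(stop_time=$((LAUNCH_DELAY + $(date +%s))) ... while [ $(date +%s) -lt $stop_time ]),
which only touched cuda:0 and stopped once LAUNCH_DELAY expired.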