From 06e9c45cddd560669d49b189f348a7d3234a834c Mon Sep 17 00:00:00 2001
From: Hugh Perkins
Date: Mon, 9 Jun 2025 22:28:32 -0700
Subject: [PATCH 1/4] make test_ad_ndarray flaky

---
 tests/python/test_ad_ndarray.py | 52 +++++++++++++++++++++++++++++++++
 1 file changed, 52 insertions(+)

diff --git a/tests/python/test_ad_ndarray.py b/tests/python/test_ad_ndarray.py
index e7f99bd615..e29c1aa4ee 100644
--- a/tests/python/test_ad_ndarray.py
+++ b/tests/python/test_ad_ndarray.py
@@ -14,6 +14,7 @@

 @pytest.mark.skipif(not has_pytorch(), reason="Pytorch not installed.")
 @test_utils.test(arch=archs_support_ndarray_ad, default_fp=ti.f64)
+@pytest.mark.flaky(reruns=5)
 def test_simple_demo():
     @test_utils.torch_op(output_shapes=[(1,)])
     @ti.kernel
@@ -31,6 +32,7 @@ def test(x: ti.types.ndarray(), y: ti.types.ndarray()):

 @pytest.mark.skipif(not has_pytorch(), reason="Pytorch not installed.")
 @test_utils.test(arch=archs_support_ndarray_ad, default_fp=ti.f64)
+@pytest.mark.flaky(reruns=5)
 def test_ad_reduce():
     @test_utils.torch_op(output_shapes=[(1,)])
     @ti.kernel
@@ -85,6 +87,7 @@ def test(x: ti.types.ndarray(), y: ti.types.ndarray()):
 )
 @pytest.mark.skipif(not has_pytorch(), reason="Pytorch not installed.")
 @test_utils.test(arch=archs_support_ndarray_ad, default_fp=ti.f64)
+@pytest.mark.flaky(reruns=5)
 def test_poly(tifunc):
     s = (4,)

@@ -101,6 +104,7 @@ def test(x: ti.types.ndarray(), y: ti.types.ndarray()):

 @pytest.mark.skipif(not has_pytorch(), reason="Pytorch not installed.")
 @test_utils.test(arch=archs_support_ndarray_ad, default_fp=ti.f64)
+@pytest.mark.flaky(reruns=5)
 def test_ad_select():
     s = (4,)

@@ -117,6 +121,7 @@ def test(x: ti.types.ndarray(), y: ti.types.ndarray(), z: ti.types.ndarray()):


 @test_utils.test(arch=archs_support_ndarray_ad, default_fp=ti.f64)
+@pytest.mark.flaky(reruns=5)
 def test_ad_sum():
     N = 10

@@ -148,6 +153,7 @@ def compute_sum(a: ti.types.ndarray(), b: ti.types.ndarray(), p: ti.types.ndarra


 @test_utils.test(arch=archs_support_ndarray_ad, default_fp=ti.f64)
+@pytest.mark.flaky(reruns=5)
 def test_ad_sum_local_atomic():
     N = 10
     a = ti.ndarray(ti.f32, shape=N, needs_grad=True)
@@ -179,6 +185,7 @@ def compute_sum(a: ti.types.ndarray(), b: ti.types.ndarray(), p: ti.types.ndarra


 @test_utils.test(arch=archs_support_ndarray_ad, default_fp=ti.f64, require=ti.extension.adstack)
+@pytest.mark.flaky(reruns=5)
 def test_ad_power():
     N = 10
     a = ti.ndarray(ti.f32, shape=N, needs_grad=True)
@@ -210,6 +217,7 @@ def power(a: ti.types.ndarray(), b: ti.types.ndarray(), p: ti.types.ndarray()):


 @test_utils.test(arch=archs_support_ndarray_ad, default_fp=ti.f64, require=ti.extension.adstack)
+@pytest.mark.flaky(reruns=5)
 def test_ad_fibonacci():
     N = 15
     a = ti.ndarray(ti.f32, shape=N, needs_grad=True)
@@ -248,6 +256,7 @@ def fib(a: ti.types.ndarray(), b: ti.types.ndarray(), c: ti.types.ndarray(), f:


 @test_utils.test(arch=archs_support_ndarray_ad, default_fp=ti.f32, require=ti.extension.adstack)
+@pytest.mark.flaky(reruns=5)
 def test_ad_fibonacci_index():
     N = 5
     M = 10
@@ -280,6 +289,7 @@ def fib(a: ti.types.ndarray(), b: ti.types.ndarray(), f: ti.types.ndarray()):


 @test_utils.test(arch=archs_support_ndarray_ad, default_fp=ti.f64, require=ti.extension.adstack)
+@pytest.mark.flaky(reruns=5)
 def test_integer_stack():
     N = 5
     a = ti.ndarray(ti.f32, shape=N, needs_grad=True)
@@ -319,6 +329,7 @@ def int_stack(a: ti.types.ndarray(), b: ti.types.ndarray(), c: ti.types.ndarray(


 @test_utils.test(arch=archs_support_ndarray_ad, default_fp=ti.f64, require=ti.extension.adstack)
+@pytest.mark.flaky(reruns=5)
 def test_double_for_loops():
     N = 5
     a = ti.ndarray(ti.f32, shape=N, needs_grad=True)
@@ -357,6 +368,7 @@ def double_for(a: ti.types.ndarray(), b: ti.types.ndarray(), c: ti.types.ndarray


 @test_utils.test(arch=archs_support_ndarray_ad, default_fp=ti.f64, require=ti.extension.adstack)
+@pytest.mark.flaky(reruns=5)
 def test_double_for_loops_more_nests():
     N = 6
     a = ti.ndarray(ti.f32, shape=N, needs_grad=True)
@@ -403,6 +415,7 @@ def double_for(a: ti.types.ndarray(), b: ti.types.ndarray(), c: ti.types.ndarray


 @test_utils.test(arch=archs_support_ndarray_ad, default_fp=ti.f64, require=ti.extension.adstack)
+@pytest.mark.flaky(reruns=5)
 def test_complex_body():
     N = 5
     a = ti.ndarray(ti.f32, shape=N, needs_grad=True)
@@ -442,6 +455,7 @@ def complex(a: ti.types.ndarray(), c: ti.types.ndarray(), f: ti.types.ndarray(),


 @test_utils.test(arch=archs_support_ndarray_ad, default_fp=ti.f64, require=ti.extension.adstack)
+@pytest.mark.flaky(reruns=5)
 def test_mixed_inner_loops():
     x = ti.ndarray(dtype=ti.f32, shape=(1,), needs_grad=True)
     arr = ti.ndarray(dtype=ti.f32, shape=(5))
@@ -464,6 +478,7 @@ def mixed_inner_loops(x: ti.types.ndarray(), arr: ti.types.ndarray(), loss: ti.t


 @test_utils.test(arch=archs_support_ndarray_ad, default_fp=ti.f64, require=ti.extension.adstack)
+@pytest.mark.flaky(reruns=5)
 def test_mixed_inner_loops_tape():
     x = ti.ndarray(dtype=ti.f32, shape=(1,), needs_grad=True)
     arr = ti.ndarray(dtype=ti.f32, shape=(5))
@@ -484,6 +499,7 @@ def mixed_inner_loops_tape(x: ti.types.ndarray(), arr: ti.types.ndarray(), loss:


 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack, ad_stack_size=32)
+@pytest.mark.flaky(reruns=5)
 def test_inner_loops_local_variable_fixed_stack_size_kernel_grad():
     x = ti.ndarray(dtype=float, shape=(1), needs_grad=True)
     arr = ti.ndarray(dtype=float, shape=(2), needs_grad=True)
@@ -510,6 +526,7 @@ def test_inner_loops_local_variable(x: ti.types.ndarray(), arr: ti.types.ndarray


 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack, ad_stack_size=0)
+@pytest.mark.flaky(reruns=5)
 def test_inner_loops_local_variable_adaptive_stack_size_tape():
     x = ti.ndarray(dtype=float, shape=(1), needs_grad=True)
     arr = ti.ndarray(dtype=float, shape=(2), needs_grad=True)
@@ -535,6 +552,7 @@ def test_inner_loops_local_variable(x: ti.types.ndarray(), arr: ti.types.ndarray


 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack, ad_stack_size=0)
+@pytest.mark.flaky(reruns=5)
 def test_more_inner_loops_local_variable_adaptive_stack_size_tape():
     x = ti.ndarray(dtype=float, shape=(1), needs_grad=True)
     arr = ti.ndarray(dtype=float, shape=(2), needs_grad=True)
@@ -562,6 +580,7 @@ def test_more_inner_loops_local_variable(x: ti.types.ndarray(), arr: ti.types.nd


 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack, ad_stack_size=32)
+@pytest.mark.flaky(reruns=5)
 def test_more_inner_loops_local_variable_fixed_stack_size_tape():
     x = ti.ndarray(dtype=float, shape=(1), needs_grad=True)
     arr = ti.ndarray(dtype=float, shape=(2), needs_grad=True)
@@ -589,6 +608,7 @@ def test_more_inner_loops_local_variable(x: ti.types.ndarray(), arr: ti.types.nd


 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack, ad_stack_size=32)
+@pytest.mark.flaky(reruns=5)
 def test_stacked_inner_loops_local_variable_fixed_stack_size_kernel_grad():
     x = ti.ndarray(dtype=float, shape=(), needs_grad=True)
     arr = ti.ndarray(dtype=float, shape=(2), needs_grad=True)
@@ -621,6 +641,7 @@ def test_stacked_inner_loops_local_variable(

 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack, ad_stack_size=32)
+@pytest.mark.flaky(reruns=5)
 def test_stacked_mixed_ib_and_non_ib_inner_loops_local_variable_fixed_stack_size_kernel_grad():
     x = ti.ndarray(dtype=float, shape=(), needs_grad=True)
     arr = ti.ndarray(dtype=float, shape=(2), needs_grad=True)
@@ -654,6 +675,7 @@ def test_stacked_mixed_ib_and_non_ib_inner_loops_local_variable(


 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack, ad_stack_size=0)
+@pytest.mark.flaky(reruns=5)
 def test_stacked_inner_loops_local_variable_adaptive_stack_size_kernel_grad():
     x = ti.ndarray(dtype=float, shape=(), needs_grad=True)
     arr = ti.ndarray(dtype=float, shape=(2), needs_grad=True)
@@ -720,6 +742,7 @@ def test_stacked_mixed_ib_and_non_ib_inner_loops_local_variable(


 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack, ad_stack_size=0)
+@pytest.mark.flaky(reruns=5)
 def test_large_for_loops_adaptive_stack_size():
     x = ti.ndarray(dtype=float, shape=(), needs_grad=True)
     arr = ti.ndarray(dtype=float, shape=(2), needs_grad=True)
@@ -740,6 +763,7 @@ def test_large_loop(x: ti.types.ndarray(), arr: ti.types.ndarray(), loss: ti.typ


 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack, ad_stack_size=1)
+@pytest.mark.flaky(reruns=5)
 def test_large_for_loops_fixed_stack_size():
     x = ti.ndarray(dtype=float, shape=(), needs_grad=True)
     arr = ti.ndarray(dtype=float, shape=(2), needs_grad=True)
@@ -760,6 +784,7 @@ def test_large_loop(x: ti.types.ndarray(), arr: ti.types.ndarray(), loss: ti.typ


 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack)
+@pytest.mark.flaky(reruns=5)
 def test_multiple_ib():
     x = ti.ndarray(float, (), needs_grad=True)
     y = ti.ndarray(float, (), needs_grad=True)
@@ -781,6 +806,7 @@ def compute_y(x: ti.types.ndarray(), y: ti.types.ndarray()):


 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack)
+@pytest.mark.flaky(reruns=5)
 def test_multiple_ib_multiple_outermost():
     x = ti.ndarray(float, (), needs_grad=True)
     y = ti.ndarray(float, (), needs_grad=True)
@@ -807,6 +833,7 @@ def compute_y(x: ti.types.ndarray(), y: ti.types.ndarray()):


 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack)
+@pytest.mark.flaky(reruns=5)
 def test_multiple_ib_multiple_outermost_mixed():
     x = ti.ndarray(float, (), needs_grad=True)
     y = ti.ndarray(float, (), needs_grad=True)
@@ -835,6 +862,7 @@ def compute_y(x: ti.types.ndarray(), y: ti.types.ndarray()):


 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack)
+@pytest.mark.flaky(reruns=5)
 def test_multiple_ib_mixed():
     x = ti.ndarray(float, (), needs_grad=True)
     y = ti.ndarray(float, (), needs_grad=True)
@@ -860,6 +888,7 @@ def compute_y(x: ti.types.ndarray(), y: ti.types.ndarray()):


 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack)
+@pytest.mark.flaky(reruns=5)
 def test_multiple_ib_deeper():
     x = ti.ndarray(float, (), needs_grad=True)
     y = ti.ndarray(float, (), needs_grad=True)
@@ -886,6 +915,7 @@ def compute_y(x: ti.types.ndarray(), y: ti.types.ndarray()):


 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack)
+@pytest.mark.flaky(reruns=5)
 def test_multiple_ib_deeper_non_scalar():
     N = 10
     x = ti.ndarray(float, shape=N, needs_grad=True)
@@ -915,6 +945,7 @@ def compute_y(x: ti.types.ndarray(), y: ti.types.ndarray()):


 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack)
+@pytest.mark.flaky(reruns=5)
 def test_multiple_ib_inner_mixed():
     x = ti.ndarray(float, (), needs_grad=True)
     y = ti.ndarray(float, (), needs_grad=True)
@@ -945,6 +976,7 @@ def compute_y(x: ti.types.ndarray(), y: ti.types.ndarray()):


 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack)
+@pytest.mark.flaky(reruns=5)
 def test_ib_global_load():
     N = 10
     a = ti.ndarray(ti.f32, shape=N, needs_grad=True)
@@ -975,6 +1007,7 @@ def compute(a: ti.types.ndarray(), b: ti.types.ndarray(), p: ti.types.ndarray())


 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack)
+@pytest.mark.flaky(reruns=5)
 def test_ad_if_simple():
     x = ti.ndarray(ti.f32, shape=(), needs_grad=True)
     y = ti.ndarray(ti.f32, shape=(), needs_grad=True)
@@ -994,6 +1027,7 @@ def func(x: ti.types.ndarray(), y: ti.types.ndarray()):


 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack)
+@pytest.mark.flaky(reruns=5)
 def test_ad_if():
     x = ti.ndarray(ti.f32, shape=2, needs_grad=True)
     y = ti.ndarray(ti.f32, shape=2, needs_grad=True)
@@ -1020,6 +1054,7 @@ def func(i: ti.i32, x: ti.types.ndarray(), y: ti.types.ndarray()):


 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack)
+@pytest.mark.flaky(reruns=5)
 def test_ad_if_nested():
     n = 20
     x = ti.ndarray(ti.f32, shape=n, needs_grad=True)
@@ -1056,6 +1091,7 @@ def func(x: ti.types.ndarray(), y: ti.types.ndarray(), z: ti.types.ndarray()):


 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack)
+@pytest.mark.flaky(reruns=5)
 def test_ad_if_mutable():
     x = ti.ndarray(ti.f32, shape=2, needs_grad=True)
     y = ti.ndarray(ti.f32, shape=2, needs_grad=True)
@@ -1083,6 +1119,7 @@ def func(i: ti.i32, x: ti.types.ndarray(), y: ti.types.ndarray()):


 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack)
+@pytest.mark.flaky(reruns=5)
 def test_ad_if_parallel():
     x = ti.ndarray(ti.f32, shape=2, needs_grad=True)
     y = ti.ndarray(ti.f32, shape=2, needs_grad=True)
@@ -1109,6 +1146,7 @@ def func(x: ti.types.ndarray(), y: ti.types.ndarray()):


 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack)
+@pytest.mark.flaky(reruns=5)
 def test_ad_if_parallel_f64():
     x = ti.ndarray(ti.f64, shape=2, needs_grad=True)
     y = ti.ndarray(ti.f64, shape=2, needs_grad=True)
@@ -1135,6 +1173,7 @@ def func(x: ti.types.ndarray(), y: ti.types.ndarray()):


 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack)
+@pytest.mark.flaky(reruns=5)
 def test_ad_if_parallel_complex():
     x = ti.ndarray(ti.f32, shape=2, needs_grad=True)
     y = ti.ndarray(ti.f32, shape=2, needs_grad=True)
@@ -1161,12 +1200,14 @@ def func(x: ti.types.ndarray(), y: ti.types.ndarray()):


 @test_utils.test(arch=archs_support_ndarray_ad)
+@pytest.mark.flaky(reruns=5)
 def test_ad_ndarray_i32():
     with pytest.raises(TaichiRuntimeError, match=r"i32 is not supported for ndarray"):
         ti.ndarray(ti.i32, shape=3, needs_grad=True)


 @test_utils.test(arch=archs_support_ndarray_ad)
+@pytest.mark.flaky(reruns=5)
 def test_ad_sum_vector():
     N = 10

@@ -1194,6 +1235,7 @@ def compute_sum(a: ti.types.ndarray(), p: ti.types.ndarray()):


 @test_utils.test(arch=archs_support_ndarray_ad)
+@pytest.mark.flaky(reruns=5)
 def test_ad_multiple_tapes():
     N = 10

@@ -1231,6 +1273,7 @@ def compute_sum(a: ti.types.ndarray(), p: ti.types.ndarray()):


 @test_utils.test(arch=archs_support_ndarray_ad)
+@pytest.mark.flaky(reruns=5)
 def test_ad_set_loss_grad():
     x = ti.ndarray(dtype=ti.f32, shape=(), needs_grad=True)
     loss = ti.ndarray(dtype=ti.f32, shape=(), needs_grad=True)
@@ -1263,6 +1306,7 @@ def compute_3(x: ti.types.ndarray(), loss: ti.types.ndarray()):

 @pytest.mark.skipif(not has_pytorch(), reason="Pytorch not installed.")
 @test_utils.test(arch=archs_support_ndarray_ad)
+@pytest.mark.flaky(reruns=5)
 def test_ad_mixed_with_torch():
     @test_utils.torch_op(output_shapes=[(1,)])
     @ti.kernel
@@ -1282,6 +1326,7 @@ def compute_sum(a: ti.types.ndarray(), p: ti.types.ndarray()):

 @pytest.mark.skipif(not has_pytorch(), reason="Pytorch not installed.")
 @test_utils.test(arch=archs_support_ndarray_ad)
+@pytest.mark.flaky(reruns=5)
 def test_ad_tape_throw():
     N = 4

@@ -1323,6 +1368,7 @@ def compute_sum(a: ti.types.ndarray(), p: ti.types.ndarray()):

 @pytest.mark.skipif(not has_pytorch(), reason="Pytorch not installed.")
 @test_utils.test(arch=archs_support_ndarray_ad)
+@pytest.mark.flaky(reruns=5)
 def test_tape_torch_tensor_grad_none():
     N = 3

@@ -1347,6 +1393,7 @@ def test(x: ti.types.ndarray(), y: ti.types.ndarray()):


 @test_utils.test(arch=archs_support_ndarray_ad)
+@pytest.mark.flaky(reruns=5)
 def test_grad_tensor_in_kernel():
     N = 10

@@ -1368,6 +1415,7 @@ def test(x: ti.types.ndarray(), b: ti.types.ndarray()):

 @pytest.mark.skipif(not has_pytorch(), reason="Pytorch not installed.")
 @test_utils.test(arch=archs_support_ndarray_ad)
+@pytest.mark.flaky(reruns=5)
 def test_tensor_shape():
     N = 3

@@ -1392,6 +1440,7 @@ def test(x: ti.types.ndarray(), y: ti.types.ndarray()):


 @test_utils.test(arch=archs_support_ndarray_ad)
+@pytest.mark.flaky(reruns=5)
 def test_ndarray_needs_grad_false():
     N = 3

@@ -1416,6 +1465,7 @@ def test(x: ti.types.ndarray(needs_grad=False), y: ti.types.ndarray()):

 @pytest.mark.skipif(not has_pytorch(), reason="Pytorch not installed.")
 @test_utils.test(arch=archs_support_ndarray_ad)
+@pytest.mark.flaky(reruns=5)
 def test_torch_needs_grad_false():
     N = 3

@@ -1439,6 +1489,7 @@ def test(x: ti.types.ndarray(needs_grad=False), y: ti.types.ndarray()):


 @test_utils.test(arch=archs_support_ndarray_ad)
+@pytest.mark.flaky(reruns=5)
 def test_ad_vector_arg():
     N = 10

@@ -1467,6 +1518,7 @@ def compute_sum(a: ti.types.ndarray(), p: ti.types.ndarray(), z: ti.math.vec2):


 @test_utils.test(arch=archs_support_ndarray_ad)
+@pytest.mark.flaky(reruns=5)
 def test_hash_encoder_simple():
     @ti.kernel
     def hash_encoder_kernel(
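A note on the two mark spellings used in this series: patch 1 above writes @pytest.mark.flaky(reruns=5), which is the keyword accepted by the pytest-rerunfailures plugin, while the patches below write @pytest.mark.flaky(retries=5), the keyword accepted by the pytest-retry plugin. The effective behavior therefore depends on which plugin the test environment installs; with neither present, pytest merely warns about an unknown mark (or errors under --strict-markers) and runs each test once. A minimal, self-contained sketch of both spellings, assuming one of those two plugins is available (the test names here are illustrative, not taken from this suite):

    import pytest

    @pytest.mark.flaky(reruns=5)   # pytest-rerunfailures: re-run a failing test up to 5 times
    def test_rerunfailures_spelling():
        assert True

    @pytest.mark.flaky(retries=5)  # pytest-retry: retry a failing test up to 5 times
    def test_retry_spelling():
        assert True

Either way, the mark only bounds how many extra attempts a nondeterministic test gets; it does not change what the test computes.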
From 015e6c23bfcc299d26bfd5a540de5294606798deb Mon Sep 17 00:00:00 2001
From: Hugh Perkins
Date: Tue, 10 Jun 2025 08:42:07 -0700
Subject: [PATCH 2/4] use single module level mark

---
 tests/python/test_ad_ndarray.py | 57 +++++----------------------------
 1 file changed, 5 insertions(+), 52 deletions(-)

diff --git a/tests/python/test_ad_ndarray.py b/tests/python/test_ad_ndarray.py
index e29c1aa4ee..340e2f32be 100644
--- a/tests/python/test_ad_ndarray.py
+++ b/tests/python/test_ad_ndarray.py
@@ -9,12 +9,14 @@
 if has_pytorch():
     import torch

+
+pytestmark = pytest.mark.flaky(retries=5)
+
 archs_support_ndarray_ad = [ti.cpu, ti.cuda]


 @pytest.mark.skipif(not has_pytorch(), reason="Pytorch not installed.")
 @test_utils.test(arch=archs_support_ndarray_ad, default_fp=ti.f64)
-@pytest.mark.flaky(reruns=5)
 def test_simple_demo():
     @test_utils.torch_op(output_shapes=[(1,)])
     @ti.kernel
@@ -32,7 +34,6 @@ def test(x: ti.types.ndarray(), y: ti.types.ndarray()):

 @pytest.mark.skipif(not has_pytorch(), reason="Pytorch not installed.")
 @test_utils.test(arch=archs_support_ndarray_ad, default_fp=ti.f64)
-@pytest.mark.flaky(reruns=5)
 def test_ad_reduce():
     @test_utils.torch_op(output_shapes=[(1,)])
     @ti.kernel
@@ -87,7 +88,6 @@ def test(x: ti.types.ndarray(), y: ti.types.ndarray()):
 )
 @pytest.mark.skipif(not has_pytorch(), reason="Pytorch not installed.")
 @test_utils.test(arch=archs_support_ndarray_ad, default_fp=ti.f64)
-@pytest.mark.flaky(reruns=5)
 def test_poly(tifunc):
     s = (4,)

@@ -104,7 +104,6 @@ def test(x: ti.types.ndarray(), y: ti.types.ndarray()):

 @pytest.mark.skipif(not has_pytorch(), reason="Pytorch not installed.")
 @test_utils.test(arch=archs_support_ndarray_ad, default_fp=ti.f64)
-@pytest.mark.flaky(reruns=5)
 def test_ad_select():
     s = (4,)

@@ -121,7 +120,6 @@ def test(x: ti.types.ndarray(), y: ti.types.ndarray(), z: ti.types.ndarray()):


 @test_utils.test(arch=archs_support_ndarray_ad, default_fp=ti.f64)
-@pytest.mark.flaky(reruns=5)
 def test_ad_sum():
     N = 10

@@ -153,7 +151,6 @@ def compute_sum(a: ti.types.ndarray(), b: ti.types.ndarray(), p: ti.types.ndarra


 @test_utils.test(arch=archs_support_ndarray_ad, default_fp=ti.f64)
-@pytest.mark.flaky(reruns=5)
 def test_ad_sum_local_atomic():
     N = 10
     a = ti.ndarray(ti.f32, shape=N, needs_grad=True)
@@ -185,7 +182,6 @@ def compute_sum(a: ti.types.ndarray(), b: ti.types.ndarray(), p: ti.types.ndarra


 @test_utils.test(arch=archs_support_ndarray_ad, default_fp=ti.f64, require=ti.extension.adstack)
-@pytest.mark.flaky(reruns=5)
 def test_ad_power():
     N = 10
     a = ti.ndarray(ti.f32, shape=N, needs_grad=True)
@@ -217,7 +213,6 @@ def power(a: ti.types.ndarray(), b: ti.types.ndarray(), p: ti.types.ndarray()):


 @test_utils.test(arch=archs_support_ndarray_ad, default_fp=ti.f64, require=ti.extension.adstack)
-@pytest.mark.flaky(reruns=5)
 def test_ad_fibonacci():
     N = 15
     a = ti.ndarray(ti.f32, shape=N, needs_grad=True)
@@ -256,7 +251,6 @@ def fib(a: ti.types.ndarray(), b: ti.types.ndarray(), c: ti.types.ndarray(), f:


 @test_utils.test(arch=archs_support_ndarray_ad, default_fp=ti.f32, require=ti.extension.adstack)
-@pytest.mark.flaky(reruns=5)
 def test_ad_fibonacci_index():
     N = 5
     M = 10
@@ -289,7 +283,6 @@ def fib(a: ti.types.ndarray(), b: ti.types.ndarray(), f: ti.types.ndarray()):


 @test_utils.test(arch=archs_support_ndarray_ad, default_fp=ti.f64, require=ti.extension.adstack)
-@pytest.mark.flaky(reruns=5)
 def test_integer_stack():
     N = 5
     a = ti.ndarray(ti.f32, shape=N, needs_grad=True)
@@ -329,7 +322,6 @@ def int_stack(a: ti.types.ndarray(), b: ti.types.ndarray(), c: ti.types.ndarray(


 @test_utils.test(arch=archs_support_ndarray_ad, default_fp=ti.f64, require=ti.extension.adstack)
-@pytest.mark.flaky(reruns=5)
 def test_double_for_loops():
     N = 5
     a = ti.ndarray(ti.f32, shape=N, needs_grad=True)
@@ -368,7 +360,6 @@ def double_for(a: ti.types.ndarray(), b: ti.types.ndarray(), c: ti.types.ndarray


 @test_utils.test(arch=archs_support_ndarray_ad, default_fp=ti.f64, require=ti.extension.adstack)
-@pytest.mark.flaky(reruns=5)
 def test_double_for_loops_more_nests():
     N = 6
     a = ti.ndarray(ti.f32, shape=N, needs_grad=True)
@@ -415,7 +406,6 @@ def double_for(a: ti.types.ndarray(), b: ti.types.ndarray(), c: ti.types.ndarray


 @test_utils.test(arch=archs_support_ndarray_ad, default_fp=ti.f64, require=ti.extension.adstack)
-@pytest.mark.flaky(reruns=5)
 def test_complex_body():
     N = 5
     a = ti.ndarray(ti.f32, shape=N, needs_grad=True)
@@ -455,7 +445,6 @@ def complex(a: ti.types.ndarray(), c: ti.types.ndarray(), f: ti.types.ndarray(),


 @test_utils.test(arch=archs_support_ndarray_ad, default_fp=ti.f64, require=ti.extension.adstack)
-@pytest.mark.flaky(reruns=5)
 def test_mixed_inner_loops():
     x = ti.ndarray(dtype=ti.f32, shape=(1,), needs_grad=True)
     arr = ti.ndarray(dtype=ti.f32, shape=(5))
@@ -478,7 +467,6 @@ def mixed_inner_loops(x: ti.types.ndarray(), arr: ti.types.ndarray(), loss: ti.t

 @test_utils.test(arch=archs_support_ndarray_ad, default_fp=ti.f64, require=ti.extension.adstack)
-@pytest.mark.flaky(reruns=5)
 def test_mixed_inner_loops_tape():
     x = ti.ndarray(dtype=ti.f32, shape=(1,), needs_grad=True)
     arr = ti.ndarray(dtype=ti.f32, shape=(5))
@@ -499,7 +487,6 @@ def mixed_inner_loops_tape(x: ti.types.ndarray(), arr: ti.types.ndarray(), loss:


 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack, ad_stack_size=32)
-@pytest.mark.flaky(reruns=5)
 def test_inner_loops_local_variable_fixed_stack_size_kernel_grad():
     x = ti.ndarray(dtype=float, shape=(1), needs_grad=True)
     arr = ti.ndarray(dtype=float, shape=(2), needs_grad=True)
@@ -526,7 +513,6 @@ def test_inner_loops_local_variable(x: ti.types.ndarray(), arr: ti.types.ndarray


 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack, ad_stack_size=0)
-@pytest.mark.flaky(reruns=5)
 def test_inner_loops_local_variable_adaptive_stack_size_tape():
     x = ti.ndarray(dtype=float, shape=(1), needs_grad=True)
     arr = ti.ndarray(dtype=float, shape=(2), needs_grad=True)
@@ -552,7 +538,6 @@ def test_inner_loops_local_variable(x: ti.types.ndarray(), arr: ti.types.ndarray


 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack, ad_stack_size=0)
-@pytest.mark.flaky(reruns=5)
 def test_more_inner_loops_local_variable_adaptive_stack_size_tape():
     x = ti.ndarray(dtype=float, shape=(1), needs_grad=True)
     arr = ti.ndarray(dtype=float, shape=(2), needs_grad=True)
@@ -580,7 +565,6 @@ def test_more_inner_loops_local_variable(x: ti.types.ndarray(), arr: ti.types.nd


 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack, ad_stack_size=32)
-@pytest.mark.flaky(reruns=5)
 def test_more_inner_loops_local_variable_fixed_stack_size_tape():
     x = ti.ndarray(dtype=float, shape=(1), needs_grad=True)
     arr = ti.ndarray(dtype=float, shape=(2), needs_grad=True)
@@ -608,7 +592,6 @@ def test_more_inner_loops_local_variable(x: ti.types.ndarray(), arr: ti.types.nd


 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack, ad_stack_size=32)
-@pytest.mark.flaky(reruns=5)
 def test_stacked_inner_loops_local_variable_fixed_stack_size_kernel_grad():
     x = ti.ndarray(dtype=float, shape=(), needs_grad=True)
     arr = ti.ndarray(dtype=float, shape=(2), needs_grad=True)
@@ -641,7 +624,6 @@ def test_stacked_inner_loops_local_variable(


 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack, ad_stack_size=32)
-@pytest.mark.flaky(reruns=5)
 def test_stacked_mixed_ib_and_non_ib_inner_loops_local_variable_fixed_stack_size_kernel_grad():
     x = ti.ndarray(dtype=float, shape=(), needs_grad=True)
     arr = ti.ndarray(dtype=float, shape=(2), needs_grad=True)
@@ -675,7 +657,6 @@ def test_stacked_mixed_ib_and_non_ib_inner_loops_local_variable(


 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack, ad_stack_size=0)
-@pytest.mark.flaky(reruns=5)
 def test_stacked_inner_loops_local_variable_adaptive_stack_size_kernel_grad():
     x = ti.ndarray(dtype=float, shape=(), needs_grad=True)
     arr = ti.ndarray(dtype=float, shape=(2), needs_grad=True)
@@ -742,7 +723,6 @@ def test_stacked_mixed_ib_and_non_ib_inner_loops_local_variable(


 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack, ad_stack_size=0)
-@pytest.mark.flaky(reruns=5)
 def test_large_for_loops_adaptive_stack_size():
     x = ti.ndarray(dtype=float, shape=(), needs_grad=True)
     arr = ti.ndarray(dtype=float, shape=(2), needs_grad=True)
@@ -763,7 +743,6 @@ def test_large_loop(x: ti.types.ndarray(), arr: ti.types.ndarray(), loss: ti.typ

 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack, ad_stack_size=1)
-@pytest.mark.flaky(reruns=5)
 def test_large_for_loops_fixed_stack_size():
     x = ti.ndarray(dtype=float, shape=(), needs_grad=True)
     arr = ti.ndarray(dtype=float, shape=(2), needs_grad=True)
@@ -784,7 +763,6 @@ def test_large_loop(x: ti.types.ndarray(), arr: ti.types.ndarray(), loss: ti.typ


 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack)
-@pytest.mark.flaky(reruns=5)
 def test_multiple_ib():
     x = ti.ndarray(float, (), needs_grad=True)
     y = ti.ndarray(float, (), needs_grad=True)
@@ -806,7 +784,6 @@ def compute_y(x: ti.types.ndarray(), y: ti.types.ndarray()):


 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack)
-@pytest.mark.flaky(reruns=5)
 def test_multiple_ib_multiple_outermost():
     x = ti.ndarray(float, (), needs_grad=True)
     y = ti.ndarray(float, (), needs_grad=True)
@@ -833,7 +810,6 @@ def compute_y(x: ti.types.ndarray(), y: ti.types.ndarray()):


 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack)
-@pytest.mark.flaky(reruns=5)
 def test_multiple_ib_multiple_outermost_mixed():
     x = ti.ndarray(float, (), needs_grad=True)
     y = ti.ndarray(float, (), needs_grad=True)
@@ -862,7 +838,6 @@ def compute_y(x: ti.types.ndarray(), y: ti.types.ndarray()):


 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack)
-@pytest.mark.flaky(reruns=5)
 def test_multiple_ib_mixed():
     x = ti.ndarray(float, (), needs_grad=True)
     y = ti.ndarray(float, (), needs_grad=True)
@@ -888,7 +863,6 @@ def compute_y(x: ti.types.ndarray(), y: ti.types.ndarray()):


 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack)
-@pytest.mark.flaky(reruns=5)
 def test_multiple_ib_deeper():
     x = ti.ndarray(float, (), needs_grad=True)
     y = ti.ndarray(float, (), needs_grad=True)
@@ -915,7 +889,6 @@ def compute_y(x: ti.types.ndarray(), y: ti.types.ndarray()):


 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack)
-@pytest.mark.flaky(reruns=5)
 def test_multiple_ib_deeper_non_scalar():
     N = 10
     x = ti.ndarray(float, shape=N, needs_grad=True)
@@ -945,7 +918,6 @@ def compute_y(x: ti.types.ndarray(), y: ti.types.ndarray()):


 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack)
-@pytest.mark.flaky(reruns=5)
 def test_multiple_ib_inner_mixed():
     x = ti.ndarray(float, (), needs_grad=True)
     y = ti.ndarray(float, (), needs_grad=True)
@@ -976,7 +948,6 @@ def compute_y(x: ti.types.ndarray(), y: ti.types.ndarray()):


 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack)
-@pytest.mark.flaky(reruns=5)
 def test_ib_global_load():
     N = 10
     a = ti.ndarray(ti.f32, shape=N, needs_grad=True)
@@ -1007,7 +978,6 @@ def compute(a: ti.types.ndarray(), b: ti.types.ndarray(), p: ti.types.ndarray())


 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack)
-@pytest.mark.flaky(reruns=5)
 def test_ad_if_simple():
     x = ti.ndarray(ti.f32, shape=(), needs_grad=True)
     y = ti.ndarray(ti.f32, shape=(), needs_grad=True)
@@ -1027,7 +997,6 @@ def func(x: ti.types.ndarray(), y: ti.types.ndarray()):


 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack)
-@pytest.mark.flaky(reruns=5)
 def test_ad_if():
     x = ti.ndarray(ti.f32, shape=2, needs_grad=True)
     y = ti.ndarray(ti.f32, shape=2, needs_grad=True)
@@ -1054,7 +1023,6 @@ def func(i: ti.i32, x: ti.types.ndarray(), y: ti.types.ndarray()):


 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack)
-@pytest.mark.flaky(reruns=5)
 def test_ad_if_nested():
     n = 20
     x = ti.ndarray(ti.f32, shape=n, needs_grad=True)
@@ -1091,7 +1059,6 @@ def func(x: ti.types.ndarray(), y: ti.types.ndarray(), z: ti.types.ndarray()):


 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack)
-@pytest.mark.flaky(reruns=5)
 def test_ad_if_mutable():
     x = ti.ndarray(ti.f32, shape=2, needs_grad=True)
     y = ti.ndarray(ti.f32, shape=2, needs_grad=True)
@@ -1119,7 +1086,6 @@ def func(i: ti.i32, x: ti.types.ndarray(), y: ti.types.ndarray()):


 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack)
-@pytest.mark.flaky(reruns=5)
 def test_ad_if_parallel():
     x = ti.ndarray(ti.f32, shape=2, needs_grad=True)
     y = ti.ndarray(ti.f32, shape=2, needs_grad=True)
@@ -1146,7 +1112,6 @@ def func(x: ti.types.ndarray(), y: ti.types.ndarray()):


 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack)
-@pytest.mark.flaky(reruns=5)
 def test_ad_if_parallel_f64():
     x = ti.ndarray(ti.f64, shape=2, needs_grad=True)
     y = ti.ndarray(ti.f64, shape=2, needs_grad=True)
@@ -1173,7 +1138,6 @@ def func(x: ti.types.ndarray(), y: ti.types.ndarray()):


 @test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack)
-@pytest.mark.flaky(reruns=5)
 def test_ad_if_parallel_complex():
     x = ti.ndarray(ti.f32, shape=2, needs_grad=True)
     y = ti.ndarray(ti.f32, shape=2, needs_grad=True)
@@ -1199,15 +1163,15 @@ def func(x: ti.types.ndarray(), y: ti.types.ndarray()):
     assert x.grad[1] == -0.25


+@pytest.mark.flaky(retries=5)
 @test_utils.test(arch=archs_support_ndarray_ad)
-@pytest.mark.flaky(reruns=5)
 def test_ad_ndarray_i32():
     with pytest.raises(TaichiRuntimeError, match=r"i32 is not supported for ndarray"):
         ti.ndarray(ti.i32, shape=3, needs_grad=True)


 @test_utils.test(arch=archs_support_ndarray_ad)
-@pytest.mark.flaky(reruns=5)
+@pytest.mark.flaky(retries=5)
 def test_ad_sum_vector():
     N = 10

@@ -1235,7 +1199,6 @@ def compute_sum(a: ti.types.ndarray(), p: ti.types.ndarray()):


 @test_utils.test(arch=archs_support_ndarray_ad)
-@pytest.mark.flaky(reruns=5)
 def test_ad_multiple_tapes():
     N = 10

@@ -1273,7 +1236,6 @@ def compute_sum(a: ti.types.ndarray(), p: ti.types.ndarray()):


 @test_utils.test(arch=archs_support_ndarray_ad)
-@pytest.mark.flaky(reruns=5)
 def test_ad_set_loss_grad():
     x = ti.ndarray(dtype=ti.f32, shape=(), needs_grad=True)
     loss = ti.ndarray(dtype=ti.f32, shape=(), needs_grad=True)
@@ -1306,7 +1268,6 @@ def compute_3(x: ti.types.ndarray(), loss: ti.types.ndarray()):

 @pytest.mark.skipif(not has_pytorch(), reason="Pytorch not installed.")
 @test_utils.test(arch=archs_support_ndarray_ad)
-@pytest.mark.flaky(reruns=5)
 def test_ad_mixed_with_torch():
     @test_utils.torch_op(output_shapes=[(1,)])
     @ti.kernel
@@ -1326,7 +1287,6 @@ def compute_sum(a: ti.types.ndarray(), p: ti.types.ndarray()):

 @pytest.mark.skipif(not has_pytorch(), reason="Pytorch not installed.")
 @test_utils.test(arch=archs_support_ndarray_ad)
-@pytest.mark.flaky(reruns=5)
 def test_ad_tape_throw():
     N = 4

@@ -1368,7 +1328,6 @@ def compute_sum(a: ti.types.ndarray(), p: ti.types.ndarray()):

 @pytest.mark.skipif(not has_pytorch(), reason="Pytorch not installed.")
 @test_utils.test(arch=archs_support_ndarray_ad)
-@pytest.mark.flaky(reruns=5)
 def test_tape_torch_tensor_grad_none():
     N = 3

@@ -1393,7 +1352,6 @@ def test(x: ti.types.ndarray(), y: ti.types.ndarray()):


 @test_utils.test(arch=archs_support_ndarray_ad)
-@pytest.mark.flaky(reruns=5)
 def test_grad_tensor_in_kernel():
     N = 10

@@ -1415,7 +1373,6 @@ def test(x: ti.types.ndarray(), b: ti.types.ndarray()):

 @pytest.mark.skipif(not has_pytorch(), reason="Pytorch not installed.")
 @test_utils.test(arch=archs_support_ndarray_ad)
-@pytest.mark.flaky(reruns=5)
 def test_tensor_shape():
     N = 3

@@ -1440,7 +1397,6 @@ def test(x: ti.types.ndarray(), y: ti.types.ndarray()):


 @test_utils.test(arch=archs_support_ndarray_ad)
-@pytest.mark.flaky(reruns=5)
 def test_ndarray_needs_grad_false():
     N = 3

@@ -1465,7 +1421,6 @@ def test(x: ti.types.ndarray(needs_grad=False), y: ti.types.ndarray()):

 @pytest.mark.skipif(not has_pytorch(), reason="Pytorch not installed.")
 @test_utils.test(arch=archs_support_ndarray_ad)
-@pytest.mark.flaky(reruns=5)
 def test_torch_needs_grad_false():
     N = 3

@@ -1489,7 +1444,6 @@ def test(x: ti.types.ndarray(needs_grad=False), y: ti.types.ndarray()):


 @test_utils.test(arch=archs_support_ndarray_ad)
-@pytest.mark.flaky(reruns=5)
 def test_ad_vector_arg():
     N = 10

@@ -1518,7 +1472,6 @@ def compute_sum(a: ti.types.ndarray(), p: ti.types.ndarray(), z: ti.math.vec2):


 @test_utils.test(arch=archs_support_ndarray_ad)
-@pytest.mark.flaky(reruns=5)
 def test_hash_encoder_simple():
     @ti.kernel
     def hash_encoder_kernel(

From 72228d970b4a39046aa652e551f5ad5490d537d9 Mon Sep 17 00:00:00 2001
From: Hugh Perkins
Date: Tue, 10 Jun 2025 09:49:43 -0700
Subject: [PATCH 3/4] mark sparse matrix flaky

---
 tests/python/test_sparse_matrix.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/tests/python/test_sparse_matrix.py b/tests/python/test_sparse_matrix.py
index be3d8f366f..863187f101 100644
--- a/tests/python/test_sparse_matrix.py
+++ b/tests/python/test_sparse_matrix.py
@@ -3,6 +3,8 @@
 import taichi as ti
 from tests import test_utils

+pytestmark = pytest.mark.flaky(retries=5)
+

 @pytest.mark.parametrize(
     "dtype, storage_format",

From e0c2f9dae6ae2a72b73b8012ab46007f7328cbd7 Mon Sep 17 00:00:00 2001
From: Hugh Perkins
Date: Tue, 10 Jun 2025 11:36:32 -0700
Subject: [PATCH 4/4] dummy commit
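The module-level mark used by patches 2 and 3 is core pytest behavior: assigning a mark, or a list of marks, to a module global named pytestmark applies it to every test collected from that module, which is what lets the 52 per-function decorators collapse to a single line per file. Patch 2 also shows that a function-level @pytest.mark.flaky(retries=5) (on test_ad_ndarray_i32 and test_ad_sum_vector) can sit alongside the module-level mark. A minimal sketch of the pattern; the retries= keyword assumes the pytest-retry plugin, and the file and test names are illustrative:

    # test_example.py
    import pytest

    # Applies to every test collected from this module.
    # Stack several marks with a list:
    #   pytestmark = [pytest.mark.flaky(retries=5), pytest.mark.slow]
    pytestmark = pytest.mark.flaky(retries=5)

    def test_covered_by_module_mark():
        assert 1 + 1 == 2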