diff --git a/.github/workflows/clang_tidy.yml b/.github/workflows/clang_tidy.yml index 65a9643288..545b3be65f 100644 --- a/.github/workflows/clang_tidy.yml +++ b/.github/workflows/clang_tidy.yml @@ -17,7 +17,7 @@ jobs: # to do this, we need to run cmake with the following options: # -DCMAKE_EXPORT_COMPILE_COMMANDS=ON # I'm fairly sure that we don't need to run the actual build, - # but it's not obvious to me how to do this. So, I'm just going + # but it's not obvious to me how to do this. So, I'm just going # to run a full build for now, and we can FIXME this later. - name: Prequisites for build run: | diff --git a/.github/workflows/linters.yml b/.github/workflows/linters.yml new file mode 100644 index 0000000000..55ee691f14 --- /dev/null +++ b/.github/workflows/linters.yml @@ -0,0 +1,19 @@ +name: Linters, using linux +on: + workflow_dispatch: + pull_request: + branches: + - main +jobs: + build: + name: Linters + runs-on: ubuntu-22.04 + steps: + - name: Python check + uses: actions/setup-python@v4 + with: + python-version: '3.10' + - uses: actions/checkout@v4 + - name: Run linters + run: | + bash .github/workflows/scripts_new/linters.sh diff --git a/.github/workflows/linux_x86.yml b/.github/workflows/linux_x86.yml index 2902822d5b..21623204e1 100644 --- a/.github/workflows/linux_x86.yml +++ b/.github/workflows/linux_x86.yml @@ -1,4 +1,4 @@ -name: Linux x86 on-demand +name: Linux x86 on: pull_request: branches: @@ -6,6 +6,7 @@ on: workflow_dispatch: jobs: build: + name: Linux x86 Build runs-on: ubuntu-22.04 steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/scripts/ti_build/vulkan.py b/.github/workflows/scripts/ti_build/vulkan.py index e7e8d06796..b75a80fc41 100644 --- a/.github/workflows/scripts/ti_build/vulkan.py +++ b/.github/workflows/scripts/ti_build/vulkan.py @@ -19,7 +19,9 @@ def setup_vulkan(): u = platform.uname() if u.system == "Linux": - url = 
f"https://sdk.lunarg.com/sdk/download/{VULKAN_VERSION}/linux/vulkansdk-linux-x86_64-{VULKAN_VERSION}.tar.xz" + url = ( + f"https://sdk.lunarg.com/sdk/download/{VULKAN_VERSION}/linux/vulkansdk-linux-x86_64-{VULKAN_VERSION}.tar.xz" + ) prefix = get_cache_home() / f"vulkan-{VULKAN_VERSION}" download_dep(url, prefix, strip=1) diff --git a/.github/workflows/scripts_new/linters.sh b/.github/workflows/scripts_new/linters.sh new file mode 100644 index 0000000000..9adf829aeb --- /dev/null +++ b/.github/workflows/scripts_new/linters.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +set -ex + +python -V +pwd +ls +uname -a + +# python + C++ +# ============= + +pip install pre-commit +pre-commit run -a --show-diff-on-failure + +# python +# ====== + +pip install pyright +# Need to deal with C++ linkage issues first (and possibly +# some other things), before we can turn on pyright + +pip install isort +# TODO: run isort on all python files, and commit those, then +# uncomment the following line: +# isort --check-only --diff python + +# C++ +# === + +# TODO: figure out how to run clang-tidy diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d6d70a6617..55d0e839e4 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -3,7 +3,7 @@ ci: autoupdate_commit_msg: '[misc] Update pre-commit hooks' default_language_version: - python: python3.12 + python: python3.10 exclude: ^((tests/python/test_exception)\.py$|external/) repos: @@ -11,7 +11,7 @@ repos: rev: 25.1.0 hooks: - id: black - language_version: python3.12 + language_version: python3.10 args: ['-l', '120'] - repo: https://github.com/pre-commit/mirrors-clang-format diff --git a/python/taichi/tools/vtk.py b/python/taichi/tools/vtk.py index 596ec3c308..ef4e4832d8 100644 --- a/python/taichi/tools/vtk.py +++ b/python/taichi/tools/vtk.py @@ -22,6 +22,8 @@ def write_vtk(scalar_field, filename): zcoords = np.array([0, 1]) elif dimensions == 3: zcoords = np.arange(0, field_shape[2]) + else: + raise ValueError("dimensions should be 2 
or 3") gridToVTK( filename, x=np.arange(0, field_shape[0]), diff --git a/python/tools/markdown_link_check.py b/python/tools/markdown_link_check.py index 6f0ed9f52a..0c12c500df 100644 --- a/python/tools/markdown_link_check.py +++ b/python/tools/markdown_link_check.py @@ -6,10 +6,11 @@ error_found = False # Track if any errors are found + def check_markdown_links(file_path, base_dir=None): """ Check all links in a Markdown file, including anchor references. - + Args: file_path: Path to the Markdown file base_dir: Base directory for relative links (defaults to file's directory) @@ -17,95 +18,91 @@ def check_markdown_links(file_path, base_dir=None): global error_found if base_dir is None: base_dir = os.path.dirname(os.path.abspath(file_path)) - - with open(file_path, 'r', encoding='utf-8') as f: + + with open(file_path, "r", encoding="utf-8") as f: content = f.read() - + # Find all links and image references - link_pattern = r'\[.*?\]\((.*?)\)|!\[.*?\]\((.*?)\)' + link_pattern = r"\[.*?\]\((.*?)\)|!\[.*?\]\((.*?)\)" matches = re.findall(link_pattern, content) - + # Combine both capturing groups (links and images) links = [match[0] or match[1] for match in matches if match[0] or match[1]] - + for link in links: parsed = urlparse(link) - + # Skip mailto and external links - if parsed.scheme in ('http', 'https', 'mailto'): + if parsed.scheme in ("http", "https", "mailto"): print(f"[-] External link (not checked): {link}") continue - + # Handle anchor-only links if not parsed.path and parsed.fragment: check_anchor(file_path, parsed.fragment) continue - + # Handle relative paths if not parsed.scheme and not parsed.netloc: full_path = os.path.normpath(os.path.join(base_dir, parsed.path)) - + # Check if file exists if not os.path.exists(full_path): print(f"❌ Broken link: {link} (File not found: {full_path})") error_found = True continue - + # Check anchor in local file if parsed.fragment: - if full_path.endswith('.md'): + if full_path.endswith(".md"): check_anchor(full_path, 
parsed.fragment) else: # For non-markdown files, we can't check anchors print(f"⚠️ Anchor in non-Markdown file (not checked): {link}") + def check_anchor(md_file_path, anchor): """ Check if an anchor exists in a Markdown file. - + Args: md_file_path: Path to the Markdown file anchor: Anchor to check (without #) """ global error_found try: - with open(md_file_path, 'r', encoding='utf-8') as f: + with open(md_file_path, "r", encoding="utf-8") as f: content = f.read() - + # Improved anchor cleaning: remove non-alphanum except hyphens, collapse multiple hyphens, strip hyphens def clean_anchor(s): - s = s.lower().replace(' ', '-') - s = re.sub(r'[^a-z0-9\-]', '', s) - s = re.sub(r'-+', '-', s) - s = s.strip('-') + s = s.lower().replace(" ", "-") + s = re.sub(r"[^a-z0-9\-]", "", s) + s = re.sub(r"-+", "-", s) + s = s.strip("-") return s normalized_anchor = clean_anchor(anchor) - + # Pattern for Markdown headers - header_pattern = r'^#+\s+(.*)$' - + header_pattern = r"^#+\s+(.*)$" + found = False available_anchors = [] - for line in content.split('\n'): + for line in content.split("\n"): match = re.match(header_pattern, line) if match: header_text = match.group(1) anchor_dash = clean_anchor(header_text) - anchor_underscore = re.sub(r'[^a-z0-9\-]', '', header_text.lower().replace(' ', '_')) - anchor_nospace = re.sub(r'[^a-z0-9\-]', '', header_text.replace(' ', '')) - anchor_raw = re.sub(r'[^a-z0-9\-]', '', header_text) - possible_anchors = [ - anchor_dash, - anchor_underscore, - anchor_nospace, - anchor_raw - ] + anchor_underscore = re.sub(r"[^a-z0-9\-]", "", header_text.lower().replace(" ", "_")) + anchor_nospace = re.sub(r"[^a-z0-9\-]", "", header_text.replace(" ", "")) + anchor_raw = re.sub(r"[^a-z0-9\-]", "", header_text) + possible_anchors = [anchor_dash, anchor_underscore, anchor_nospace, anchor_raw] available_anchors.append(anchor_dash) if normalized_anchor in possible_anchors: found = True break - + if not found: print(f"❌ Broken anchor: #{anchor} in 
{md_file_path}") print(f" Available anchors in this file:") @@ -115,6 +112,7 @@ def clean_anchor(s): except Exception as e: print(f"⚠️ Error checking anchor #{anchor} in {md_file_path}: {str(e)}") + def find_markdown_files(root_dir): """ Recursively find all .md files under root_dir. @@ -122,11 +120,12 @@ def find_markdown_files(root_dir): md_files = [] for dirpath, _, filenames in os.walk(root_dir): for filename in filenames: - if filename.lower().endswith('.md'): + if filename.lower().endswith(".md"): md_files.append(os.path.join(dirpath, filename)) return md_files -if __name__ == '__main__': + +if __name__ == "__main__": parser = argparse.ArgumentParser(description="Check Markdown links in a directory or a single Markdown file.") parser.add_argument("path", help="Path to the root directory or a Markdown file") args = parser.parse_args() @@ -139,7 +138,7 @@ def find_markdown_files(root_dir): if not md_files: print(f"No Markdown files found in {input_path}") exit(0) - elif os.path.isfile(input_path) and input_path.lower().endswith('.md'): + elif os.path.isfile(input_path) and input_path.lower().endswith(".md"): md_files = [input_path] else: print(f"Error: {input_path} is not a directory or a Markdown (.md) file.") diff --git a/taichi/program/sparse_solver.cpp b/taichi/program/sparse_solver.cpp index c775847271..3a8cea13e4 100644 --- a/taichi/program/sparse_solver.cpp +++ b/taichi/program/sparse_solver.cpp @@ -221,17 +221,17 @@ void CuSparseSolver::reorder(const CuSparseMatrix &A) { assert(nullptr != h_csr_val_B_); assert(nullptr != h_map_B_from_A_); - CUDADriver::get_instance().memcpy_device_to_host(h_csr_row_ptr_B_, d_csrRowPtrA, - sizeof(int) * (rowsA + 1)); - CUDADriver::get_instance().memcpy_device_to_host(h_csr_col_ind_B_, d_csrColIndA, - sizeof(int) * nnzA); + CUDADriver::get_instance().memcpy_device_to_host( + h_csr_row_ptr_B_, d_csrRowPtrA, sizeof(int) * (rowsA + 1)); + CUDADriver::get_instance().memcpy_device_to_host( + h_csr_col_ind_B_, d_csrColIndA, 
sizeof(int) * nnzA); CUDADriver::get_instance().memcpy_device_to_host(h_csrValA, d_csrValA, sizeof(float) * nnzA); // compoute h_Q_ - CUSOLVERDriver::get_instance().csSpXcsrsymamdHost(cusolver_handle_, rowsA, - nnzA, descr_, h_csr_row_ptr_B_, - h_csr_col_ind_B_, h_Q_); + CUSOLVERDriver::get_instance().csSpXcsrsymamdHost( + cusolver_handle_, rowsA, nnzA, descr_, h_csr_row_ptr_B_, h_csr_col_ind_B_, + h_Q_); CUDADriver::get_instance().malloc((void **)&d_Q_, sizeof(int) * colsA); CUDADriver::get_instance().memcpy_host_to_device((void *)d_Q_, (void *)h_Q_, sizeof(int) * (colsA)); @@ -255,9 +255,11 @@ void CuSparseSolver::reorder(const CuSparseMatrix &A) { sizeof(int) * (rowsA + 1)); CUDADriver::get_instance().malloc((void **)&d_csr_col_ind_B_, sizeof(int) * nnzA); - CUDADriver::get_instance().malloc((void **)&d_csr_val_B_, sizeof(float) * nnzA); - CUDADriver::get_instance().memcpy_host_to_device( - (void *)d_csr_row_ptr_B_, (void *)h_csr_row_ptr_B_, sizeof(int) * (rowsA + 1)); + CUDADriver::get_instance().malloc((void **)&d_csr_val_B_, + sizeof(float) * nnzA); + CUDADriver::get_instance().memcpy_host_to_device((void *)d_csr_row_ptr_B_, + (void *)h_csr_row_ptr_B_, + sizeof(int) * (rowsA + 1)); CUDADriver::get_instance().memcpy_host_to_device( (void *)d_csr_col_ind_B_, (void *)h_csr_col_ind_B_, sizeof(int) * nnzA); CUDADriver::get_instance().memcpy_host_to_device( diff --git a/taichi/program/sparse_solver.h b/taichi/program/sparse_solver.h index 57b2203d5f..48da79d3b4 100644 --- a/taichi/program/sparse_solver.h +++ b/taichi/program/sparse_solver.h @@ -97,12 +97,12 @@ class CuSparseSolver : public SparseSolver { int *d_Q_{nullptr}; int *h_csr_row_ptr_B_{nullptr}; /* n+1 */ int *h_csr_col_ind_B_{nullptr}; /* nnzA */ - float *h_csr_val_B_{nullptr}; /* nnzA */ + float *h_csr_val_B_{nullptr}; /* nnzA */ int *h_map_B_from_A_{nullptr}; /* nnzA */ int *d_csr_row_ptr_B_{nullptr}; /* n+1 */ int *d_csr_col_ind_B_{nullptr}; /* nnzA */ - float *d_csr_val_B_{nullptr}; /* nnzA */ - // 
NOLINTEND + float *d_csr_val_B_{nullptr}; /* nnzA */ + // NOLINTEND public: CuSparseSolver(); explicit CuSparseSolver(SolverType solver_type) : solver_type_(solver_type) { diff --git a/taichi/python/export_lang.cpp b/taichi/python/export_lang.cpp index f4a209290c..bf56db31d8 100644 --- a/taichi/python/export_lang.cpp +++ b/taichi/python/export_lang.cpp @@ -350,11 +350,11 @@ void export_lang(py::module &m) { .def("insert_snode_access_flag", &ASTBuilder::insert_snode_access_flag) .def("reset_snode_access_flag", &ASTBuilder::reset_snode_access_flag); - auto device_capability_config = py::class_( - m, "DeviceCapabilityConfig"); + auto device_capability_config = + py::class_(m, "DeviceCapabilityConfig"); - auto compiled_kernel_data = py::class_( - m, "CompiledKernelData"); + auto compiled_kernel_data = + py::class_(m, "CompiledKernelData"); py::class_(m, "Program") .def(py::init<>()) diff --git a/tests/python/test_tools_markdown_check.py b/tests/python/test_tools_markdown_check.py index cc5a0ed7a9..fa2d525567 100644 --- a/tests/python/test_tools_markdown_check.py +++ b/tests/python/test_tools_markdown_check.py @@ -18,6 +18,7 @@ def temp_dir(): with tempfile.TemporaryDirectory() as d: yield d + def write_md(base_dir, filename, content): path = os.path.join(base_dir, filename) os.makedirs(os.path.dirname(path), exist_ok=True) @@ -25,6 +26,7 @@ def write_md(base_dir, filename, content): f.write(content) return path + def test_find_markdown_files(temp_dir): write_md(temp_dir, "a.md", "# Title") write_md(temp_dir, "b.txt", "not markdown") @@ -35,6 +37,7 @@ def test_find_markdown_files(temp_dir): assert any(f.endswith("a.md") for f in files) assert any(f.endswith("c.md") for f in files) + def test_check_markdown_links_valid(temp_dir, capsys): md = "# Title\n[Link](other.md)\n" other = "# Other" @@ -44,6 +47,7 @@ def test_check_markdown_links_valid(temp_dir, capsys): out = capsys.readouterr().out assert "❌" not in out + def test_check_markdown_links_broken_file(temp_dir, 
capsys): md = "# Title\n[Missing](missing.md)\n" write_md(temp_dir, "main.md", md) @@ -51,6 +55,7 @@ def test_check_markdown_links_broken_file(temp_dir, capsys): out = capsys.readouterr().out assert "❌ Broken link" in out + def test_check_anchor_found(temp_dir, capsys): md = "# My Header\n" path = write_md(temp_dir, "doc.md", md) @@ -58,6 +63,7 @@ def test_check_anchor_found(temp_dir, capsys): out = capsys.readouterr().out assert "❌" not in out + def test_check_anchor_not_found(temp_dir, capsys): md = "# My Header\n" path = write_md(temp_dir, "doc.md", md) @@ -65,6 +71,7 @@ def test_check_anchor_not_found(temp_dir, capsys): out = capsys.readouterr().out assert "❌ Broken anchor" in out + def test_check_anchor_symbol_removal(temp_dir, capsys): md = "# My `Header`.\n" path = write_md(temp_dir, "doc.md", md) @@ -72,6 +79,7 @@ def test_check_anchor_symbol_removal(temp_dir, capsys): out = capsys.readouterr().out assert "❌" not in out + def test_external_and_mailto_links(temp_dir, capsys): md = "# Title\n[Google](https://google.com)\n[Email](mailto:test@example.com)\n" path = write_md(temp_dir, "main.md", md) @@ -79,6 +87,7 @@ def test_external_and_mailto_links(temp_dir, capsys): out = capsys.readouterr().out assert "External link" in out + def test_anchor_only_link(temp_dir, capsys): md = "# Section 1\n[Go](#section-1)\n" path = write_md(temp_dir, "main.md", md) @@ -89,7 +98,7 @@ def test_anchor_only_link(temp_dir, capsys): def test_pr_review(temp_dir, capsys): md = """ - + - [PR review & merging checklist](#pr-review-merging-checklist) ### PR review & merging checklist