Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 8 additions & 1 deletion .github/workflows/ci.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,14 @@ jobs:
${{ matrix.os }}-${{ matrix.python-version }}-build-${{ env.cache-name }}-

- name: Run pre-commit
run: pre-commit run -a
run: |
pre-commit run -a || PRE_COMMIT_EXIT_CODE=$?
if [ -n "$PRE_COMMIT_EXIT_CODE" ]; then
echo "Pre-commit failed with exit code $PRE_COMMIT_EXIT_CODE"
echo "Showing git diff:"
git --no-pager diff
exit $PRE_COMMIT_EXIT_CODE
fi

lint-matrix:
needs: [ pre-commit ]
Expand Down
6 changes: 6 additions & 0 deletions copier.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,12 @@ template_might_want_to_use_vcrpy:
default: no
when: "{{ template_uses_python }}"

template_might_want_to_use_python_asyncio:
type: bool
help: Is this template for something that might want to use Python asyncio?
default: no
when: "{{ template_uses_python }}"


_min_copier_version: "9.4"

Expand Down
18 changes: 14 additions & 4 deletions template/.github/workflows/ci.yaml.jinja-base
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,8 @@ jobs:
contents: write # needed for updating dependabot branches

pre-commit:
needs: [ get-values ]
needs:
- get-values
uses: ./.github/workflows/pre-commit.yaml
permissions:
contents: write # needed for mutex
Expand All @@ -29,7 +30,8 @@ jobs:
python-version: {% endraw %}{{ python_version }}{% raw %}

lint-matrix:
needs: [ pre-commit ]
needs:
- pre-commit
strategy:
matrix:
os:
Expand Down Expand Up @@ -121,7 +123,13 @@ jobs:
- name: Run pre-commit
run: |
# skip devcontainer context hash because the template instantiation may make it different every time
SKIP=git-dirty,compute-devcontainer-context-hash pre-commit run -a
SKIP=git-dirty,compute-devcontainer-context-hash pre-commit run -a || PRE_COMMIT_EXIT_CODE=$?
if [ -n "$PRE_COMMIT_EXIT_CODE" ]; then
echo "Pre-commit failed with exit code $PRE_COMMIT_EXIT_CODE"
echo "Showing git diff:"
git --no-pager diff
exit $PRE_COMMIT_EXIT_CODE
fi

- name: Upload pre-commit log if failure
if: ${{ failure() }}
Expand All @@ -133,7 +141,9 @@ jobs:
required-check:
runs-on: {% endraw %}{{ gha_linux_runner }}{% raw %}
timeout-minutes: {% endraw %}{{ gha_short_timeout_minutes }}{% raw %}
needs: [ lint-matrix, get-values ]
needs:
- lint-matrix
- get-values
permissions:
statuses: write # needed for updating status on Dependabot PRs
if: always()
Expand Down
9 changes: 8 additions & 1 deletion template/.github/workflows/pre-commit.yaml.jinja-base
Original file line number Diff line number Diff line change
Expand Up @@ -69,4 +69,11 @@ jobs:
{% endraw %}{{ gha_linux_runner }}{% raw %}-py${{ inputs.python-version }}-node-${{ inputs.node-version}}-${{ env.cache-name }}-

- name: Run pre-commit
run: pre-commit run -a{% endraw %}
run: |
pre-commit run -a || PRE_COMMIT_EXIT_CODE=$?
if [ -n "$PRE_COMMIT_EXIT_CODE" ]; then
echo "Pre-commit failed with exit code $PRE_COMMIT_EXIT_CODE"
echo "Showing git diff:"
git --no-pager diff
exit $PRE_COMMIT_EXIT_CODE
fi{% endraw %}
4 changes: 4 additions & 0 deletions template/copier.yml.jinja-base
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,10 @@ install_aws_ssm_port_forwarding_plugin:
configure_vcrpy:
type: bool
help: Should VCRpy be configured for use during unit testing in Python?
default: no{% endraw %}{% endif %}{% if template_might_want_to_use_python_asyncio %}{% raw %}
configure_python_asyncio:
type: bool
help: Will python code be using asyncio?
default: no{% endraw %}{% endif %}{% raw %}
{% endraw %}
python_version:
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
import asyncio

import pytest
from backend_api.background_tasks import background_task_exceptions
from backend_api.background_tasks import background_tasks_set


async def _wait_for_tasks(tasks_list: list[asyncio.Task[None]]):
_, pending = await asyncio.wait(tasks_list, timeout=5.0)
if pending:
raise RuntimeError(f"There are still pending tasks: {pending}")
Comment on lines +8 to +11
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🧹 Nitpick | 🔵 Trivial

Consider making the timeout configurable.

The 5-second timeout is hardcoded, which might be too short for slow CI environments or unnecessarily long for local development. Consider making it configurable via an environment variable or fixture parameter.

Example:

+import os
+
+# At module level
+DEFAULT_TASK_TIMEOUT = float(os.getenv("ASYNCIO_TASK_TIMEOUT", "5.0"))
+
-async def _wait_for_tasks(tasks_list: list[asyncio.Task[None]]):
-    _, pending = await asyncio.wait(tasks_list, timeout=5.0)
+async def _wait_for_tasks(tasks_list: list[asyncio.Task[None]], timeout: float = DEFAULT_TASK_TIMEOUT):
+    _, pending = await asyncio.wait(tasks_list, timeout=timeout)
     if pending:
         raise RuntimeError(f"There are still pending tasks: {pending}")
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
async def _wait_for_tasks(tasks_list: list[asyncio.Task[None]]):
_, pending = await asyncio.wait(tasks_list, timeout=5.0)
if pending:
raise RuntimeError(f"There are still pending tasks: {pending}")
import os
# At module level
DEFAULT_TASK_TIMEOUT = float(os.getenv("ASYNCIO_TASK_TIMEOUT", "5.0"))
async def _wait_for_tasks(tasks_list: list[asyncio.Task[None]], timeout: float = DEFAULT_TASK_TIMEOUT):
_, pending = await asyncio.wait(tasks_list, timeout=timeout)
if pending:
raise RuntimeError(f"There are still pending tasks: {pending}")



@pytest.fixture(autouse=True)
def fail_on_background_task_errors():
    """Automatically fail tests if ANY background task raises an exception."""
    # Clear leftovers from earlier tests so each test starts with a clean slate.
    background_task_exceptions.clear()

    yield

    # Wait for background tasks to complete (using asyncio.run for sync fixture)
    if background_tasks_set:
        tasks_list = list(background_tasks_set)
        try:
            # Probe for an already-running loop (e.g. when run under pytest-asyncio).
            loop = asyncio.get_running_loop()
        except RuntimeError:
            # No loop running in this thread: safe to create a fresh one.
            asyncio.run(_wait_for_tasks(tasks_list))
        else:
            # NOTE(review): calling run_until_complete() on a loop that is already
            # running raises "This event loop is already running" — confirm this
            # branch is only reached when the loop exists but is NOT running, or
            # provide an async variant of this fixture for async tests.
            loop.run_until_complete(_wait_for_tasks(tasks_list))
Comment on lines +24 to +29
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🔴 Critical

Fix the event loop handling to support async test functions.

The logic on Line 29 attempts to call loop.run_until_complete() on an already-running event loop, which will raise RuntimeError: "This event loop is already running". This occurs when the fixture runs after an async test (e.g., when using pytest-asyncio).

The fixture cannot call run_until_complete on a running loop, nor can it await (since it's not async). You need either:

  1. An async version of this fixture for async tests
  2. A different mechanism that doesn't require executing async code from the fixture teardown

Consider this approach if pytest-asyncio is available:

+import pytest_asyncio
+
+# For async tests
+@pytest_asyncio.fixture(autouse=True)
+async def fail_on_background_task_errors_async():
+    """Automatically fail async tests if ANY background task raises an exception."""
+    background_task_exceptions.clear()
+    
+    yield
+    
+    if background_tasks_set:
+        tasks_list = list(background_tasks_set)
+        await _wait_for_tasks(tasks_list)
+    
+    if background_task_exceptions:
+        pytest.fail(
+            f"Background tasks raised {len(background_task_exceptions)} exception(s):\n"
+            + "\n\n".join(f"{type(e).__name__}: {e}" for e in background_task_exceptions)
+        )
+
+# For sync tests  
 @pytest.fixture(autouse=True)
 def fail_on_background_task_errors():
     """Automatically fail tests if ANY background task raises an exception."""
     background_task_exceptions.clear()
 
     yield
 
-    # Wait for background tasks to complete (using asyncio.run for sync fixture)
+    # Wait for background tasks to complete (only for sync tests)
     if background_tasks_set:
         tasks_list = list(background_tasks_set)
-        try:
-            loop = asyncio.get_running_loop()
-        except RuntimeError:
-            asyncio.run(_wait_for_tasks(tasks_list))
-        else:
-            loop.run_until_complete(_wait_for_tasks(tasks_list))
+        asyncio.run(_wait_for_tasks(tasks_list))
 
     # Fail if any exceptions occurred
     if background_task_exceptions:
         pytest.fail(
             f"Background tasks raised {len(background_task_exceptions)} exception(s):\n"
             + "\n\n".join(f"{type(e).__name__}: {e}" for e in background_task_exceptions)
         )
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
try:
loop = asyncio.get_running_loop()
except RuntimeError:
asyncio.run(_wait_for_tasks(tasks_list))
else:
loop.run_until_complete(_wait_for_tasks(tasks_list))
asyncio.run(_wait_for_tasks(tasks_list))


# Fail if any exceptions occurred
if background_task_exceptions:
pytest.fail(
f"Background tasks raised {len(background_task_exceptions)} exception(s):\n"
+ "\n\n".join(f"{type(e).__name__}: {e}" for e in background_task_exceptions)
)
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
import asyncio
import logging
import traceback
from collections import deque
from weakref import WeakSet

logger = logging.getLogger(__name__)
# WeakSet holds only weak references: a finished task can be garbage-collected
# once no caller references it, so registration here does not keep tasks alive.
background_tasks_set: WeakSet[asyncio.Task[None]] = WeakSet()
# Exceptions harvested from completed background tasks, kept for later
# inspection (e.g. by the test fixture that fails on background errors).
background_task_exceptions: deque[Exception] = deque(
    maxlen=100  # don't grow infinitely in production
)
Comment on lines +9 to +11
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🧹 Nitpick | 🔵 Trivial

Consider making the exception buffer size configurable.

The maxlen=100 limit for background exceptions is hardcoded. For high-throughput applications or debugging scenarios, this might be too small. Consider making it configurable via an environment variable.

Example:

+import os
+
+MAX_EXCEPTIONS = int(os.getenv("MAX_BACKGROUND_EXCEPTIONS", "100"))
+
 background_task_exceptions: deque[Exception] = deque(
-    maxlen=100  # don't grow infinitely in production
+    maxlen=MAX_EXCEPTIONS  # don't grow infinitely in production
 )
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
background_task_exceptions: deque[Exception] = deque(
maxlen=100 # don't grow infinitely in production
)
import os
MAX_EXCEPTIONS = int(os.getenv("MAX_BACKGROUND_EXCEPTIONS", "100"))
background_task_exceptions: deque[Exception] = deque(
maxlen=MAX_EXCEPTIONS # don't grow infinitely in production
)
🤖 Prompt for AI Agents
In template/copier_template_resources/{% if
template_might_want_to_use_python_asyncio %}python_asyncio{% endif
%}/background_tasks.py around lines 9 to 11, the deque maxlen is hardcoded to
100; change it to read an environment variable (e.g.
BACKGROUND_TASK_EXCEPTIONS_MAXLEN) with a sensible default of 100, parse it as
an integer with safe fallback on invalid values, and validate it is a positive
integer before passing it to deque so the buffer size can be configured
per-deployment.

# Store creation tracebacks for debugging
_task_creation_tracebacks: dict[int, str] = {}


def _task_done_callback(task: asyncio.Task[None]):
task_id = id(task)
background_tasks_set.discard(task)
try:
task.result()
except ( # pragma: no cover # hard to unit test this, but it'd be good to think of a way to do so
asyncio.CancelledError
):
_ = _task_creation_tracebacks.pop(task_id, None)
return
except Exception as e: # pragma: no cover # hard to unit test this, but it'd be good to think of a way to do so
creation_tb = _task_creation_tracebacks.pop(task_id, "No traceback available")
logger.exception(f"Unhandled exception in background task\nTask was created from:\n{creation_tb}")
background_task_exceptions.append(e)
Comment on lines +21 to +29
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🧹 Nitpick | 🔵 Trivial

Testing suggestion for exception handlers.

The pragma: no cover comments on Lines 21 and 26 acknowledge testing challenges. Consider adding a test helper that creates a task designed to fail, registers it, and verifies the exception is captured.

Example test:

async def test_background_task_exception_captured():
    async def failing_task():
        raise ValueError("test error")
    
    task = asyncio.create_task(failing_task())
    register_task(task)
    
    await asyncio.sleep(0.1)  # Let task complete
    
    assert len(background_task_exceptions) == 1
    assert isinstance(background_task_exceptions[0], ValueError)

else:
# Clean up on successful completion
_ = _task_creation_tracebacks.pop(task_id, None)


def register_task(task: asyncio.Task[None]) -> None:
    """Track ``task`` so its outcome is observed and its origin recorded.

    Saves the creation-site traceback for later diagnostics, adds the task to
    the module-level weak set, and attaches the done-callback that harvests
    any exception the task raises.
    """
    # Snapshot the call stack at registration time, dropping the final frame so
    # the recorded traceback ends at our caller rather than inside this helper.
    origin_frames = traceback.format_stack()[:-1]
    _task_creation_tracebacks[id(task)] = "".join(origin_frames)

    background_tasks_set.add(task)
    task.add_done_callback(_task_done_callback)
Original file line number Diff line number Diff line change
Expand Up @@ -5,11 +5,16 @@ import pytest
from pydantic import JsonValue
from vcr import VCR

ALLOWED_HOSTS = ["testserver"] # Skip recording any requests to our own server - let them run live
UNREACHABLE_IP_ADDRESS = "192.0.2.1" # RFC 5737 TEST-NET-1
IGNORED_HOSTS = [
"testserver", # Skip recording any requests to our own server - let them run live
UNREACHABLE_IP_ADDRESS, # allow this through VCR in order to be able to test network failure handling
]
ALLOWED_HOSTS: list[str] = []

CUSTOM_ALLOWED_HOSTS: tuple[str, ...] = ()
CUSTOM_IGNORED_HOSTS: tuple[str, ...] = ()

ALLOWED_HOSTS.extend(CUSTOM_ALLOWED_HOSTS)
IGNORED_HOSTS.extend(CUSTOM_IGNORED_HOSTS)
Comment on lines +15 to +17
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟡 Minor

Guard against accidental string “custom hosts” input (would extend by characters).

If a user edits CUSTOM_IGNORED_HOSTS to a single string (common mistake), IGNORED_HOSTS.extend(CUSTOM_IGNORED_HOSTS) will add characters instead of a hostname. Consider a runtime assertion to fail fast.

 CUSTOM_IGNORED_HOSTS: tuple[str, ...] = ()
 
+assert not isinstance(CUSTOM_IGNORED_HOSTS, str), "CUSTOM_IGNORED_HOSTS must be a tuple[str, ...], not a str"
 IGNORED_HOSTS.extend(CUSTOM_IGNORED_HOSTS)
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
CUSTOM_IGNORED_HOSTS: tuple[str, ...] = ()
ALLOWED_HOSTS.extend(CUSTOM_ALLOWED_HOSTS)
IGNORED_HOSTS.extend(CUSTOM_IGNORED_HOSTS)
CUSTOM_IGNORED_HOSTS: tuple[str, ...] = ()
assert not isinstance(CUSTOM_IGNORED_HOSTS, str), "CUSTOM_IGNORED_HOSTS must be a tuple[str, ...], not a str"
IGNORED_HOSTS.extend(CUSTOM_IGNORED_HOSTS)
🤖 Prompt for AI Agents
In template/copier_template_resources/{% if template_might_want_to_use_vcrpy
%}vcrpy_fixtures.py{% endif %} around lines 11 to 13, guard against accidental
assignment of CUSTOM_IGNORED_HOSTS to a single string which would cause
IGNORED_HOSTS.extend(...) to add characters; add a runtime assertion or
explicit type check that CUSTOM_IGNORED_HOSTS is a tuple (or at least a non-str
iterable) of strings and raise a clear TypeError/AssertionError if not, then
safely call IGNORED_HOSTS.extend(CUSTOM_IGNORED_HOSTS).

if (
os.name == "nt"
): # on Windows (in CI), the network calls happen at a lower level socket connection even to our FastAPI test client, and can get automatically blocked. This disables that automatic network guard, which isn't great...but since it's still in place on Linux, any actual problems would hopefully get caught before pushing to CI.
Expand All @@ -18,12 +23,18 @@ if (

@pytest.fixture(autouse=True)
def vcr_config() -> dict[str, list[str]]:
return {"allowed_hosts": ALLOWED_HOSTS, "filter_headers": ["User-Agent"]}
cfg: dict[str, list[str]] = {
"ignore_hosts": IGNORED_HOSTS,
"filter_headers": ["User-Agent"],
}
if ALLOWED_HOSTS:
cfg["allowed_hosts"] = ALLOWED_HOSTS
return cfg


def pytest_recording_configure(
vcr: VCR,
config: pytest.Config, # noqa: ARG001 # the config argument MUST be present (even when unused) or pytest-recording throws an error
vcr: VCR,
):
vcr.match_on = cast(tuple[str, ...], vcr.match_on) # pyright: ignore[reportUnknownMemberType] # I know vcr.match_on is unknown, that's why I'm casting and isinstance-ing it...not sure if there's a different approach pyright prefers
assert isinstance(vcr.match_on, tuple), (
Expand Down
1 change: 1 addition & 0 deletions template/tests/copier_data/data1.yaml.jinja-base
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ ssh_port_number: 12345
use_windows_in_ci: false
{% endraw %}{% if template_might_want_to_install_aws_ssm_port_forwarding_plugin %}{% raw %}install_aws_ssm_port_forwarding_plugin: true{% endraw %}{% endif %}{% raw %}
{% endraw %}{% if template_might_want_to_use_vcrpy %}{% raw %}configure_vcrpy: true{% endraw %}{% endif %}{% raw %}
{% endraw %}{% if template_might_want_to_use_python_asyncio %}{% raw %}configure_python_asyncio: true{% endraw %}{% endif %}{% raw %}
{% endraw %}{% if template_uses_javascript %}{% raw %}
node_version: 22.13.0{% endraw %}{% endif %}{% raw %}
{% endraw %}{% if template_uses_python %}{% raw %}
Expand Down
1 change: 1 addition & 0 deletions template/tests/copier_data/data2.yaml.jinja-base
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ ssh_port_number: 54321
use_windows_in_ci: true
{% endraw %}{% if template_might_want_to_install_aws_ssm_port_forwarding_plugin %}{% raw %}install_aws_ssm_port_forwarding_plugin: false{% endraw %}{% endif %}{% raw %}
{% endraw %}{% if template_might_want_to_use_vcrpy %}{% raw %}configure_vcrpy: false{% endraw %}{% endif %}{% raw %}
{% endraw %}{% if template_might_want_to_use_python_asyncio %}{% raw %}configure_python_asyncio: false{% endraw %}{% endif %}{% raw %}
{% endraw %}{% if template_uses_javascript %}{% raw %}
node_version: 22.14.0{% endraw %}{% endif %}{% raw %}
{% endraw %}{% if template_uses_python %}{% raw %}
Expand Down
1 change: 1 addition & 0 deletions tests/copier_data/data1.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -12,3 +12,4 @@ template_uses_javascript: false
template_uses_vuejs: false
template_might_want_to_install_aws_ssm_port_forwarding_plugin: true
template_might_want_to_use_vcrpy: false
template_might_want_to_use_python_asyncio: true
1 change: 1 addition & 0 deletions tests/copier_data/data2.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -14,3 +14,4 @@ template_uses_javascript: true
template_uses_vuejs: true
template_might_want_to_install_aws_ssm_port_forwarding_plugin: false
template_might_want_to_use_vcrpy: true
template_might_want_to_use_python_asyncio: false