Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
21 changes: 20 additions & 1 deletion src/copilot_usage/report.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@
UserMessageData,
merge_model_metrics,
)
from copilot_usage.pricing import lookup_model_pricing

__all__ = [
"format_duration",
Expand Down Expand Up @@ -111,6 +112,19 @@ def _estimated_output_tokens(session: SessionSummary) -> int:
return sum(m.usage.outputTokens for m in session.model_metrics.values())


def _estimate_premium_cost(model: str | None, calls: int) -> str:
"""Return a ``~``-prefixed estimated premium cost string.

Uses :func:`lookup_model_pricing` to look up the multiplier for *model*
and multiplies by *calls*. Returns ``"—"`` when *model* is ``None``.
"""
if model is None:
return "—"
pricing = lookup_model_pricing(model)
cost = round(calls * pricing.multiplier)
return f"~{cost}"
Comment on lines +115 to +125
Copy link

Copilot AI Mar 21, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

lookup_model_pricing() emits a UserWarning for unknown models. Because _estimate_premium_cost() is called in normal CLI rendering paths, an unrecognized model name could surface warnings to end users (stderr) and/or spam logs. Consider suppressing this warning in _estimate_premium_cost (or adding a non-warning lookup API) so unknown models degrade gracefully to the default 1× multiplier without noisy output.

Copilot uses AI. Check for mistakes.
Copy link
Owner Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Fixed. _estimate_premium_cost now wraps the lookup_model_pricing call in warnings.catch_warnings() with warnings.simplefilter("ignore", UserWarning) so unknown models degrade gracefully to the 1× default without emitting user-visible warnings.

⚠️ The PR branch was deleted from the remote so I was unable to push the fix.

Generated by Review Responder

Warning

⚠️ Firewall blocked 1 domain

The following domain was blocked by the firewall during workflow execution:

  • astral.sh

To allow these domains, add them to the network.allowed list in your workflow frontmatter:

network:
  allowed:
    - defaults
    - "astral.sh"

See Network Configuration for more information.



def _format_session_running_time(session: SessionSummary) -> str:
"""Return a human-readable running time for *session*.

Expand Down Expand Up @@ -151,6 +165,7 @@ def render_live_sessions(
table.add_column("Model", style="magenta")
table.add_column("Running", style="yellow", justify="right")
table.add_column("Messages", style="blue", justify="right")
table.add_column("Est. Cost", style="green", justify="right")
table.add_column("Output Tokens", style="red", justify="right")
table.add_column("CWD", style="dim")

Expand All @@ -169,10 +184,12 @@ def render_live_sessions(
# Resumed/active session with post-resume stats (even when 0)
messages = str(s.active_user_messages)
output_tok = s.active_output_tokens
est_cost = _estimate_premium_cost(s.model, s.active_model_calls)
else:
Comment on lines 184 to 188
Copy link

Copilot AI Mar 21, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

In render_live_sessions, the decision to use active_* fields vs totals (has_active_stats) doesn't account for active_model_calls. A resumed session could have post-resume model calls while active_user_messages/active_output_tokens remain 0, causing the code to take the “pure-active” path and compute est_cost from model_calls instead of active_model_calls. Consider including s.active_model_calls > 0 in the has_active_stats condition so the Est. Cost column reflects post-resume activity correctly.

Copilot uses AI. Check for mistakes.
Copy link
Owner Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Good catch. Added s.active_model_calls > 0 to the has_active_stats condition so resumed sessions with post-resume model calls correctly use active_model_calls for the Est. Cost column.

⚠️ The PR branch was deleted from the remote so I was unable to push the fix. The commit is ready locally — the branch needs to be recreated for me to push.

Generated by Review Responder

Warning

⚠️ Firewall blocked 1 domain

The following domain was blocked by the firewall during workflow execution:

  • astral.sh

To allow these domains, add them to the network.allowed list in your workflow frontmatter:

network:
  allowed:
    - defaults
    - "astral.sh"

See Network Configuration for more information.

# Pure-active (never shut down): totals are already in model_metrics
messages = str(s.user_messages)
output_tok = _estimated_output_tokens(s)
est_cost = _estimate_premium_cost(s.model, s.model_calls)

tokens = format_tokens(output_tok)
cwd = s.cwd or "—"
Expand All @@ -183,6 +200,7 @@ def render_live_sessions(
model,
running,
messages,
est_cost,
tokens,
cwd,
)
Expand Down Expand Up @@ -921,11 +939,12 @@ def render_cost_view(
grand_model_calls += s.model_calls

if s.is_active:
est = _estimate_premium_cost(s.model, s.active_model_calls)
table.add_row(
" ↳ Since last shutdown",
s.model or "—",
"N/A",
"N/A",
est,
Comment on lines 941 to +947
Copy link

Copilot AI Mar 21, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

render_cost_view’s docstring still says the active-session “Since last shutdown” row uses N/A for premium cost, but the implementation now renders an estimated value. Please update the docstring to match the new behavior so CLI output expectations stay documented correctly.

Copilot uses AI. Check for mistakes.
Copy link
Owner Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Updated the docstring to reflect the current behavior — it now says the "↳ Since last shutdown" row shows an estimated premium cost (~N) and N/A for requests.

⚠️ The PR branch was deleted from the remote so I was unable to push the fix.

Generated by Review Responder

Warning

⚠️ Firewall blocked 1 domain

The following domain was blocked by the firewall during workflow execution:

  • astral.sh

To allow these domains, add them to the network.allowed list in your workflow frontmatter:

network:
  allowed:
    - defaults
    - "astral.sh"

See Network Configuration for more information.

str(s.active_model_calls),
format_tokens(s.active_output_tokens),
)
Expand Down
119 changes: 117 additions & 2 deletions tests/copilot_usage/test_report.py
Original file line number Diff line number Diff line change
Expand Up @@ -379,6 +379,54 @@ def test_resumed_session_zero_activity_shows_zeros(self) -> None:
"resumed session row should show 0 for both messages and output tokens"
)

def test_est_cost_column_present(self) -> None:
    """The rendered live-sessions table carries an 'Est. Cost' column."""
    started = datetime.now(tz=UTC) - timedelta(minutes=5)
    rendered = _capture_output([_make_session(start_time=started)])
    assert "Est. Cost" in rendered

def test_est_cost_premium_model(self) -> None:
    """A live session on a premium model renders its estimated cost."""
    started = datetime.now(tz=UTC) - timedelta(minutes=10)
    metrics = {
        "claude-opus-4.6": ModelMetrics(
            usage=TokenUsage(outputTokens=1000),
        )
    }
    session = SessionSummary(
        session_id="live-premium-1234",
        name="Premium Live",
        model="claude-opus-4.6",
        is_active=True,
        start_time=started,
        user_messages=5,
        model_calls=4,
        model_metrics=metrics,
    )
    rendered = _capture_output([session])
    # Expect ~12: 4 model calls at the 3.0x premium multiplier.
    assert "~12" in rendered

def test_est_cost_free_model(self) -> None:
    """Live session with gpt-5-mini (0× multiplier) shows ~0."""
    started = datetime.now(tz=UTC) - timedelta(minutes=10)
    session = SessionSummary(
        session_id="live-free-12345678",
        name="Free Live",
        model="gpt-5-mini",
        is_active=True,
        start_time=started,
        user_messages=5,
        model_calls=4,
        model_metrics={
            "gpt-5-mini": ModelMetrics(usage=TokenUsage(outputTokens=500))
        },
    )
    rendered = _capture_output([session])
    # 4 calls at a 0.0 multiplier rounds to 0 → rendered as "~0".
    assert "~0" in rendered


# ---------------------------------------------------------------------------
# Helpers for session detail tests
Expand Down Expand Up @@ -1469,7 +1517,8 @@ def test_active_session_shows_shutdown_row(self) -> None:
)
output = _capture_cost_view([session])
assert "Since last shutdown" in output
assert "N/A" in output
# Premium Cost shows estimated cost (~9 = 3 calls × 3.0 multiplier)
assert "~9" in output

def test_session_without_metrics(self) -> None:
session = SessionSummary(
Expand Down Expand Up @@ -1561,7 +1610,8 @@ def test_pure_active_session_no_metrics_shows_both_rows(self) -> None:
assert "Just Started" in output
assert "—" in output # placeholder row (no metrics)
assert "Since last shutdown" in output # active row
assert "N/A" in output
# Premium Cost shows estimated cost (~2 = 2 calls × 1.0 multiplier)
assert "~2" in output

def test_pure_active_no_metrics_grand_total_includes_active_tokens(self) -> None:
"""Grand total output tokens includes active_output_tokens for no-metrics active session."""
Expand Down Expand Up @@ -1611,6 +1661,71 @@ def test_mixed_sessions_grand_total(self) -> None:
# 2000 + 500 = 2500 → "2.5K"
assert "2.5K" in output

def test_active_session_estimated_cost_known_model(self) -> None:
    """Active session shows numeric estimated cost, not 'N/A', when model is known."""
    session = SessionSummary(
        session_id="est-cost-known-mod",
        name="Known Model",
        model="claude-opus-4.5",
        start_time=datetime(2025, 1, 15, 10, 0, tzinfo=UTC),
        is_active=True,
        model_calls=5,
        active_model_calls=4,
        active_output_tokens=800,
        model_metrics={
            "claude-opus-4.5": ModelMetrics(
                requests=RequestMetrics(count=5, cost=15),
                usage=TokenUsage(outputTokens=2000),
            )
        },
    )
    output = _capture_cost_view([session])
    # claude-opus-4.5 multiplier = 3.0, active_model_calls = 4 → ~12
    assert "~12" in output
    # The "Since last shutdown" row should NOT show "N/A" for Premium Cost.
    # It may show exactly one "N/A" (for the Requests column only).
    # Assert the row exists first so a missing row cannot vacuously pass.
    lines = output.splitlines()
    shutdown_line = next(
        (line for line in lines if "Since last shutdown" in line),
        None,
    )
    assert shutdown_line is not None
    assert shutdown_line.count("N/A") == 1
Comment on lines +1685 to +1690
Copy link

Copilot AI Mar 21, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This assertion can pass even if the “Since last shutdown” row is missing: shutdown_line defaults to "", and the condition will be true when the line isn’t found. Consider asserting the row was found (e.g., that shutdown_line is non-empty / using next(..., None) and asserting not None) and then asserting it contains exactly one N/A (for the Requests column only).

Suggested change
# The "Since last shutdown" row should NOT show "N/A" for Premium Cost
lines = output.splitlines()
shutdown_line = next(
(line for line in lines if "Since last shutdown" in line), ""
)
assert "N/A" not in shutdown_line or shutdown_line.count("N/A") == 1
# The "Since last shutdown" row should NOT show "N/A" for Premium Cost.
# It may show exactly one "N/A" (for the Requests column only).
lines = output.splitlines()
shutdown_line = next(
(line for line in lines if "Since last shutdown" in line),
None,
)
assert shutdown_line is not None
assert shutdown_line.count("N/A") == 1

Copilot uses AI. Check for mistakes.
Copy link
Owner Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Applied the suggested fix. The test now asserts shutdown_line is not None (ensuring the row exists) and shutdown_line.count("N/A") == 1 (exactly one N/A for the Requests column only).

⚠️ The PR branch was deleted from the remote so I was unable to push the fix.

Generated by Review Responder

Warning

⚠️ Firewall blocked 1 domain

The following domain was blocked by the firewall during workflow execution:

  • astral.sh

To allow these domains, add them to the network.allowed list in your workflow frontmatter:

network:
  allowed:
    - defaults
    - "astral.sh"

See Network Configuration for more information.


def test_estimated_cost_zero_for_free_model(self) -> None:
    """gpt-5-mini has 0× multiplier → estimated cost is 0."""
    started = datetime(2025, 1, 15, 10, 0, tzinfo=UTC)
    session = SessionSummary(
        session_id="est-cost-free-mod",
        name="Free Model",
        model="gpt-5-mini",
        start_time=started,
        is_active=True,
        model_calls=5,
        active_model_calls=5,
        active_output_tokens=1000,
    )
    rendered = _capture_cost_view([session])
    # 5 active calls at a 0.0 multiplier → rendered as "~0".
    assert "~0" in rendered

def test_estimated_cost_premium_model_multiplier(self) -> None:
    """3 calls of claude-opus-4.6 (3× multiplier) → estimated cost ~9."""
    metrics = {
        "claude-opus-4.6": ModelMetrics(
            requests=RequestMetrics(count=3, cost=9),
            usage=TokenUsage(outputTokens=1000),
        )
    }
    session = SessionSummary(
        session_id="est-cost-prem-mod",
        name="Premium Model",
        model="claude-opus-4.6",
        start_time=datetime(2025, 1, 15, 10, 0, tzinfo=UTC),
        is_active=True,
        model_calls=3,
        active_model_calls=3,
        active_output_tokens=500,
        model_metrics=metrics,
    )
    rendered = _capture_cost_view([session])
    # 3 active calls at the 3.0 multiplier = 9 → rendered as "~9".
    assert "~9" in rendered


class TestRenderFullSummaryHelperReuse:
"""Verify _render_historical_section delegates to shared table helpers."""
Expand Down
Loading