diff --git a/.agents/skills/dotagents/SKILL.md b/.agents/skills/dotagents/SKILL.md index 85be3601419b..d3ee7bd97d4d 100644 --- a/.agents/skills/dotagents/SKILL.md +++ b/.agents/skills/dotagents/SKILL.md @@ -5,6 +5,10 @@ description: Manage agent skill dependencies with dotagents. Use when asked to " Manage agent skill dependencies declared in `agents.toml`. dotagents resolves, installs, and symlinks skills so multiple agent tools (Claude Code, Cursor, Codex, VS Code, OpenCode) discover them from `.agents/skills/`. +## Running dotagents + +If `dotagents` is not available as a direct command, use `npx @sentry/dotagents` instead. For example: `npx @sentry/dotagents sync`. All commands and flags work the same way. + ## References Read the relevant reference when the task requires deeper detail: diff --git a/.agents/skills/skill-creator/LICENSE.txt b/.agents/skills/skill-creator/LICENSE.txt new file mode 100644 index 000000000000..7a4a3ea2424c --- /dev/null +++ b/.agents/skills/skill-creator/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/.agents/skills/skill-creator/SKILL.md b/.agents/skills/skill-creator/SKILL.md new file mode 100644 index 000000000000..fd8bf01b4f59 --- /dev/null +++ b/.agents/skills/skill-creator/SKILL.md @@ -0,0 +1,506 @@ +--- +name: skill-creator +description: Create new skills, modify and improve existing skills, and measure skill performance. Use when users want to create a skill from scratch, edit, or optimize an existing skill, run evals to test a skill, benchmark skill performance with variance analysis, or optimize a skill's description for better triggering accuracy. +--- + +# Skill Creator + +A skill for creating new skills and iteratively improving them. + +At a high level, the process of creating a skill goes like this: + +- Decide what you want the skill to do and roughly how it should do it +- Write a draft of the skill +- Create a few test prompts and run claude-with-access-to-the-skill on them +- Help the user evaluate the results both qualitatively and quantitatively + - While the runs happen in the background, draft some quantitative evals if there aren't any (if there are some, you can either use as is or modify if you feel something needs to change about them). 
Then explain them to the user (or if they already existed, explain the ones that already exist) + - Use the `eval-viewer/generate_review.py` script to show the user the results for them to look at, and also let them look at the quantitative metrics +- Rewrite the skill based on feedback from the user's evaluation of the results (and also if there are any glaring flaws that become apparent from the quantitative benchmarks) +- Repeat until you're satisfied +- Expand the test set and try again at larger scale + +Your job when using this skill is to figure out where the user is in this process and then jump in and help them progress through these stages. So for instance, maybe they're like "I want to make a skill for X". You can help narrow down what they mean, write a draft, write the test cases, figure out how they want to evaluate, run all the prompts, and repeat. + +On the other hand, maybe they already have a draft of the skill. In this case you can go straight to the eval/iterate part of the loop. + +Of course, you should always be flexible and if the user is like "I don't need to run a bunch of evaluations, just vibe with me", you can do that instead. + +Then after the skill is done (but again, the order is flexible), you can also run the skill description improver, which we have a whole separate script for, to optimize the triggering of the skill. + +Cool? Cool. + +## Communicating with the user + +The skill creator is liable to be used by people across a wide range of familiarity with coding jargon. If you haven't heard (and how could you, it's only very recently that it started), there's a trend now where the power of Claude is inspiring plumbers to open up their terminals, parents and grandparents to google "how to install npm". On the other hand, the bulk of users are probably fairly computer-literate. + +So please pay attention to context cues to understand how to phrase your communication! 
In the default case, just to give you some idea: + +- "evaluation" and "benchmark" are borderline, but OK +- for "JSON" and "assertion" you want to see serious cues from the user that they know what those things are before using them without explaining them + +It's OK to briefly explain terms if you're in doubt, and feel free to clarify terms with a short definition if you're unsure if the user will get it. + +--- + +## Creating a skill + +### Capture Intent + +Start by understanding the user's intent. The current conversation might already contain a workflow the user wants to capture (e.g., they say "turn this into a skill"). If so, extract answers from the conversation history first — the tools used, the sequence of steps, corrections the user made, input/output formats observed. The user may need to fill the gaps, and should confirm before proceeding to the next step. + +1. What should this skill enable Claude to do? +2. When should this skill trigger? (what user phrases/contexts) +3. What's the expected output format? +4. Should we set up test cases to verify the skill works? Skills with objectively verifiable outputs (file transforms, data extraction, code generation, fixed workflow steps) benefit from test cases. Skills with subjective outputs (writing style, art) often don't need them. Suggest the appropriate default based on the skill type, but let the user decide. + +### Interview and Research + +Proactively ask questions about edge cases, input/output formats, example files, success criteria, and dependencies. Wait to write test prompts until you've got this part ironed out. + +Check available MCPs - if useful for research (searching docs, finding similar skills, looking up best practices), research in parallel via subagents if available, otherwise inline. Come prepared with context to reduce burden on the user. 
+ +### Write the SKILL.md + +Based on the user interview, fill in these components: + +- **name**: Skill identifier +- **description**: When to trigger, what it does. This is the primary triggering mechanism - include both what the skill does AND specific contexts for when to use it. All "when to use" info goes here, not in the body. Note: currently Claude has a tendency to "undertrigger" skills -- to not use them when they'd be useful. To combat this, please make the skill descriptions a little bit "pushy". So for instance, instead of "How to build a simple fast dashboard to display internal Anthropic data.", you might write "How to build a simple fast dashboard to display internal Anthropic data. Make sure to use this skill whenever the user mentions dashboards, data visualization, internal metrics, or wants to display any kind of company data, even if they don't explicitly ask for a 'dashboard.'" +- **compatibility**: Required tools, dependencies (optional, rarely needed) +- **the rest of the skill :)** + +### Skill Writing Guide + +#### Anatomy of a Skill + +``` +skill-name/ +├── SKILL.md (required) +│ ├── YAML frontmatter (name, description required) +│ └── Markdown instructions +└── Bundled Resources (optional) + ├── scripts/ - Executable code for deterministic/repetitive tasks + ├── references/ - Docs loaded into context as needed + └── assets/ - Files used in output (templates, icons, fonts) +``` + +#### Progressive Disclosure + +Skills use a three-level loading system: + +1. **Metadata** (name + description) - Always in context (~100 words) +2. **SKILL.md body** - In context whenever skill triggers (<500 lines ideal) +3. **Bundled resources** - As needed (unlimited, scripts can execute without loading) + +These word counts are approximate and you can feel free to go longer if needed. 
+ +**Key patterns:** + +- Keep SKILL.md under 500 lines; if you're approaching this limit, add an additional layer of hierarchy along with clear pointers about where the model using the skill should go next to follow up. +- Reference files clearly from SKILL.md with guidance on when to read them +- For large reference files (>300 lines), include a table of contents + +**Domain organization**: When a skill supports multiple domains/frameworks, organize by variant: + +``` +cloud-deploy/ +├── SKILL.md (workflow + selection) +└── references/ + ├── aws.md + ├── gcp.md + └── azure.md +``` + +Claude reads only the relevant reference file. + +#### Principle of Lack of Surprise + +This goes without saying, but skills must not contain malware, exploit code, or any content that could compromise system security. A skill's contents should not surprise the user in their intent if described. Don't go along with requests to create misleading skills or skills designed to facilitate unauthorized access, data exfiltration, or other malicious activities. Things like a "roleplay as an XYZ" are OK though. + +#### Writing Patterns + +Prefer using the imperative form in instructions. + +**Defining output formats** - You can do it like this: + +```markdown +## Report structure + +ALWAYS use this exact template: + +# [Title] + +## Executive summary + +## Key findings + +## Recommendations +``` + +**Examples pattern** - It's useful to include examples. You can format them like this (but if "Input" and "Output" are in the examples you might want to deviate a little): + +```markdown +## Commit message format + +**Example 1:** +Input: Added user authentication with JWT tokens +Output: feat(auth): implement JWT-based authentication +``` + +### Writing Style + +Try to explain to the model why things are important in lieu of heavy-handed musty MUSTs. Use theory of mind and try to make the skill general and not super-narrow to specific examples. 
Start by writing a draft and then look at it with fresh eyes and improve it. + +### Test Cases + +After writing the skill draft, come up with 2-3 realistic test prompts — the kind of thing a real user would actually say. Share them with the user: [you don't have to use this exact language] "Here are a few test cases I'd like to try. Do these look right, or do you want to add more?" Then run them. + +Save test cases to `evals/evals.json`. Don't write assertions yet — just the prompts. You'll draft assertions in the next step while the runs are in progress. + +```json +{ + "skill_name": "example-skill", + "evals": [ + { + "id": 1, + "prompt": "User's task prompt", + "expected_output": "Description of expected result", + "files": [] + } + ] +} +``` + +See `references/schemas.md` for the full schema (including the `assertions` field, which you'll add later). + +## Running and evaluating test cases + +This section is one continuous sequence — don't stop partway through. Do NOT use `/skill-test` or any other testing skill. + +Put results in `-workspace/` as a sibling to the skill directory. Within the workspace, organize results by iteration (`iteration-1/`, `iteration-2/`, etc.) and within that, each test case gets a directory (`eval-0/`, `eval-1/`, etc.). Don't create all of this upfront — just create directories as you go. + +### Step 1: Spawn all runs (with-skill AND baseline) in the same turn + +For each test case, spawn two subagents in the same turn — one with the skill, one without. This is important: don't spawn the with-skill runs first and then come back for baselines later. Launch everything at once so it all finishes around the same time. + +**With-skill run:** + +``` +Execute this task: +- Skill path: +- Task: +- Input files: +- Save outputs to: /iteration-/eval-/with_skill/outputs/ +- Outputs to save: +``` + +**Baseline run** (same prompt, but the baseline depends on context): + +- **Creating a new skill**: no skill at all. 
Same prompt, no skill path, save to `without_skill/outputs/`. +- **Improving an existing skill**: the old version. Before editing, snapshot the skill (`cp -r /skill-snapshot/`), then point the baseline subagent at the snapshot. Save to `old_skill/outputs/`. + +Write an `eval_metadata.json` for each test case (assertions can be empty for now). Give each eval a descriptive name based on what it's testing — not just "eval-0". Use this name for the directory too. If this iteration uses new or modified eval prompts, create these files for each new eval directory — don't assume they carry over from previous iterations. + +```json +{ + "eval_id": 0, + "eval_name": "descriptive-name-here", + "prompt": "The user's task prompt", + "assertions": [] +} +``` + +### Step 2: While runs are in progress, draft assertions + +Don't just wait for the runs to finish — you can use this time productively. Draft quantitative assertions for each test case and explain them to the user. If assertions already exist in `evals/evals.json`, review them and explain what they check. + +Good assertions are objectively verifiable and have descriptive names — they should read clearly in the benchmark viewer so someone glancing at the results immediately understands what each one checks. Subjective skills (writing style, design quality) are better evaluated qualitatively — don't force assertions onto things that need human judgment. + +Update the `eval_metadata.json` files and `evals/evals.json` with the assertions once drafted. Also explain to the user what they'll see in the viewer — both the qualitative outputs and the quantitative benchmark. + +### Step 3: As runs complete, capture timing data + +When each subagent task completes, you receive a notification containing `total_tokens` and `duration_ms`. 
Save this data immediately to `timing.json` in the run directory: + +```json +{ + "total_tokens": 84852, + "duration_ms": 23332, + "total_duration_seconds": 23.3 +} +``` + +This is the only opportunity to capture this data — it comes through the task notification and isn't persisted elsewhere. Process each notification as it arrives rather than trying to batch them. + +### Step 4: Grade, aggregate, and launch the viewer + +Once all runs are done: + +1. **Grade each run** — spawn a grader subagent (or grade inline) that reads `agents/grader.md` and evaluates each assertion against the outputs. Save results to `grading.json` in each run directory. The grading.json expectations array must use the fields `text`, `passed`, and `evidence` (not `name`/`met`/`details` or other variants) — the viewer depends on these exact field names. For assertions that can be checked programmatically, write and run a script rather than eyeballing it — scripts are faster, more reliable, and can be reused across iterations. + +2. **Aggregate into benchmark** — run the aggregation script from the skill-creator directory: + + ```bash + python -m scripts.aggregate_benchmark /iteration-N --skill-name + ``` + + This produces `benchmark.json` and `benchmark.md` with pass_rate, time, and tokens for each configuration, with mean ± stddev and the delta. If generating benchmark.json manually, see `references/schemas.md` for the exact schema the viewer expects. + Put each with_skill version before its baseline counterpart. + +3. **Do an analyst pass** — read the benchmark data and surface patterns the aggregate stats might hide. See `agents/analyzer.md` (the "Analyzing Benchmark Results" section) for what to look for — things like assertions that always pass regardless of skill (non-discriminating), high-variance evals (possibly flaky), and time/token tradeoffs. + +4. 
**Launch the viewer** with both qualitative outputs and quantitative data: + + ```bash + nohup python /eval-viewer/generate_review.py \ + /iteration-N \ + --skill-name "my-skill" \ + --benchmark /iteration-N/benchmark.json \ + > /dev/null 2>&1 & + VIEWER_PID=$! + ``` + + For iteration 2+, also pass `--previous-workspace /iteration-`. + + **Cowork / headless environments:** If `webbrowser.open()` is not available or the environment has no display, use `--static ` to write a standalone HTML file instead of starting a server. Feedback will be downloaded as a `feedback.json` file when the user clicks "Submit All Reviews". After download, copy `feedback.json` into the workspace directory for the next iteration to pick up. + +Note: please use generate_review.py to create the viewer; there's no need to write custom HTML. + +5. **Tell the user** something like: "I've opened the results in your browser. There are two tabs — 'Outputs' lets you click through each test case and leave feedback, 'Benchmark' shows the quantitative comparison. When you're done, come back here and let me know." + +### What the user sees in the viewer + +The "Outputs" tab shows one test case at a time: + +- **Prompt**: the task that was given +- **Output**: the files the skill produced, rendered inline where possible +- **Previous Output** (iteration 2+): collapsed section showing last iteration's output +- **Formal Grades** (if grading was run): collapsed section showing assertion pass/fail +- **Feedback**: a textbox that auto-saves as they type +- **Previous Feedback** (iteration 2+): their comments from last time, shown below the textbox + +The "Benchmark" tab shows the stats summary: pass rates, timing, and token usage for each configuration, with per-eval breakdowns and analyst observations. + +Navigation is via prev/next buttons or arrow keys. When done, they click "Submit All Reviews" which saves all feedback to `feedback.json`. 
+ +### Step 5: Read the feedback + +When the user tells you they're done, read `feedback.json`: + +```json +{ + "reviews": [ + { "run_id": "eval-0-with_skill", "feedback": "the chart is missing axis labels", "timestamp": "..." }, + { "run_id": "eval-1-with_skill", "feedback": "", "timestamp": "..." }, + { "run_id": "eval-2-with_skill", "feedback": "perfect, love this", "timestamp": "..." } + ], + "status": "complete" +} +``` + +Empty feedback means the user thought it was fine. Focus your improvements on the test cases where the user had specific complaints. + +Kill the viewer server when you're done with it: + +```bash +kill $VIEWER_PID 2>/dev/null +``` + +--- + +## Improving the skill + +This is the heart of the loop. You've run the test cases, the user has reviewed the results, and now you need to make the skill better based on their feedback. + +### How to think about improvements + +1. **Generalize from the feedback.** The big picture thing that's happening here is that we're trying to create skills that can be used a million times (maybe literally, maybe even more who knows) across many different prompts. Here you and the user are iterating on only a few examples over and over again because it helps move faster. The user knows these examples in and out and it's quick for them to assess new outputs. But if the skill you and the user are codeveloping works only for those examples, it's useless. Rather than put in fiddly overfitty changes, or oppressively constrictive MUSTs, if there's some stubborn issue, you might try branching out and using different metaphors, or recommending different patterns of working. It's relatively cheap to try and maybe you'll land on something great. + +2. **Keep the prompt lean.** Remove things that aren't pulling their weight. 
Make sure to read the transcripts, not just the final outputs — if it looks like the skill is making the model waste a bunch of time doing things that are unproductive, you can try getting rid of the parts of the skill that are making it do that and seeing what happens. + +3. **Explain the why.** Try hard to explain the **why** behind everything you're asking the model to do. Today's LLMs are _smart_. They have good theory of mind and when given a good harness can go beyond rote instructions and really make things happen. Even if the feedback from the user is terse or frustrated, try to actually understand the task and why the user is writing what they wrote, and what they actually wrote, and then transmit this understanding into the instructions. If you find yourself writing ALWAYS or NEVER in all caps, or using super rigid structures, that's a yellow flag — if possible, reframe and explain the reasoning so that the model understands why the thing you're asking for is important. That's a more humane, powerful, and effective approach. + +4. **Look for repeated work across test cases.** Read the transcripts from the test runs and notice if the subagents all independently wrote similar helper scripts or took the same multi-step approach to something. If all 3 test cases resulted in the subagent writing a `create_docx.py` or a `build_chart.py`, that's a strong signal the skill should bundle that script. Write it once, put it in `scripts/`, and tell the skill to use it. This saves every future invocation from reinventing the wheel. + +This task is pretty important (we are trying to create billions a year in economic value here!) and your thinking time is not the blocker; take your time and really mull things over. I'd suggest writing a draft revision and then looking at it anew and making improvements. Really do your best to get into the head of the user and understand what they want and need. + +### The iteration loop + +After improving the skill: + +1. 
Apply your improvements to the skill +2. Rerun all test cases into a new `iteration-/` directory, including baseline runs. If you're creating a new skill, the baseline is always `without_skill` (no skill) — that stays the same across iterations. If you're improving an existing skill, use your judgment on what makes sense as the baseline: the original version the user came in with, or the previous iteration. +3. Launch the reviewer with `--previous-workspace` pointing at the previous iteration +4. Wait for the user to review and tell you they're done +5. Read the new feedback, improve again, repeat + +Keep going until: + +- The user says they're happy +- The feedback is all empty (everything looks good) +- You're not making meaningful progress + +--- + +## Advanced: Blind comparison + +For situations where you want a more rigorous comparison between two versions of a skill (e.g., the user asks "is the new version actually better?"), there's a blind comparison system. Read `agents/comparator.md` and `agents/analyzer.md` for the details. The basic idea is: give two outputs to an independent agent without telling it which is which, and let it judge quality. Then analyze why the winner won. + +This is optional, requires subagents, and most users won't need it. The human review loop is usually sufficient. + +--- + +## Description Optimization + +The description field in SKILL.md frontmatter is the primary mechanism that determines whether Claude invokes a skill. After creating or improving a skill, offer to optimize the description for better triggering accuracy. + +### Step 1: Generate trigger eval queries + +Create 20 eval queries — a mix of should-trigger and should-not-trigger. Save as JSON: + +```json +[ + { "query": "the user prompt", "should_trigger": true }, + { "query": "another prompt", "should_trigger": false } +] +``` + +The queries must be realistic and something a Claude Code or Claude.ai user would actually type. 
Not abstract requests, but requests that are concrete and specific and have a good amount of detail. For instance, file paths, personal context about the user's job or situation, column names and values, company names, URLs. A little bit of backstory. Some might be in lowercase or contain abbreviations or typos or casual speech. Use a mix of different lengths, and focus on edge cases rather than making them clear-cut (the user will get a chance to sign off on them). + +Bad: `"Format this data"`, `"Extract text from PDF"`, `"Create a chart"` + +Good: `"ok so my boss just sent me this xlsx file (its in my downloads, called something like 'Q4 sales final FINAL v2.xlsx') and she wants me to add a column that shows the profit margin as a percentage. The revenue is in column C and costs are in column D i think"` + +For the **should-trigger** queries (8-10), think about coverage. You want different phrasings of the same intent — some formal, some casual. Include cases where the user doesn't explicitly name the skill or file type but clearly needs it. Throw in some uncommon use cases and cases where this skill competes with another but should win. + +For the **should-not-trigger** queries (8-10), the most valuable ones are the near-misses — queries that share keywords or concepts with the skill but actually need something different. Think adjacent domains, ambiguous phrasing where a naive keyword match would trigger but shouldn't, and cases where the query touches on something the skill does but in a context where another tool is more appropriate. + +The key thing to avoid: don't make should-not-trigger queries obviously irrelevant. "Write a fibonacci function" as a negative test for a PDF skill is too easy — it doesn't test anything. The negative cases should be genuinely tricky. + +### Step 2: Review with user + +Present the eval set to the user for review using the HTML template: + +1. Read the template from `assets/eval_review.html` +2. 
Replace the placeholders: + - `__EVAL_DATA_PLACEHOLDER__` → the JSON array of eval items (no quotes around it — it's a JS variable assignment) + - `__SKILL_NAME_PLACEHOLDER__` → the skill's name + - `__SKILL_DESCRIPTION_PLACEHOLDER__` → the skill's current description +3. Write to a temp file (e.g., `/tmp/eval_review_<skill-name>.html`) and open it: `open /tmp/eval_review_<skill-name>.html` +4. The user can edit queries, toggle should-trigger, add/remove entries, then click "Export Eval Set" +5. The file downloads to `~/Downloads/eval_set.json` — check the Downloads folder for the most recent version in case there are multiple (e.g., `eval_set (1).json`) + +This step matters — bad eval queries lead to bad descriptions. + +### Step 3: Run the optimization loop + +Tell the user: "This will take some time — I'll run the optimization loop in the background and check on it periodically." + +Save the eval set to the workspace, then run in the background: + +```bash +python -m scripts.run_loop \ + --eval-set <path/to/eval_set.json> \ + --skill-path <path/to/skill> \ + --model <model-id> \ + --max-iterations 5 \ + --verbose +``` + +Use the model ID from your system prompt (the one powering the current session) so the triggering test matches what the user actually experiences. + +While it runs, periodically tail the output to give the user updates on which iteration it's on and what the scores look like. + +This handles the full optimization loop automatically. It splits the eval set into 60% train and 40% held-out test, evaluates the current description (running each query 3 times to get a reliable trigger rate), then calls Claude to propose improvements based on what failed. It re-evaluates each new description on both train and test, iterating up to 5 times. When it's done, it opens an HTML report in the browser showing the results per iteration and returns JSON with `best_description` — selected by test score rather than train score to avoid overfitting. 
+ +### How skill triggering works + +Understanding the triggering mechanism helps design better eval queries. Skills appear in Claude's `available_skills` list with their name + description, and Claude decides whether to consult a skill based on that description. The important thing to know is that Claude only consults skills for tasks it can't easily handle on its own — simple, one-step queries like "read this PDF" may not trigger a skill even if the description matches perfectly, because Claude can handle them directly with basic tools. Complex, multi-step, or specialized queries reliably trigger skills when the description matches. + +This means your eval queries should be substantive enough that Claude would actually benefit from consulting a skill. Simple queries like "read file X" are poor test cases — they won't trigger skills regardless of description quality. + +### Step 4: Apply the result + +Take `best_description` from the JSON output and update the skill's SKILL.md frontmatter. Show the user before/after and report the scores. + +--- + +### Package and Present (only if `present_files` tool is available) + +Check whether you have access to the `present_files` tool. If you don't, skip this step. If you do, package the skill and present the .skill file to the user: + +```bash +python -m scripts.package_skill <path/to/skill> +``` + +After packaging, direct the user to the resulting `.skill` file path so they can install it. + +--- + +## Claude.ai-specific instructions + +In Claude.ai, the core workflow is the same (draft → test → review → improve → repeat), but because Claude.ai doesn't have subagents, some mechanics change. Here's what to adapt: + +**Running test cases**: No subagents means no parallel execution. For each test case, read the skill's SKILL.md, then follow its instructions to accomplish the test prompt yourself. Do them one at a time. 
This is less rigorous than independent subagents (you wrote the skill and you're also running it, so you have full context), but it's a useful sanity check — and the human review step compensates. Skip the baseline runs — just use the skill to complete the task as requested. + +**Reviewing results**: If you can't open a browser (e.g., Claude.ai's VM has no display, or you're on a remote server), skip the browser reviewer entirely. Instead, present results directly in the conversation. For each test case, show the prompt and the output. If the output is a file the user needs to see (like a .docx or .xlsx), save it to the filesystem and tell them where it is so they can download and inspect it. Ask for feedback inline: "How does this look? Anything you'd change?" + +**Benchmarking**: Skip the quantitative benchmarking — it relies on baseline comparisons which aren't meaningful without subagents. Focus on qualitative feedback from the user. + +**The iteration loop**: Same as before — improve the skill, rerun the test cases, ask for feedback — just without the browser reviewer in the middle. You can still organize results into iteration directories on the filesystem if you have one. + +**Description optimization**: This section requires the `claude` CLI tool (specifically `claude -p`) which is only available in Claude Code. Skip it if you're on Claude.ai. + +**Blind comparison**: Requires subagents. Skip it. + +**Packaging**: The `package_skill.py` script works anywhere with Python and a filesystem. On Claude.ai, you can run it and the user can download the resulting `.skill` file. + +**Updating an existing skill**: The user might be asking you to update an existing skill, not create a new one. In this case: + +- **Preserve the original name.** Note the skill's directory name and `name` frontmatter field -- use them unchanged. E.g., if the installed skill is `research-helper`, output `research-helper.skill` (not `research-helper-v2`). 
+ +- **Copy to a writeable location before editing.** The installed skill path may be read-only. Copy to `/tmp/skill-name/`, edit there, and package from the copy. +- **If packaging manually, stage in `/tmp/` first**, then copy to the output directory -- direct writes may fail due to permissions. + +--- + +## Cowork-Specific Instructions + +If you're in Cowork, the main things to know are: + +- You have subagents, so the main workflow (spawn test cases in parallel, run baselines, grade, etc.) all works. (However, if you run into severe problems with timeouts, it's OK to run the test prompts in series rather than parallel.) +- You don't have a browser or display, so when generating the eval viewer, use `--static <output.html>` to write a standalone HTML file instead of starting a server. Then proffer a link that the user can click to open the HTML in their browser. +- For whatever reason, the Cowork setup seems to disincline Claude from generating the eval viewer after running the tests, so just to reiterate: whether you're in Cowork or in Claude Code, after running tests, you should always generate the eval viewer for the human to look at examples before revising the skill yourself and trying to make corrections, using `generate_review.py` (not writing your own boutique html code). Sorry in advance but I'm gonna go all caps here: GENERATE THE EVAL VIEWER _BEFORE_ evaluating inputs yourself. You want to get them in front of the human ASAP! +- Feedback works differently: since there's no running server, the viewer's "Submit All Reviews" button will download `feedback.json` as a file. You can then read it from there (you may have to request access first). +- Packaging works — `package_skill.py` just needs Python and a filesystem. +- Description optimization (`run_loop.py` / `run_eval.py`) should work in Cowork just fine since it uses `claude -p` via subprocess, not a browser, but please save it until you've fully finished making the skill and the user agrees it's in good shape. 
+ +- **Updating an existing skill**: The user might be asking you to update an existing skill, not create a new one. Follow the update guidance in the claude.ai section above. + +--- + +## Reference files + +The agents/ directory contains instructions for specialized subagents. Read them when you need to spawn the relevant subagent. + +- `agents/grader.md` — How to evaluate assertions against outputs +- `agents/comparator.md` — How to do blind A/B comparison between two outputs +- `agents/analyzer.md` — How to analyze why one version beat another + +The references/ directory has additional documentation: + +- `references/schemas.md` — JSON structures for evals.json, grading.json, etc. + +--- + +Repeating one more time the core loop here for emphasis: + +- Figure out what the skill is about +- Draft or edit the skill +- Run claude-with-access-to-the-skill on test prompts +- With the user, evaluate the outputs: + - Create benchmark.json and run `eval-viewer/generate_review.py` to help the user review them + - Run quantitative evals +- Repeat until you and the user are satisfied +- Package the final skill and return it to the user. + +Please add steps to your TodoList, if you have such a thing, to make sure you don't forget. If you're in Cowork, please specifically put "Create evals JSON and run `eval-viewer/generate_review.py` so human can review test cases" in your TodoList to make sure it happens. + +Good luck! diff --git a/.agents/skills/skill-creator/agents/analyzer.md b/.agents/skills/skill-creator/agents/analyzer.md new file mode 100644 index 000000000000..bd9e6d67c8b3 --- /dev/null +++ b/.agents/skills/skill-creator/agents/analyzer.md @@ -0,0 +1,283 @@ +# Post-hoc Analyzer Agent + +Analyze blind comparison results to understand WHY the winner won and generate improvement suggestions. + +## Role + +After the blind comparator determines a winner, the Post-hoc Analyzer "unblinds" the results by examining the skills and transcripts. 
The goal is to extract actionable insights: what made the winner better, and how can the loser be improved? + +## Inputs + +You receive these parameters in your prompt: + +- **winner**: "A" or "B" (from blind comparison) +- **winner_skill_path**: Path to the skill that produced the winning output +- **winner_transcript_path**: Path to the execution transcript for the winner +- **loser_skill_path**: Path to the skill that produced the losing output +- **loser_transcript_path**: Path to the execution transcript for the loser +- **comparison_result_path**: Path to the blind comparator's output JSON +- **output_path**: Where to save the analysis results + +## Process + +### Step 1: Read Comparison Result + +1. Read the blind comparator's output at comparison_result_path +2. Note the winning side (A or B), the reasoning, and any scores +3. Understand what the comparator valued in the winning output + +### Step 2: Read Both Skills + +1. Read the winner skill's SKILL.md and key referenced files +2. Read the loser skill's SKILL.md and key referenced files +3. Identify structural differences: + - Instructions clarity and specificity + - Script/tool usage patterns + - Example coverage + - Edge case handling + +### Step 3: Read Both Transcripts + +1. Read the winner's transcript +2. Read the loser's transcript +3. Compare execution patterns: + - How closely did each follow their skill's instructions? + - What tools were used differently? + - Where did the loser diverge from optimal behavior? + - Did either encounter errors or make recovery attempts? + +### Step 4: Analyze Instruction Following + +For each transcript, evaluate: + +- Did the agent follow the skill's explicit instructions? +- Did the agent use the skill's provided tools/scripts? +- Were there missed opportunities to leverage skill content? +- Did the agent add unnecessary steps not in the skill? + +Score instruction following 1-10 and note specific issues. 
+ +### Step 5: Identify Winner Strengths + +Determine what made the winner better: + +- Clearer instructions that led to better behavior? +- Better scripts/tools that produced better output? +- More comprehensive examples that guided edge cases? +- Better error handling guidance? + +Be specific. Quote from skills/transcripts where relevant. + +### Step 6: Identify Loser Weaknesses + +Determine what held the loser back: + +- Ambiguous instructions that led to suboptimal choices? +- Missing tools/scripts that forced workarounds? +- Gaps in edge case coverage? +- Poor error handling that caused failures? + +### Step 7: Generate Improvement Suggestions + +Based on the analysis, produce actionable suggestions for improving the loser skill: + +- Specific instruction changes to make +- Tools/scripts to add or modify +- Examples to include +- Edge cases to address + +Prioritize by impact. Focus on changes that would have changed the outcome. + +### Step 8: Write Analysis Results + +Save structured analysis to `{output_path}`. 
+ +## Output Format + +Write a JSON file with this structure: + +```json +{ + "comparison_summary": { + "winner": "A", + "winner_skill": "path/to/winner/skill", + "loser_skill": "path/to/loser/skill", + "comparator_reasoning": "Brief summary of why comparator chose winner" + }, + "winner_strengths": [ + "Clear step-by-step instructions for handling multi-page documents", + "Included validation script that caught formatting errors", + "Explicit guidance on fallback behavior when OCR fails" + ], + "loser_weaknesses": [ + "Vague instruction 'process the document appropriately' led to inconsistent behavior", + "No script for validation, agent had to improvise and made errors", + "No guidance on OCR failure, agent gave up instead of trying alternatives" + ], + "instruction_following": { + "winner": { + "score": 9, + "issues": ["Minor: skipped optional logging step"] + }, + "loser": { + "score": 6, + "issues": [ + "Did not use the skill's formatting template", + "Invented own approach instead of following step 3", + "Missed the 'always validate output' instruction" + ] + } + }, + "improvement_suggestions": [ + { + "priority": "high", + "category": "instructions", + "suggestion": "Replace 'process the document appropriately' with explicit steps: 1) Extract text, 2) Identify sections, 3) Format per template", + "expected_impact": "Would eliminate ambiguity that caused inconsistent behavior" + }, + { + "priority": "high", + "category": "tools", + "suggestion": "Add validate_output.py script similar to winner skill's validation approach", + "expected_impact": "Would catch formatting errors before final output" + }, + { + "priority": "medium", + "category": "error_handling", + "suggestion": "Add fallback instructions: 'If OCR fails, try: 1) different resolution, 2) image preprocessing, 3) manual extraction'", + "expected_impact": "Would prevent early failure on difficult documents" + } + ], + "transcript_insights": { + "winner_execution_pattern": "Read skill -> Followed 
5-step process -> Used validation script -> Fixed 2 issues -> Produced output", + "loser_execution_pattern": "Read skill -> Unclear on approach -> Tried 3 different methods -> No validation -> Output had errors" + } +} +``` + +## Guidelines + +- **Be specific**: Quote from skills and transcripts, don't just say "instructions were unclear" +- **Be actionable**: Suggestions should be concrete changes, not vague advice +- **Focus on skill improvements**: The goal is to improve the losing skill, not critique the agent +- **Prioritize by impact**: Which changes would most likely have changed the outcome? +- **Consider causation**: Did the skill weakness actually cause the worse output, or is it incidental? +- **Stay objective**: Analyze what happened, don't editorialize +- **Think about generalization**: Would this improvement help on other evals too? + +## Categories for Suggestions + +Use these categories to organize improvement suggestions: + +| Category | Description | +| ---------------- | ---------------------------------------------- | +| `instructions` | Changes to the skill's prose instructions | +| `tools` | Scripts, templates, or utilities to add/modify | +| `examples` | Example inputs/outputs to include | +| `error_handling` | Guidance for handling failures | +| `structure` | Reorganization of skill content | +| `references` | External docs or resources to add | + +## Priority Levels + +- **high**: Would likely change the outcome of this comparison +- **medium**: Would improve quality but may not change win/loss +- **low**: Nice to have, marginal improvement + +--- + +# Analyzing Benchmark Results + +When analyzing benchmark results, the analyzer's purpose is to **surface patterns and anomalies** across multiple runs, not suggest skill improvements. + +## Role + +Review all benchmark run results and generate freeform notes that help the user understand skill performance. Focus on patterns that wouldn't be visible from aggregate metrics alone. 
+ +## Inputs + +You receive these parameters in your prompt: + +- **benchmark_data_path**: Path to the in-progress benchmark.json with all run results +- **skill_path**: Path to the skill being benchmarked +- **output_path**: Where to save the notes (as JSON array of strings) + +## Process + +### Step 1: Read Benchmark Data + +1. Read the benchmark.json containing all run results +2. Note the configurations tested (with_skill, without_skill) +3. Understand the run_summary aggregates already calculated + +### Step 2: Analyze Per-Assertion Patterns + +For each expectation across all runs: + +- Does it **always pass** in both configurations? (may not differentiate skill value) +- Does it **always fail** in both configurations? (may be broken or beyond capability) +- Does it **always pass with skill but fail without**? (skill clearly adds value here) +- Does it **always fail with skill but pass without**? (skill may be hurting) +- Is it **highly variable**? (flaky expectation or non-deterministic behavior) + +### Step 3: Analyze Cross-Eval Patterns + +Look for patterns across evals: + +- Are certain eval types consistently harder/easier? +- Do some evals show high variance while others are stable? +- Are there surprising results that contradict expectations? + +### Step 4: Analyze Metrics Patterns + +Look at time_seconds, tokens, tool_calls: + +- Does the skill significantly increase execution time? +- Is there high variance in resource usage? +- Are there outlier runs that skew the aggregates? + +### Step 5: Generate Notes + +Write freeform observations as a list of strings. 
Each note should: + +- State a specific observation +- Be grounded in the data (not speculation) +- Help the user understand something the aggregate metrics don't show + +Examples: + +- "Assertion 'Output is a PDF file' passes 100% in both configurations - may not differentiate skill value" +- "Eval 3 shows high variance (50% ± 40%) - run 2 had an unusual failure that may be flaky" +- "Without-skill runs consistently fail on table extraction expectations (0% pass rate)" +- "Skill adds 13s average execution time but improves pass rate by 50%" +- "Token usage is 80% higher with skill, primarily due to script output parsing" +- "All 3 without-skill runs for eval 1 produced empty output" + +### Step 6: Write Notes + +Save notes to `{output_path}` as a JSON array of strings: + +```json +[ + "Assertion 'Output is a PDF file' passes 100% in both configurations - may not differentiate skill value", + "Eval 3 shows high variance (50% ± 40%) - run 2 had an unusual failure", + "Without-skill runs consistently fail on table extraction expectations", + "Skill adds 13s average execution time but improves pass rate by 50%" +] +``` + +## Guidelines + +**DO:** + +- Report what you observe in the data +- Be specific about which evals, expectations, or runs you're referring to +- Note patterns that aggregate metrics would hide +- Provide context that helps interpret the numbers + +**DO NOT:** + +- Suggest improvements to the skill (that's for the improvement step, not benchmarking) +- Make subjective quality judgments ("the output was good/bad") +- Speculate about causes without evidence +- Repeat information already in the run_summary aggregates diff --git a/.agents/skills/skill-creator/agents/comparator.md b/.agents/skills/skill-creator/agents/comparator.md new file mode 100644 index 000000000000..990f9960ecd7 --- /dev/null +++ b/.agents/skills/skill-creator/agents/comparator.md @@ -0,0 +1,203 @@ +# Blind Comparator Agent + +Compare two outputs WITHOUT knowing which skill produced 
them. + +## Role + +The Blind Comparator judges which output better accomplishes the eval task. You receive two outputs labeled A and B, but you do NOT know which skill produced which. This prevents bias toward a particular skill or approach. + +Your judgment is based purely on output quality and task completion. + +## Inputs + +You receive these parameters in your prompt: + +- **output_a_path**: Path to the first output file or directory +- **output_b_path**: Path to the second output file or directory +- **eval_prompt**: The original task/prompt that was executed +- **expectations**: List of expectations to check (optional - may be empty) + +## Process + +### Step 1: Read Both Outputs + +1. Examine output A (file or directory) +2. Examine output B (file or directory) +3. Note the type, structure, and content of each +4. If outputs are directories, examine all relevant files inside + +### Step 2: Understand the Task + +1. Read the eval_prompt carefully +2. Identify what the task requires: + - What should be produced? + - What qualities matter (accuracy, completeness, format)? + - What would distinguish a good output from a poor one? 
+ +### Step 3: Generate Evaluation Rubric + +Based on the task, generate a rubric with two dimensions: + +**Content Rubric** (what the output contains): +| Criterion | 1 (Poor) | 3 (Acceptable) | 5 (Excellent) | +|-----------|----------|----------------|---------------| +| Correctness | Major errors | Minor errors | Fully correct | +| Completeness | Missing key elements | Mostly complete | All elements present | +| Accuracy | Significant inaccuracies | Minor inaccuracies | Accurate throughout | + +**Structure Rubric** (how the output is organized): +| Criterion | 1 (Poor) | 3 (Acceptable) | 5 (Excellent) | +|-----------|----------|----------------|---------------| +| Organization | Disorganized | Reasonably organized | Clear, logical structure | +| Formatting | Inconsistent/broken | Mostly consistent | Professional, polished | +| Usability | Difficult to use | Usable with effort | Easy to use | + +Adapt criteria to the specific task. For example: + +- PDF form → "Field alignment", "Text readability", "Data placement" +- Document → "Section structure", "Heading hierarchy", "Paragraph flow" +- Data output → "Schema correctness", "Data types", "Completeness" + +### Step 4: Evaluate Each Output Against the Rubric + +For each output (A and B): + +1. **Score each criterion** on the rubric (1-5 scale) +2. **Calculate dimension totals**: Content score, Structure score +3. **Calculate overall score**: Average of dimension scores, scaled to 1-10 + +### Step 5: Check Assertions (if provided) + +If expectations are provided: + +1. Check each expectation against output A +2. Check each expectation against output B +3. Count pass rates for each output +4. Use expectation scores as secondary evidence (not the primary decision factor) + +### Step 6: Determine the Winner + +Compare A and B based on (in priority order): + +1. **Primary**: Overall rubric score (content + structure) +2. **Secondary**: Assertion pass rates (if applicable) +3. 
**Tiebreaker**: If truly equal, declare a TIE + +Be decisive - ties should be rare. One output is usually better, even if marginally. + +### Step 7: Write Comparison Results + +Save results to a JSON file at the path specified (or `comparison.json` if not specified). + +## Output Format + +Write a JSON file with this structure: + +```json +{ + "winner": "A", + "reasoning": "Output A provides a complete solution with proper formatting and all required fields. Output B is missing the date field and has formatting inconsistencies.", + "rubric": { + "A": { + "content": { + "correctness": 5, + "completeness": 5, + "accuracy": 4 + }, + "structure": { + "organization": 4, + "formatting": 5, + "usability": 4 + }, + "content_score": 4.7, + "structure_score": 4.3, + "overall_score": 9.0 + }, + "B": { + "content": { + "correctness": 3, + "completeness": 2, + "accuracy": 3 + }, + "structure": { + "organization": 3, + "formatting": 2, + "usability": 3 + }, + "content_score": 2.7, + "structure_score": 2.7, + "overall_score": 5.4 + } + }, + "output_quality": { + "A": { + "score": 9, + "strengths": ["Complete solution", "Well-formatted", "All fields present"], + "weaknesses": ["Minor style inconsistency in header"] + }, + "B": { + "score": 5, + "strengths": ["Readable output", "Correct basic structure"], + "weaknesses": ["Missing date field", "Formatting inconsistencies", "Partial data extraction"] + } + }, + "expectation_results": { + "A": { + "passed": 4, + "total": 5, + "pass_rate": 0.8, + "details": [ + { "text": "Output includes name", "passed": true }, + { "text": "Output includes date", "passed": true }, + { "text": "Format is PDF", "passed": true }, + { "text": "Contains signature", "passed": false }, + { "text": "Readable text", "passed": true } + ] + }, + "B": { + "passed": 3, + "total": 5, + "pass_rate": 0.6, + "details": [ + { "text": "Output includes name", "passed": true }, + { "text": "Output includes date", "passed": false }, + { "text": "Format is PDF", "passed": 
true }, + { "text": "Contains signature", "passed": false }, + { "text": "Readable text", "passed": true } + ] + } + } +} +``` + +If no expectations were provided, omit the `expectation_results` field entirely. + +## Field Descriptions + +- **winner**: "A", "B", or "TIE" +- **reasoning**: Clear explanation of why the winner was chosen (or why it's a tie) +- **rubric**: Structured rubric evaluation for each output + - **content**: Scores for content criteria (correctness, completeness, accuracy) + - **structure**: Scores for structure criteria (organization, formatting, usability) + - **content_score**: Average of content criteria (1-5) + - **structure_score**: Average of structure criteria (1-5) + - **overall_score**: Combined score scaled to 1-10 +- **output_quality**: Summary quality assessment + - **score**: 1-10 rating (should match rubric overall_score) + - **strengths**: List of positive aspects + - **weaknesses**: List of issues or shortcomings +- **expectation_results**: (Only if expectations provided) + - **passed**: Number of expectations that passed + - **total**: Total number of expectations + - **pass_rate**: Fraction passed (0.0 to 1.0) + - **details**: Individual expectation results + +## Guidelines + +- **Stay blind**: DO NOT try to infer which skill produced which output. Judge purely on output quality. +- **Be specific**: Cite specific examples when explaining strengths and weaknesses. +- **Be decisive**: Choose a winner unless outputs are genuinely equivalent. +- **Output quality first**: Assertion scores are secondary to overall task completion. +- **Be objective**: Don't favor outputs based on style preferences; focus on correctness and completeness. +- **Explain your reasoning**: The reasoning field should make it clear why you chose the winner. +- **Handle edge cases**: If both outputs fail, pick the one that fails less badly. If both are excellent, pick the one that's marginally better. 
diff --git a/.agents/skills/skill-creator/agents/grader.md b/.agents/skills/skill-creator/agents/grader.md new file mode 100644 index 000000000000..ba7a31e57edb --- /dev/null +++ b/.agents/skills/skill-creator/agents/grader.md @@ -0,0 +1,227 @@ +# Grader Agent + +Evaluate expectations against an execution transcript and outputs. + +## Role + +The Grader reviews a transcript and output files, then determines whether each expectation passes or fails. Provide clear evidence for each judgment. + +You have two jobs: grade the outputs, and critique the evals themselves. A passing grade on a weak assertion is worse than useless — it creates false confidence. When you notice an assertion that's trivially satisfied, or an important outcome that no assertion checks, say so. + +## Inputs + +You receive these parameters in your prompt: + +- **expectations**: List of expectations to evaluate (strings) +- **transcript_path**: Path to the execution transcript (markdown file) +- **outputs_dir**: Directory containing output files from execution + +## Process + +### Step 1: Read the Transcript + +1. Read the transcript file completely +2. Note the eval prompt, execution steps, and final result +3. Identify any issues or errors documented + +### Step 2: Examine Output Files + +1. List files in outputs_dir +2. Read/examine each file relevant to the expectations. If outputs aren't plain text, use the inspection tools provided in your prompt — don't rely solely on what the transcript says the executor produced. +3. Note contents, structure, and quality + +### Step 3: Evaluate Each Assertion + +For each expectation: + +1. **Search for evidence** in the transcript and outputs +2. **Determine verdict**: + - **PASS**: Clear evidence the expectation is true AND the evidence reflects genuine task completion, not just surface-level compliance + - **FAIL**: No evidence, or evidence contradicts the expectation, or the evidence is superficial (e.g., correct filename but empty/wrong content) +3. 
**Cite the evidence**: Quote the specific text or describe what you found + +### Step 4: Extract and Verify Claims + +Beyond the predefined expectations, extract implicit claims from the outputs and verify them: + +1. **Extract claims** from the transcript and outputs: + - Factual statements ("The form has 12 fields") + - Process claims ("Used pypdf to fill the form") + - Quality claims ("All fields were filled correctly") + +2. **Verify each claim**: + - **Factual claims**: Can be checked against the outputs or external sources + - **Process claims**: Can be verified from the transcript + - **Quality claims**: Evaluate whether the claim is justified + +3. **Flag unverifiable claims**: Note claims that cannot be verified with available information + +This catches issues that predefined expectations might miss. + +### Step 5: Read User Notes + +If `{outputs_dir}/user_notes.md` exists: + +1. Read it and note any uncertainties or issues flagged by the executor +2. Include relevant concerns in the grading output +3. These may reveal problems even when expectations pass + +### Step 6: Critique the Evals + +After grading, consider whether the evals themselves could be improved. Only surface suggestions when there's a clear gap. + +Good suggestions test meaningful outcomes — assertions that are hard to satisfy without actually doing the work correctly. Think about what makes an assertion _discriminating_: it passes when the skill genuinely succeeds and fails when it doesn't. + +Suggestions worth raising: + +- An assertion that passed but would also pass for a clearly wrong output (e.g., checking filename existence but not file content) +- An important outcome you observed — good or bad — that no assertion covers at all +- An assertion that can't actually be verified from the available outputs + +Keep the bar high. The goal is to flag things the eval author would say "good catch" about, not to nitpick every assertion. 
+ +### Step 7: Write Grading Results + +Save results to `{outputs_dir}/../grading.json` (sibling to outputs_dir). + +## Grading Criteria + +**PASS when**: + +- The transcript or outputs clearly demonstrate the expectation is true +- Specific evidence can be cited +- The evidence reflects genuine substance, not just surface compliance (e.g., a file exists AND contains correct content, not just the right filename) + +**FAIL when**: + +- No evidence found for the expectation +- Evidence contradicts the expectation +- The expectation cannot be verified from available information +- The evidence is superficial — the assertion is technically satisfied but the underlying task outcome is wrong or incomplete +- The output appears to meet the assertion by coincidence rather than by actually doing the work + +**When uncertain**: The burden of proof to pass is on the expectation. + +### Step 8: Read Executor Metrics and Timing + +1. If `{outputs_dir}/metrics.json` exists, read it and include in grading output +2. If `{outputs_dir}/../timing.json` exists, read it and include timing data + +## Output Format + +Write a JSON file with this structure: + +```json +{ + "expectations": [ + { + "text": "The output includes the name 'John Smith'", + "passed": true, + "evidence": "Found in transcript Step 3: 'Extracted names: John Smith, Sarah Johnson'" + }, + { + "text": "The spreadsheet has a SUM formula in cell B10", + "passed": false, + "evidence": "No spreadsheet was created. The output was a text file." 
+ }, + { + "text": "The assistant used the skill's OCR script", + "passed": true, + "evidence": "Transcript Step 2 shows: 'Tool: Bash - python ocr_script.py image.png'" + } + ], + "summary": { + "passed": 2, + "failed": 1, + "total": 3, + "pass_rate": 0.67 + }, + "execution_metrics": { + "tool_calls": { + "Read": 5, + "Write": 2, + "Bash": 8 + }, + "total_tool_calls": 15, + "total_steps": 6, + "errors_encountered": 0, + "output_chars": 12450, + "transcript_chars": 3200 + }, + "timing": { + "executor_duration_seconds": 165.0, + "grader_duration_seconds": 26.0, + "total_duration_seconds": 191.0 + }, + "claims": [ + { + "claim": "The form has 12 fillable fields", + "type": "factual", + "verified": true, + "evidence": "Counted 12 fields in field_info.json" + }, + { + "claim": "All required fields were populated", + "type": "quality", + "verified": false, + "evidence": "Reference section was left blank despite data being available" + } + ], + "user_notes_summary": { + "uncertainties": ["Used 2023 data, may be stale"], + "needs_review": [], + "workarounds": ["Fell back to text overlay for non-fillable fields"] + }, + "eval_feedback": { + "suggestions": [ + { + "assertion": "The output includes the name 'John Smith'", + "reason": "A hallucinated document that mentions the name would also pass — consider checking it appears as the primary contact with matching phone and email from the input" + }, + { + "reason": "No assertion checks whether the extracted phone numbers match the input — I observed incorrect numbers in the output that went uncaught" + } + ], + "overall": "Assertions check presence but not correctness. Consider adding content verification." 
+ } +} +``` + +## Field Descriptions + +- **expectations**: Array of graded expectations + - **text**: The original expectation text + - **passed**: Boolean - true if expectation passes + - **evidence**: Specific quote or description supporting the verdict +- **summary**: Aggregate statistics + - **passed**: Count of passed expectations + - **failed**: Count of failed expectations + - **total**: Total expectations evaluated + - **pass_rate**: Fraction passed (0.0 to 1.0) +- **execution_metrics**: Copied from executor's metrics.json (if available) + - **output_chars**: Total character count of output files (proxy for tokens) + - **transcript_chars**: Character count of transcript +- **timing**: Wall clock timing from timing.json (if available) + - **executor_duration_seconds**: Time spent in executor subagent + - **total_duration_seconds**: Total elapsed time for the run +- **claims**: Extracted and verified claims from the output + - **claim**: The statement being verified + - **type**: "factual", "process", or "quality" + - **verified**: Boolean - whether the claim holds + - **evidence**: Supporting or contradicting evidence +- **user_notes_summary**: Issues flagged by the executor + - **uncertainties**: Things the executor wasn't sure about + - **needs_review**: Items requiring human attention + - **workarounds**: Places where the skill didn't work as expected +- **eval_feedback**: Improvement suggestions for the evals (only when warranted) + - **suggestions**: List of concrete suggestions, each with a `reason` and optionally an `assertion` it relates to + - **overall**: Brief assessment — can be "No suggestions, evals look solid" if nothing to flag + +## Guidelines + +- **Be objective**: Base verdicts on evidence, not assumptions +- **Be specific**: Quote the exact text that supports your verdict +- **Be thorough**: Check both transcript and output files +- **Be consistent**: Apply the same standard to each expectation +- **Explain failures**: Make it clear why 
evidence was insufficient +- **No partial credit**: Each expectation is pass or fail, not partial diff --git a/.agents/skills/skill-creator/assets/eval_review.html b/.agents/skills/skill-creator/assets/eval_review.html new file mode 100644 index 000000000000..cab585030eaf --- /dev/null +++ b/.agents/skills/skill-creator/assets/eval_review.html @@ -0,0 +1,287 @@ + + + + + + Eval Set Review - __SKILL_NAME_PLACEHOLDER__ + + + + + + +

Eval Set Review: __SKILL_NAME_PLACEHOLDER__

+

Current description: __SKILL_DESCRIPTION_PLACEHOLDER__

+ +
+ + +
+ + + + + + + + + + +
QueryShould TriggerActions
+ +

+ + + + diff --git a/.agents/skills/skill-creator/eval-viewer/generate_review.py b/.agents/skills/skill-creator/eval-viewer/generate_review.py new file mode 100644 index 000000000000..7fa5978631fe --- /dev/null +++ b/.agents/skills/skill-creator/eval-viewer/generate_review.py @@ -0,0 +1,471 @@ +#!/usr/bin/env python3 +"""Generate and serve a review page for eval results. + +Reads the workspace directory, discovers runs (directories with outputs/), +embeds all output data into a self-contained HTML page, and serves it via +a tiny HTTP server. Feedback auto-saves to feedback.json in the workspace. + +Usage: + python generate_review.py [--port PORT] [--skill-name NAME] + python generate_review.py --previous-feedback /path/to/old/feedback.json + +No dependencies beyond the Python stdlib are required. +""" + +import argparse +import base64 +import json +import mimetypes +import os +import re +import signal +import subprocess +import sys +import time +import webbrowser +from functools import partial +from http.server import HTTPServer, BaseHTTPRequestHandler +from pathlib import Path + +# Files to exclude from output listings +METADATA_FILES = {"transcript.md", "user_notes.md", "metrics.json"} + +# Extensions we render as inline text +TEXT_EXTENSIONS = { + ".txt", ".md", ".json", ".csv", ".py", ".js", ".ts", ".tsx", ".jsx", + ".yaml", ".yml", ".xml", ".html", ".css", ".sh", ".rb", ".go", ".rs", + ".java", ".c", ".cpp", ".h", ".hpp", ".sql", ".r", ".toml", +} + +# Extensions we render as inline images +IMAGE_EXTENSIONS = {".png", ".jpg", ".jpeg", ".gif", ".svg", ".webp"} + +# MIME type overrides for common types +MIME_OVERRIDES = { + ".svg": "image/svg+xml", + ".xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", + ".docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document", + ".pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation", +} + + +def get_mime_type(path: Path) -> str: + ext = 
path.suffix.lower() + if ext in MIME_OVERRIDES: + return MIME_OVERRIDES[ext] + mime, _ = mimetypes.guess_type(str(path)) + return mime or "application/octet-stream" + + +def find_runs(workspace: Path) -> list[dict]: + """Recursively find directories that contain an outputs/ subdirectory.""" + runs: list[dict] = [] + _find_runs_recursive(workspace, workspace, runs) + runs.sort(key=lambda r: (r.get("eval_id", float("inf")), r["id"])) + return runs + + +def _find_runs_recursive(root: Path, current: Path, runs: list[dict]) -> None: + if not current.is_dir(): + return + + outputs_dir = current / "outputs" + if outputs_dir.is_dir(): + run = build_run(root, current) + if run: + runs.append(run) + return + + skip = {"node_modules", ".git", "__pycache__", "skill", "inputs"} + for child in sorted(current.iterdir()): + if child.is_dir() and child.name not in skip: + _find_runs_recursive(root, child, runs) + + +def build_run(root: Path, run_dir: Path) -> dict | None: + """Build a run dict with prompt, outputs, and grading data.""" + prompt = "" + eval_id = None + + # Try eval_metadata.json + for candidate in [run_dir / "eval_metadata.json", run_dir.parent / "eval_metadata.json"]: + if candidate.exists(): + try: + metadata = json.loads(candidate.read_text()) + prompt = metadata.get("prompt", "") + eval_id = metadata.get("eval_id") + except (json.JSONDecodeError, OSError): + pass + if prompt: + break + + # Fall back to transcript.md + if not prompt: + for candidate in [run_dir / "transcript.md", run_dir / "outputs" / "transcript.md"]: + if candidate.exists(): + try: + text = candidate.read_text() + match = re.search(r"## Eval Prompt\n\n([\s\S]*?)(?=\n##|$)", text) + if match: + prompt = match.group(1).strip() + except OSError: + pass + if prompt: + break + + if not prompt: + prompt = "(No prompt found)" + + run_id = str(run_dir.relative_to(root)).replace("/", "-").replace("\\", "-") + + # Collect output files + outputs_dir = run_dir / "outputs" + output_files: list[dict] = [] + 
if outputs_dir.is_dir(): + for f in sorted(outputs_dir.iterdir()): + if f.is_file() and f.name not in METADATA_FILES: + output_files.append(embed_file(f)) + + # Load grading if present + grading = None + for candidate in [run_dir / "grading.json", run_dir.parent / "grading.json"]: + if candidate.exists(): + try: + grading = json.loads(candidate.read_text()) + except (json.JSONDecodeError, OSError): + pass + if grading: + break + + return { + "id": run_id, + "prompt": prompt, + "eval_id": eval_id, + "outputs": output_files, + "grading": grading, + } + + +def embed_file(path: Path) -> dict: + """Read a file and return an embedded representation.""" + ext = path.suffix.lower() + mime = get_mime_type(path) + + if ext in TEXT_EXTENSIONS: + try: + content = path.read_text(errors="replace") + except OSError: + content = "(Error reading file)" + return { + "name": path.name, + "type": "text", + "content": content, + } + elif ext in IMAGE_EXTENSIONS: + try: + raw = path.read_bytes() + b64 = base64.b64encode(raw).decode("ascii") + except OSError: + return {"name": path.name, "type": "error", "content": "(Error reading file)"} + return { + "name": path.name, + "type": "image", + "mime": mime, + "data_uri": f"data:{mime};base64,{b64}", + } + elif ext == ".pdf": + try: + raw = path.read_bytes() + b64 = base64.b64encode(raw).decode("ascii") + except OSError: + return {"name": path.name, "type": "error", "content": "(Error reading file)"} + return { + "name": path.name, + "type": "pdf", + "data_uri": f"data:{mime};base64,{b64}", + } + elif ext == ".xlsx": + try: + raw = path.read_bytes() + b64 = base64.b64encode(raw).decode("ascii") + except OSError: + return {"name": path.name, "type": "error", "content": "(Error reading file)"} + return { + "name": path.name, + "type": "xlsx", + "data_b64": b64, + } + else: + # Binary / unknown — base64 download link + try: + raw = path.read_bytes() + b64 = base64.b64encode(raw).decode("ascii") + except OSError: + return {"name": path.name, 
"type": "error", "content": "(Error reading file)"} + return { + "name": path.name, + "type": "binary", + "mime": mime, + "data_uri": f"data:{mime};base64,{b64}", + } + + +def load_previous_iteration(workspace: Path) -> dict[str, dict]: + """Load previous iteration's feedback and outputs. + + Returns a map of run_id -> {"feedback": str, "outputs": list[dict]}. + """ + result: dict[str, dict] = {} + + # Load feedback + feedback_map: dict[str, str] = {} + feedback_path = workspace / "feedback.json" + if feedback_path.exists(): + try: + data = json.loads(feedback_path.read_text()) + feedback_map = { + r["run_id"]: r["feedback"] + for r in data.get("reviews", []) + if r.get("feedback", "").strip() + } + except (json.JSONDecodeError, OSError, KeyError): + pass + + # Load runs (to get outputs) + prev_runs = find_runs(workspace) + for run in prev_runs: + result[run["id"]] = { + "feedback": feedback_map.get(run["id"], ""), + "outputs": run.get("outputs", []), + } + + # Also add feedback for run_ids that had feedback but no matching run + for run_id, fb in feedback_map.items(): + if run_id not in result: + result[run_id] = {"feedback": fb, "outputs": []} + + return result + + +def generate_html( + runs: list[dict], + skill_name: str, + previous: dict[str, dict] | None = None, + benchmark: dict | None = None, +) -> str: + """Generate the complete standalone HTML page with embedded data.""" + template_path = Path(__file__).parent / "viewer.html" + template = template_path.read_text() + + # Build previous_feedback and previous_outputs maps for the template + previous_feedback: dict[str, str] = {} + previous_outputs: dict[str, list[dict]] = {} + if previous: + for run_id, data in previous.items(): + if data.get("feedback"): + previous_feedback[run_id] = data["feedback"] + if data.get("outputs"): + previous_outputs[run_id] = data["outputs"] + + embedded = { + "skill_name": skill_name, + "runs": runs, + "previous_feedback": previous_feedback, + "previous_outputs": 
previous_outputs, + } + if benchmark: + embedded["benchmark"] = benchmark + + data_json = json.dumps(embedded) + + return template.replace("/*__EMBEDDED_DATA__*/", f"const EMBEDDED_DATA = {data_json};") + + +# --------------------------------------------------------------------------- +# HTTP server (stdlib only, zero dependencies) +# --------------------------------------------------------------------------- + +def _kill_port(port: int) -> None: + """Kill any process listening on the given port.""" + try: + result = subprocess.run( + ["lsof", "-ti", f":{port}"], + capture_output=True, text=True, timeout=5, + ) + for pid_str in result.stdout.strip().split("\n"): + if pid_str.strip(): + try: + os.kill(int(pid_str.strip()), signal.SIGTERM) + except (ProcessLookupError, ValueError): + pass + if result.stdout.strip(): + time.sleep(0.5) + except subprocess.TimeoutExpired: + pass + except FileNotFoundError: + print("Note: lsof not found, cannot check if port is in use", file=sys.stderr) + +class ReviewHandler(BaseHTTPRequestHandler): + """Serves the review HTML and handles feedback saves. + + Regenerates the HTML on each page load so that refreshing the browser + picks up new eval outputs without restarting the server. 
+ """ + + def __init__( + self, + workspace: Path, + skill_name: str, + feedback_path: Path, + previous: dict[str, dict], + benchmark_path: Path | None, + *args, + **kwargs, + ): + self.workspace = workspace + self.skill_name = skill_name + self.feedback_path = feedback_path + self.previous = previous + self.benchmark_path = benchmark_path + super().__init__(*args, **kwargs) + + def do_GET(self) -> None: + if self.path == "/" or self.path == "/index.html": + # Regenerate HTML on each request (re-scans workspace for new outputs) + runs = find_runs(self.workspace) + benchmark = None + if self.benchmark_path and self.benchmark_path.exists(): + try: + benchmark = json.loads(self.benchmark_path.read_text()) + except (json.JSONDecodeError, OSError): + pass + html = generate_html(runs, self.skill_name, self.previous, benchmark) + content = html.encode("utf-8") + self.send_response(200) + self.send_header("Content-Type", "text/html; charset=utf-8") + self.send_header("Content-Length", str(len(content))) + self.end_headers() + self.wfile.write(content) + elif self.path == "/api/feedback": + data = b"{}" + if self.feedback_path.exists(): + data = self.feedback_path.read_bytes() + self.send_response(200) + self.send_header("Content-Type", "application/json") + self.send_header("Content-Length", str(len(data))) + self.end_headers() + self.wfile.write(data) + else: + self.send_error(404) + + def do_POST(self) -> None: + if self.path == "/api/feedback": + length = int(self.headers.get("Content-Length", 0)) + body = self.rfile.read(length) + try: + data = json.loads(body) + if not isinstance(data, dict) or "reviews" not in data: + raise ValueError("Expected JSON object with 'reviews' key") + self.feedback_path.write_text(json.dumps(data, indent=2) + "\n") + resp = b'{"ok":true}' + self.send_response(200) + except (json.JSONDecodeError, OSError, ValueError) as e: + resp = json.dumps({"error": str(e)}).encode() + self.send_response(500) + self.send_header("Content-Type", 
"application/json") + self.send_header("Content-Length", str(len(resp))) + self.end_headers() + self.wfile.write(resp) + else: + self.send_error(404) + + def log_message(self, format: str, *args: object) -> None: + # Suppress request logging to keep terminal clean + pass + + +def main() -> None: + parser = argparse.ArgumentParser(description="Generate and serve eval review") + parser.add_argument("workspace", type=Path, help="Path to workspace directory") + parser.add_argument("--port", "-p", type=int, default=3117, help="Server port (default: 3117)") + parser.add_argument("--skill-name", "-n", type=str, default=None, help="Skill name for header") + parser.add_argument( + "--previous-workspace", type=Path, default=None, + help="Path to previous iteration's workspace (shows old outputs and feedback as context)", + ) + parser.add_argument( + "--benchmark", type=Path, default=None, + help="Path to benchmark.json to show in the Benchmark tab", + ) + parser.add_argument( + "--static", "-s", type=Path, default=None, + help="Write standalone HTML to this path instead of starting a server", + ) + args = parser.parse_args() + + workspace = args.workspace.resolve() + if not workspace.is_dir(): + print(f"Error: {workspace} is not a directory", file=sys.stderr) + sys.exit(1) + + runs = find_runs(workspace) + if not runs: + print(f"No runs found in {workspace}", file=sys.stderr) + sys.exit(1) + + skill_name = args.skill_name or workspace.name.replace("-workspace", "") + feedback_path = workspace / "feedback.json" + + previous: dict[str, dict] = {} + if args.previous_workspace: + previous = load_previous_iteration(args.previous_workspace.resolve()) + + benchmark_path = args.benchmark.resolve() if args.benchmark else None + benchmark = None + if benchmark_path and benchmark_path.exists(): + try: + benchmark = json.loads(benchmark_path.read_text()) + except (json.JSONDecodeError, OSError): + pass + + if args.static: + html = generate_html(runs, skill_name, previous, benchmark) + 
args.static.parent.mkdir(parents=True, exist_ok=True) + args.static.write_text(html) + print(f"\n Static viewer written to: {args.static}\n") + sys.exit(0) + + # Kill any existing process on the target port + port = args.port + _kill_port(port) + handler = partial(ReviewHandler, workspace, skill_name, feedback_path, previous, benchmark_path) + try: + server = HTTPServer(("127.0.0.1", port), handler) + except OSError: + # Port still in use after kill attempt — find a free one + server = HTTPServer(("127.0.0.1", 0), handler) + port = server.server_address[1] + + url = f"http://localhost:{port}" + print(f"\n Eval Viewer") + print(f" ─────────────────────────────────") + print(f" URL: {url}") + print(f" Workspace: {workspace}") + print(f" Feedback: {feedback_path}") + if previous: + print(f" Previous: {args.previous_workspace} ({len(previous)} runs)") + if benchmark_path: + print(f" Benchmark: {benchmark_path}") + print(f"\n Press Ctrl+C to stop.\n") + + webbrowser.open(url) + + try: + server.serve_forever() + except KeyboardInterrupt: + print("\nStopped.") + server.server_close() + + +if __name__ == "__main__": + main() diff --git a/.agents/skills/skill-creator/eval-viewer/viewer.html b/.agents/skills/skill-creator/eval-viewer/viewer.html new file mode 100644 index 000000000000..06f98d1109f7 --- /dev/null +++ b/.agents/skills/skill-creator/eval-viewer/viewer.html @@ -0,0 +1,1437 @@ + + + + + + Eval Review + + + + + + + +
+
+
+

Eval Review:

+
+ Review each output and leave feedback below. Navigate with arrow keys or buttons. When done, copy feedback + and paste into Claude Code. +
+
+
+
+ + + + + +
+
+ +
+
+ Prompt +
+
+
+
+
+ + +
+
Output
+
+
No output files found
+
+
+ + + + + + + + +
+
Your Feedback
+
+ + + +
+
+
+ + +
+ + + +
+
+
+ No benchmark data available. Run a benchmark to see quantitative results here. +
+
+
+
+ + +
+
+

Review Complete

+

Your feedback has been saved. Go back to your Claude Code session and tell Claude you're done reviewing.

+
+ +
+
+
+ + +
+ + + + diff --git a/.agents/skills/skill-creator/references/schemas.md b/.agents/skills/skill-creator/references/schemas.md new file mode 100644 index 000000000000..5ea33ee1b249 --- /dev/null +++ b/.agents/skills/skill-creator/references/schemas.md @@ -0,0 +1,420 @@ +# JSON Schemas + +This document defines the JSON schemas used by skill-creator. + +--- + +## evals.json + +Defines the evals for a skill. Located at `evals/evals.json` within the skill directory. + +```json +{ + "skill_name": "example-skill", + "evals": [ + { + "id": 1, + "prompt": "User's example prompt", + "expected_output": "Description of expected result", + "files": ["evals/files/sample1.pdf"], + "expectations": ["The output includes X", "The skill used script Y"] + } + ] +} +``` + +**Fields:** + +- `skill_name`: Name matching the skill's frontmatter +- `evals[].id`: Unique integer identifier +- `evals[].prompt`: The task to execute +- `evals[].expected_output`: Human-readable description of success +- `evals[].files`: Optional list of input file paths (relative to skill root) +- `evals[].expectations`: List of verifiable statements + +--- + +## history.json + +Tracks version progression in Improve mode. Located at workspace root. + +```json +{ + "started_at": "2026-01-15T10:30:00Z", + "skill_name": "pdf", + "current_best": "v2", + "iterations": [ + { + "version": "v0", + "parent": null, + "expectation_pass_rate": 0.65, + "grading_result": "baseline", + "is_current_best": false + }, + { + "version": "v1", + "parent": "v0", + "expectation_pass_rate": 0.75, + "grading_result": "won", + "is_current_best": false + }, + { + "version": "v2", + "parent": "v1", + "expectation_pass_rate": 0.85, + "grading_result": "won", + "is_current_best": true + } + ] +} +``` + +**Fields:** + +- `started_at`: ISO timestamp of when improvement started +- `skill_name`: Name of the skill being improved +- `current_best`: Version identifier of the best performer +- `iterations[].version`: Version identifier (v0, v1, ...) 
+- `iterations[].parent`: Parent version this was derived from +- `iterations[].expectation_pass_rate`: Pass rate from grading +- `iterations[].grading_result`: "baseline", "won", "lost", or "tie" +- `iterations[].is_current_best`: Whether this is the current best version + +--- + +## grading.json + +Output from the grader agent. Located at `/grading.json`. + +```json +{ + "expectations": [ + { + "text": "The output includes the name 'John Smith'", + "passed": true, + "evidence": "Found in transcript Step 3: 'Extracted names: John Smith, Sarah Johnson'" + }, + { + "text": "The spreadsheet has a SUM formula in cell B10", + "passed": false, + "evidence": "No spreadsheet was created. The output was a text file." + } + ], + "summary": { + "passed": 2, + "failed": 1, + "total": 3, + "pass_rate": 0.67 + }, + "execution_metrics": { + "tool_calls": { + "Read": 5, + "Write": 2, + "Bash": 8 + }, + "total_tool_calls": 15, + "total_steps": 6, + "errors_encountered": 0, + "output_chars": 12450, + "transcript_chars": 3200 + }, + "timing": { + "executor_duration_seconds": 165.0, + "grader_duration_seconds": 26.0, + "total_duration_seconds": 191.0 + }, + "claims": [ + { + "claim": "The form has 12 fillable fields", + "type": "factual", + "verified": true, + "evidence": "Counted 12 fields in field_info.json" + } + ], + "user_notes_summary": { + "uncertainties": ["Used 2023 data, may be stale"], + "needs_review": [], + "workarounds": ["Fell back to text overlay for non-fillable fields"] + }, + "eval_feedback": { + "suggestions": [ + { + "assertion": "The output includes the name 'John Smith'", + "reason": "A hallucinated document that mentions the name would also pass" + } + ], + "overall": "Assertions check presence but not correctness." 
+ } +} +``` + +**Fields:** + +- `expectations[]`: Graded expectations with evidence +- `summary`: Aggregate pass/fail counts +- `execution_metrics`: Tool usage and output size (from executor's metrics.json) +- `timing`: Wall clock timing (from timing.json) +- `claims`: Extracted and verified claims from the output +- `user_notes_summary`: Issues flagged by the executor +- `eval_feedback`: (optional) Improvement suggestions for the evals, only present when the grader identifies issues worth raising + +--- + +## metrics.json + +Output from the executor agent. Located at `/outputs/metrics.json`. + +```json +{ + "tool_calls": { + "Read": 5, + "Write": 2, + "Bash": 8, + "Edit": 1, + "Glob": 2, + "Grep": 0 + }, + "total_tool_calls": 18, + "total_steps": 6, + "files_created": ["filled_form.pdf", "field_values.json"], + "errors_encountered": 0, + "output_chars": 12450, + "transcript_chars": 3200 +} +``` + +**Fields:** + +- `tool_calls`: Count per tool type +- `total_tool_calls`: Sum of all tool calls +- `total_steps`: Number of major execution steps +- `files_created`: List of output files created +- `errors_encountered`: Number of errors during execution +- `output_chars`: Total character count of output files +- `transcript_chars`: Character count of transcript + +--- + +## timing.json + +Wall clock timing for a run. Located at `/timing.json`. + +**How to capture:** When a subagent task completes, the task notification includes `total_tokens` and `duration_ms`. Save these immediately — they are not persisted anywhere else and cannot be recovered after the fact. + +```json +{ + "total_tokens": 84852, + "duration_ms": 23332, + "total_duration_seconds": 23.3, + "executor_start": "2026-01-15T10:30:00Z", + "executor_end": "2026-01-15T10:32:45Z", + "executor_duration_seconds": 165.0, + "grader_start": "2026-01-15T10:32:46Z", + "grader_end": "2026-01-15T10:33:12Z", + "grader_duration_seconds": 26.0 +} +``` + +--- + +## benchmark.json + +Output from Benchmark mode. 
Located at `benchmarks//benchmark.json`. + +```json +{ + "metadata": { + "skill_name": "pdf", + "skill_path": "/path/to/pdf", + "executor_model": "claude-sonnet-4-20250514", + "analyzer_model": "most-capable-model", + "timestamp": "2026-01-15T10:30:00Z", + "evals_run": [1, 2, 3], + "runs_per_configuration": 3 + }, + + "runs": [ + { + "eval_id": 1, + "eval_name": "Ocean", + "configuration": "with_skill", + "run_number": 1, + "result": { + "pass_rate": 0.85, + "passed": 6, + "failed": 1, + "total": 7, + "time_seconds": 42.5, + "tokens": 3800, + "tool_calls": 18, + "errors": 0 + }, + "expectations": [{ "text": "...", "passed": true, "evidence": "..." }], + "notes": ["Used 2023 data, may be stale", "Fell back to text overlay for non-fillable fields"] + } + ], + + "run_summary": { + "with_skill": { + "pass_rate": { "mean": 0.85, "stddev": 0.05, "min": 0.8, "max": 0.9 }, + "time_seconds": { "mean": 45.0, "stddev": 12.0, "min": 32.0, "max": 58.0 }, + "tokens": { "mean": 3800, "stddev": 400, "min": 3200, "max": 4100 } + }, + "without_skill": { + "pass_rate": { "mean": 0.35, "stddev": 0.08, "min": 0.28, "max": 0.45 }, + "time_seconds": { "mean": 32.0, "stddev": 8.0, "min": 24.0, "max": 42.0 }, + "tokens": { "mean": 2100, "stddev": 300, "min": 1800, "max": 2500 } + }, + "delta": { + "pass_rate": "+0.50", + "time_seconds": "+13.0", + "tokens": "+1700" + } + }, + + "notes": [ + "Assertion 'Output is a PDF file' passes 100% in both configurations - may not differentiate skill value", + "Eval 3 shows high variance (50% ± 40%) - may be flaky or model-dependent", + "Without-skill runs consistently fail on table extraction expectations", + "Skill adds 13s average execution time but improves pass rate by 50%" + ] +} +``` + +**Fields:** + +- `metadata`: Information about the benchmark run + - `skill_name`: Name of the skill + - `timestamp`: When the benchmark was run + - `evals_run`: List of eval names or IDs + - `runs_per_configuration`: Number of runs per config (e.g. 
3) +- `runs[]`: Individual run results + - `eval_id`: Numeric eval identifier + - `eval_name`: Human-readable eval name (used as section header in the viewer) + - `configuration`: Must be `"with_skill"` or `"without_skill"` (the viewer uses this exact string for grouping and color coding) + - `run_number`: Integer run number (1, 2, 3...) + - `result`: Nested object with `pass_rate`, `passed`, `total`, `time_seconds`, `tokens`, `errors` +- `run_summary`: Statistical aggregates per configuration + - `with_skill` / `without_skill`: Each contains `pass_rate`, `time_seconds`, `tokens` objects with `mean` and `stddev` fields + - `delta`: Difference strings like `"+0.50"`, `"+13.0"`, `"+1700"` +- `notes`: Freeform observations from the analyzer + +**Important:** The viewer reads these field names exactly. Using `config` instead of `configuration`, or putting `pass_rate` at the top level of a run instead of nested under `result`, will cause the viewer to show empty/zero values. Always reference this schema when generating benchmark.json manually. + +--- + +## comparison.json + +Output from blind comparator. Located at `/comparison-N.json`. + +```json +{ + "winner": "A", + "reasoning": "Output A provides a complete solution with proper formatting and all required fields. 
Output B is missing the date field and has formatting inconsistencies.", + "rubric": { + "A": { + "content": { + "correctness": 5, + "completeness": 5, + "accuracy": 4 + }, + "structure": { + "organization": 4, + "formatting": 5, + "usability": 4 + }, + "content_score": 4.7, + "structure_score": 4.3, + "overall_score": 9.0 + }, + "B": { + "content": { + "correctness": 3, + "completeness": 2, + "accuracy": 3 + }, + "structure": { + "organization": 3, + "formatting": 2, + "usability": 3 + }, + "content_score": 2.7, + "structure_score": 2.7, + "overall_score": 5.4 + } + }, + "output_quality": { + "A": { + "score": 9, + "strengths": ["Complete solution", "Well-formatted", "All fields present"], + "weaknesses": ["Minor style inconsistency in header"] + }, + "B": { + "score": 5, + "strengths": ["Readable output", "Correct basic structure"], + "weaknesses": ["Missing date field", "Formatting inconsistencies", "Partial data extraction"] + } + }, + "expectation_results": { + "A": { + "passed": 4, + "total": 5, + "pass_rate": 0.8, + "details": [{ "text": "Output includes name", "passed": true }] + }, + "B": { + "passed": 3, + "total": 5, + "pass_rate": 0.6, + "details": [{ "text": "Output includes name", "passed": true }] + } + } +} +``` + +--- + +## analysis.json + +Output from post-hoc analyzer. Located at `/analysis.json`. 
+ +```json +{ + "comparison_summary": { + "winner": "A", + "winner_skill": "path/to/winner/skill", + "loser_skill": "path/to/loser/skill", + "comparator_reasoning": "Brief summary of why comparator chose winner" + }, + "winner_strengths": [ + "Clear step-by-step instructions for handling multi-page documents", + "Included validation script that caught formatting errors" + ], + "loser_weaknesses": [ + "Vague instruction 'process the document appropriately' led to inconsistent behavior", + "No script for validation, agent had to improvise" + ], + "instruction_following": { + "winner": { + "score": 9, + "issues": ["Minor: skipped optional logging step"] + }, + "loser": { + "score": 6, + "issues": ["Did not use the skill's formatting template", "Invented own approach instead of following step 3"] + } + }, + "improvement_suggestions": [ + { + "priority": "high", + "category": "instructions", + "suggestion": "Replace 'process the document appropriately' with explicit steps", + "expected_impact": "Would eliminate ambiguity that caused inconsistent behavior" + } + ], + "transcript_insights": { + "winner_execution_pattern": "Read skill -> Followed 5-step process -> Used validation script", + "loser_execution_pattern": "Read skill -> Unclear on approach -> Tried 3 different methods" + } +} +``` diff --git a/.agents/skills/skill-creator/scripts/__init__.py b/.agents/skills/skill-creator/scripts/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/.agents/skills/skill-creator/scripts/aggregate_benchmark.py b/.agents/skills/skill-creator/scripts/aggregate_benchmark.py new file mode 100755 index 000000000000..3e66e8c105be --- /dev/null +++ b/.agents/skills/skill-creator/scripts/aggregate_benchmark.py @@ -0,0 +1,401 @@ +#!/usr/bin/env python3 +""" +Aggregate individual run results into benchmark summary statistics. 
+ +Reads grading.json files from run directories and produces: +- run_summary with mean, stddev, min, max for each metric +- delta between with_skill and without_skill configurations + +Usage: + python aggregate_benchmark.py + +Example: + python aggregate_benchmark.py benchmarks/2026-01-15T10-30-00/ + +The script supports two directory layouts: + + Workspace layout (from skill-creator iterations): + / + └── eval-N/ + ├── with_skill/ + │ ├── run-1/grading.json + │ └── run-2/grading.json + └── without_skill/ + ├── run-1/grading.json + └── run-2/grading.json + + Legacy layout (with runs/ subdirectory): + / + └── runs/ + └── eval-N/ + ├── with_skill/ + │ └── run-1/grading.json + └── without_skill/ + └── run-1/grading.json +""" + +import argparse +import json +import math +import sys +from datetime import datetime, timezone +from pathlib import Path + + +def calculate_stats(values: list[float]) -> dict: + """Calculate mean, stddev, min, max for a list of values.""" + if not values: + return {"mean": 0.0, "stddev": 0.0, "min": 0.0, "max": 0.0} + + n = len(values) + mean = sum(values) / n + + if n > 1: + variance = sum((x - mean) ** 2 for x in values) / (n - 1) + stddev = math.sqrt(variance) + else: + stddev = 0.0 + + return { + "mean": round(mean, 4), + "stddev": round(stddev, 4), + "min": round(min(values), 4), + "max": round(max(values), 4) + } + + +def load_run_results(benchmark_dir: Path) -> dict: + """ + Load all run results from a benchmark directory. + + Returns dict keyed by config name (e.g. "with_skill"/"without_skill", + or "new_skill"/"old_skill"), each containing a list of run results. 
+ """ + # Support both layouts: eval dirs directly under benchmark_dir, or under runs/ + runs_dir = benchmark_dir / "runs" + if runs_dir.exists(): + search_dir = runs_dir + elif list(benchmark_dir.glob("eval-*")): + search_dir = benchmark_dir + else: + print(f"No eval directories found in {benchmark_dir} or {benchmark_dir / 'runs'}") + return {} + + results: dict[str, list] = {} + + for eval_idx, eval_dir in enumerate(sorted(search_dir.glob("eval-*"))): + metadata_path = eval_dir / "eval_metadata.json" + if metadata_path.exists(): + try: + with open(metadata_path) as mf: + eval_id = json.load(mf).get("eval_id", eval_idx) + except (json.JSONDecodeError, OSError): + eval_id = eval_idx + else: + try: + eval_id = int(eval_dir.name.split("-")[1]) + except ValueError: + eval_id = eval_idx + + # Discover config directories dynamically rather than hardcoding names + for config_dir in sorted(eval_dir.iterdir()): + if not config_dir.is_dir(): + continue + # Skip non-config directories (inputs, outputs, etc.) 
+ if not list(config_dir.glob("run-*")): + continue + config = config_dir.name + if config not in results: + results[config] = [] + + for run_dir in sorted(config_dir.glob("run-*")): + run_number = int(run_dir.name.split("-")[1]) + grading_file = run_dir / "grading.json" + + if not grading_file.exists(): + print(f"Warning: grading.json not found in {run_dir}") + continue + + try: + with open(grading_file) as f: + grading = json.load(f) + except json.JSONDecodeError as e: + print(f"Warning: Invalid JSON in {grading_file}: {e}") + continue + + # Extract metrics + result = { + "eval_id": eval_id, + "run_number": run_number, + "pass_rate": grading.get("summary", {}).get("pass_rate", 0.0), + "passed": grading.get("summary", {}).get("passed", 0), + "failed": grading.get("summary", {}).get("failed", 0), + "total": grading.get("summary", {}).get("total", 0), + } + + # Extract timing — check grading.json first, then sibling timing.json + timing = grading.get("timing", {}) + result["time_seconds"] = timing.get("total_duration_seconds", 0.0) + timing_file = run_dir / "timing.json" + if result["time_seconds"] == 0.0 and timing_file.exists(): + try: + with open(timing_file) as tf: + timing_data = json.load(tf) + result["time_seconds"] = timing_data.get("total_duration_seconds", 0.0) + result["tokens"] = timing_data.get("total_tokens", 0) + except json.JSONDecodeError: + pass + + # Extract metrics if available + metrics = grading.get("execution_metrics", {}) + result["tool_calls"] = metrics.get("total_tool_calls", 0) + if not result.get("tokens"): + result["tokens"] = metrics.get("output_chars", 0) + result["errors"] = metrics.get("errors_encountered", 0) + + # Extract expectations — viewer requires fields: text, passed, evidence + raw_expectations = grading.get("expectations", []) + for exp in raw_expectations: + if "text" not in exp or "passed" not in exp: + print(f"Warning: expectation in {grading_file} missing required fields (text, passed, evidence): {exp}") + 
result["expectations"] = raw_expectations + + # Extract notes from user_notes_summary + notes_summary = grading.get("user_notes_summary", {}) + notes = [] + notes.extend(notes_summary.get("uncertainties", [])) + notes.extend(notes_summary.get("needs_review", [])) + notes.extend(notes_summary.get("workarounds", [])) + result["notes"] = notes + + results[config].append(result) + + return results + + +def aggregate_results(results: dict) -> dict: + """ + Aggregate run results into summary statistics. + + Returns run_summary with stats for each configuration and delta. + """ + run_summary = {} + configs = list(results.keys()) + + for config in configs: + runs = results.get(config, []) + + if not runs: + run_summary[config] = { + "pass_rate": {"mean": 0.0, "stddev": 0.0, "min": 0.0, "max": 0.0}, + "time_seconds": {"mean": 0.0, "stddev": 0.0, "min": 0.0, "max": 0.0}, + "tokens": {"mean": 0, "stddev": 0, "min": 0, "max": 0} + } + continue + + pass_rates = [r["pass_rate"] for r in runs] + times = [r["time_seconds"] for r in runs] + tokens = [r.get("tokens", 0) for r in runs] + + run_summary[config] = { + "pass_rate": calculate_stats(pass_rates), + "time_seconds": calculate_stats(times), + "tokens": calculate_stats(tokens) + } + + # Calculate delta between the first two configs (if two exist) + if len(configs) >= 2: + primary = run_summary.get(configs[0], {}) + baseline = run_summary.get(configs[1], {}) + else: + primary = run_summary.get(configs[0], {}) if configs else {} + baseline = {} + + delta_pass_rate = primary.get("pass_rate", {}).get("mean", 0) - baseline.get("pass_rate", {}).get("mean", 0) + delta_time = primary.get("time_seconds", {}).get("mean", 0) - baseline.get("time_seconds", {}).get("mean", 0) + delta_tokens = primary.get("tokens", {}).get("mean", 0) - baseline.get("tokens", {}).get("mean", 0) + + run_summary["delta"] = { + "pass_rate": f"{delta_pass_rate:+.2f}", + "time_seconds": f"{delta_time:+.1f}", + "tokens": f"{delta_tokens:+.0f}" + } + + return 
run_summary + + +def generate_benchmark(benchmark_dir: Path, skill_name: str = "", skill_path: str = "") -> dict: + """ + Generate complete benchmark.json from run results. + """ + results = load_run_results(benchmark_dir) + run_summary = aggregate_results(results) + + # Build runs array for benchmark.json + runs = [] + for config in results: + for result in results[config]: + runs.append({ + "eval_id": result["eval_id"], + "configuration": config, + "run_number": result["run_number"], + "result": { + "pass_rate": result["pass_rate"], + "passed": result["passed"], + "failed": result["failed"], + "total": result["total"], + "time_seconds": result["time_seconds"], + "tokens": result.get("tokens", 0), + "tool_calls": result.get("tool_calls", 0), + "errors": result.get("errors", 0) + }, + "expectations": result["expectations"], + "notes": result["notes"] + }) + + # Determine eval IDs from results + eval_ids = sorted(set( + r["eval_id"] + for config in results.values() + for r in config + )) + + benchmark = { + "metadata": { + "skill_name": skill_name or "", + "skill_path": skill_path or "", + "executor_model": "", + "analyzer_model": "", + "timestamp": datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ"), + "evals_run": eval_ids, + "runs_per_configuration": 3 + }, + "runs": runs, + "run_summary": run_summary, + "notes": [] # To be filled by analyzer + } + + return benchmark + + +def generate_markdown(benchmark: dict) -> str: + """Generate human-readable benchmark.md from benchmark data.""" + metadata = benchmark["metadata"] + run_summary = benchmark["run_summary"] + + # Determine config names (excluding "delta") + configs = [k for k in run_summary if k != "delta"] + config_a = configs[0] if len(configs) >= 1 else "config_a" + config_b = configs[1] if len(configs) >= 2 else "config_b" + label_a = config_a.replace("_", " ").title() + label_b = config_b.replace("_", " ").title() + + lines = [ + f"# Skill Benchmark: {metadata['skill_name']}", + "", + f"**Model**: 
{metadata['executor_model']}", + f"**Date**: {metadata['timestamp']}", + f"**Evals**: {', '.join(map(str, metadata['evals_run']))} ({metadata['runs_per_configuration']} runs each per configuration)", + "", + "## Summary", + "", + f"| Metric | {label_a} | {label_b} | Delta |", + "|--------|------------|---------------|-------|", + ] + + a_summary = run_summary.get(config_a, {}) + b_summary = run_summary.get(config_b, {}) + delta = run_summary.get("delta", {}) + + # Format pass rate + a_pr = a_summary.get("pass_rate", {}) + b_pr = b_summary.get("pass_rate", {}) + lines.append(f"| Pass Rate | {a_pr.get('mean', 0)*100:.0f}% ± {a_pr.get('stddev', 0)*100:.0f}% | {b_pr.get('mean', 0)*100:.0f}% ± {b_pr.get('stddev', 0)*100:.0f}% | {delta.get('pass_rate', '—')} |") + + # Format time + a_time = a_summary.get("time_seconds", {}) + b_time = b_summary.get("time_seconds", {}) + lines.append(f"| Time | {a_time.get('mean', 0):.1f}s ± {a_time.get('stddev', 0):.1f}s | {b_time.get('mean', 0):.1f}s ± {b_time.get('stddev', 0):.1f}s | {delta.get('time_seconds', '—')}s |") + + # Format tokens + a_tokens = a_summary.get("tokens", {}) + b_tokens = b_summary.get("tokens", {}) + lines.append(f"| Tokens | {a_tokens.get('mean', 0):.0f} ± {a_tokens.get('stddev', 0):.0f} | {b_tokens.get('mean', 0):.0f} ± {b_tokens.get('stddev', 0):.0f} | {delta.get('tokens', '—')} |") + + # Notes section + if benchmark.get("notes"): + lines.extend([ + "", + "## Notes", + "" + ]) + for note in benchmark["notes"]: + lines.append(f"- {note}") + + return "\n".join(lines) + + +def main(): + parser = argparse.ArgumentParser( + description="Aggregate benchmark run results into summary statistics" + ) + parser.add_argument( + "benchmark_dir", + type=Path, + help="Path to the benchmark directory" + ) + parser.add_argument( + "--skill-name", + default="", + help="Name of the skill being benchmarked" + ) + parser.add_argument( + "--skill-path", + default="", + help="Path to the skill being benchmarked" + ) + 
parser.add_argument( + "--output", "-o", + type=Path, + help="Output path for benchmark.json (default: /benchmark.json)" + ) + + args = parser.parse_args() + + if not args.benchmark_dir.exists(): + print(f"Directory not found: {args.benchmark_dir}") + sys.exit(1) + + # Generate benchmark + benchmark = generate_benchmark(args.benchmark_dir, args.skill_name, args.skill_path) + + # Determine output paths + output_json = args.output or (args.benchmark_dir / "benchmark.json") + output_md = output_json.with_suffix(".md") + + # Write benchmark.json + with open(output_json, "w") as f: + json.dump(benchmark, f, indent=2) + print(f"Generated: {output_json}") + + # Write benchmark.md + markdown = generate_markdown(benchmark) + with open(output_md, "w") as f: + f.write(markdown) + print(f"Generated: {output_md}") + + # Print summary + run_summary = benchmark["run_summary"] + configs = [k for k in run_summary if k != "delta"] + delta = run_summary.get("delta", {}) + + print(f"\nSummary:") + for config in configs: + pr = run_summary[config]["pass_rate"]["mean"] + label = config.replace("_", " ").title() + print(f" {label}: {pr*100:.1f}% pass rate") + print(f" Delta: {delta.get('pass_rate', '—')}") + + +if __name__ == "__main__": + main() diff --git a/.agents/skills/skill-creator/scripts/generate_report.py b/.agents/skills/skill-creator/scripts/generate_report.py new file mode 100755 index 000000000000..959e30a0014e --- /dev/null +++ b/.agents/skills/skill-creator/scripts/generate_report.py @@ -0,0 +1,326 @@ +#!/usr/bin/env python3 +"""Generate an HTML report from run_loop.py output. + +Takes the JSON output from run_loop.py and generates a visual HTML report +showing each description attempt with check/x for each test case. +Distinguishes between train and test queries. 
+""" + +import argparse +import html +import json +import sys +from pathlib import Path + + +def generate_html(data: dict, auto_refresh: bool = False, skill_name: str = "") -> str: + """Generate HTML report from loop output data. If auto_refresh is True, adds a meta refresh tag.""" + history = data.get("history", []) + holdout = data.get("holdout", 0) + title_prefix = html.escape(skill_name + " \u2014 ") if skill_name else "" + + # Get all unique queries from train and test sets, with should_trigger info + train_queries: list[dict] = [] + test_queries: list[dict] = [] + if history: + for r in history[0].get("train_results", history[0].get("results", [])): + train_queries.append({"query": r["query"], "should_trigger": r.get("should_trigger", True)}) + if history[0].get("test_results"): + for r in history[0].get("test_results", []): + test_queries.append({"query": r["query"], "should_trigger": r.get("should_trigger", True)}) + + refresh_tag = ' \n' if auto_refresh else "" + + html_parts = [""" + + + +""" + refresh_tag + """ """ + title_prefix + """Skill Description Optimization + + + + + + +

""" + title_prefix + """Skill Description Optimization

+
+ Optimizing your skill's description. This page updates automatically as Claude tests different versions of your skill's description. Each row is an iteration — a new description attempt. The columns show test queries: green checkmarks mean the skill triggered correctly (or correctly didn't trigger), red crosses mean it got it wrong. The "Train" score shows performance on queries used to improve the description; the "Test" score shows performance on held-out queries the optimizer hasn't seen. When it's done, Claude will apply the best-performing description to your skill. +
+"""] + + # Summary section + best_test_score = data.get('best_test_score') + best_train_score = data.get('best_train_score') + html_parts.append(f""" +
+

Original: {html.escape(data.get('original_description', 'N/A'))}

+

Best: {html.escape(data.get('best_description', 'N/A'))}

+

Best Score: {data.get('best_score', 'N/A')} {'(test)' if best_test_score else '(train)'}

+

Iterations: {data.get('iterations_run', 0)} | Train: {data.get('train_size', '?')} | Test: {data.get('test_size', '?')}

+
+""") + + # Legend + html_parts.append(""" +
+ Query columns: + Should trigger + Should NOT trigger + Train + Test +
+""") + + # Table header + html_parts.append(""" +
+ + + + + + + +""") + + # Add column headers for train queries + for qinfo in train_queries: + polarity = "positive-col" if qinfo["should_trigger"] else "negative-col" + html_parts.append(f' \n') + + # Add column headers for test queries (different color) + for qinfo in test_queries: + polarity = "positive-col" if qinfo["should_trigger"] else "negative-col" + html_parts.append(f' \n') + + html_parts.append(""" + + +""") + + # Find best iteration for highlighting + if test_queries: + best_iter = max(history, key=lambda h: h.get("test_passed") or 0).get("iteration") + else: + best_iter = max(history, key=lambda h: h.get("train_passed", h.get("passed", 0))).get("iteration") + + # Add rows for each iteration + for h in history: + iteration = h.get("iteration", "?") + train_passed = h.get("train_passed", h.get("passed", 0)) + train_total = h.get("train_total", h.get("total", 0)) + test_passed = h.get("test_passed") + test_total = h.get("test_total") + description = h.get("description", "") + train_results = h.get("train_results", h.get("results", [])) + test_results = h.get("test_results", []) + + # Create lookups for results by query + train_by_query = {r["query"]: r for r in train_results} + test_by_query = {r["query"]: r for r in test_results} if test_results else {} + + # Compute aggregate correct/total runs across all retries + def aggregate_runs(results: list[dict]) -> tuple[int, int]: + correct = 0 + total = 0 + for r in results: + runs = r.get("runs", 0) + triggers = r.get("triggers", 0) + total += runs + if r.get("should_trigger", True): + correct += triggers + else: + correct += runs - triggers + return correct, total + + train_correct, train_runs = aggregate_runs(train_results) + test_correct, test_runs = aggregate_runs(test_results) + + # Determine score classes + def score_class(correct: int, total: int) -> str: + if total > 0: + ratio = correct / total + if ratio >= 0.8: + return "score-good" + elif ratio >= 0.5: + return "score-ok" + return "score-bad" + 
+ train_class = score_class(train_correct, train_runs) + test_class = score_class(test_correct, test_runs) + + row_class = "best-row" if iteration == best_iter else "" + + html_parts.append(f""" + + + + +""") + + # Add result for each train query + for qinfo in train_queries: + r = train_by_query.get(qinfo["query"], {}) + did_pass = r.get("pass", False) + triggers = r.get("triggers", 0) + runs = r.get("runs", 0) + + icon = "✓" if did_pass else "✗" + css_class = "pass" if did_pass else "fail" + + html_parts.append(f' \n') + + # Add result for each test query (with different background) + for qinfo in test_queries: + r = test_by_query.get(qinfo["query"], {}) + did_pass = r.get("pass", False) + triggers = r.get("triggers", 0) + runs = r.get("runs", 0) + + icon = "✓" if did_pass else "✗" + css_class = "pass" if did_pass else "fail" + + html_parts.append(f' \n') + + html_parts.append(" \n") + + html_parts.append(""" +
IterTrainTestDescription{html.escape(qinfo["query"])}{html.escape(qinfo["query"])}
{iteration}{train_correct}/{train_runs}{test_correct}/{test_runs}{html.escape(description)}{icon}{triggers}/{runs}{icon}{triggers}/{runs}
+
+""") + + html_parts.append(""" + + +""") + + return "".join(html_parts) + + +def main(): + parser = argparse.ArgumentParser(description="Generate HTML report from run_loop output") + parser.add_argument("input", help="Path to JSON output from run_loop.py (or - for stdin)") + parser.add_argument("-o", "--output", default=None, help="Output HTML file (default: stdout)") + parser.add_argument("--skill-name", default="", help="Skill name to include in the report title") + args = parser.parse_args() + + if args.input == "-": + data = json.load(sys.stdin) + else: + data = json.loads(Path(args.input).read_text()) + + html_output = generate_html(data, skill_name=args.skill_name) + + if args.output: + Path(args.output).write_text(html_output) + print(f"Report written to {args.output}", file=sys.stderr) + else: + print(html_output) + + +if __name__ == "__main__": + main() diff --git a/.agents/skills/skill-creator/scripts/improve_description.py b/.agents/skills/skill-creator/scripts/improve_description.py new file mode 100755 index 000000000000..06bcec761224 --- /dev/null +++ b/.agents/skills/skill-creator/scripts/improve_description.py @@ -0,0 +1,247 @@ +#!/usr/bin/env python3 +"""Improve a skill description based on eval results. + +Takes eval results (from run_eval.py) and generates an improved description +by calling `claude -p` as a subprocess (same auth pattern as run_eval.py — +uses the session's Claude Code auth, no separate ANTHROPIC_API_KEY needed). +""" + +import argparse +import json +import os +import re +import subprocess +import sys +from pathlib import Path + +from scripts.utils import parse_skill_md + + +def _call_claude(prompt: str, model: str | None, timeout: int = 300) -> str: + """Run `claude -p` with the prompt on stdin and return the text response. + + Prompt goes over stdin (not argv) because it embeds the full SKILL.md + body and can easily exceed comfortable argv length. 
+ """ + cmd = ["claude", "-p", "--output-format", "text"] + if model: + cmd.extend(["--model", model]) + + # Remove CLAUDECODE env var to allow nesting claude -p inside a + # Claude Code session. The guard is for interactive terminal conflicts; + # programmatic subprocess usage is safe. Same pattern as run_eval.py. + env = {k: v for k, v in os.environ.items() if k != "CLAUDECODE"} + + result = subprocess.run( + cmd, + input=prompt, + capture_output=True, + text=True, + env=env, + timeout=timeout, + ) + if result.returncode != 0: + raise RuntimeError( + f"claude -p exited {result.returncode}\nstderr: {result.stderr}" + ) + return result.stdout + + +def improve_description( + skill_name: str, + skill_content: str, + current_description: str, + eval_results: dict, + history: list[dict], + model: str, + test_results: dict | None = None, + log_dir: Path | None = None, + iteration: int | None = None, +) -> str: + """Call Claude to improve the description based on eval results.""" + failed_triggers = [ + r for r in eval_results["results"] + if r["should_trigger"] and not r["pass"] + ] + false_triggers = [ + r for r in eval_results["results"] + if not r["should_trigger"] and not r["pass"] + ] + + # Build scores summary + train_score = f"{eval_results['summary']['passed']}/{eval_results['summary']['total']}" + if test_results: + test_score = f"{test_results['summary']['passed']}/{test_results['summary']['total']}" + scores_summary = f"Train: {train_score}, Test: {test_score}" + else: + scores_summary = f"Train: {train_score}" + + prompt = f"""You are optimizing a skill description for a Claude Code skill called "{skill_name}". 
A "skill" is sort of like a prompt, but with progressive disclosure -- there's a title and description that Claude sees when deciding whether to use the skill, and then if it does use the skill, it reads the .md file which has lots more details and potentially links to other resources in the skill folder like helper files and scripts and additional documentation or examples. + +The description appears in Claude's "available_skills" list. When a user sends a query, Claude decides whether to invoke the skill based solely on the title and on this description. Your goal is to write a description that triggers for relevant queries, and doesn't trigger for irrelevant ones. + +Here's the current description: + +"{current_description}" + + +Current scores ({scores_summary}): + +""" + if failed_triggers: + prompt += "FAILED TO TRIGGER (should have triggered but didn't):\n" + for r in failed_triggers: + prompt += f' - "{r["query"]}" (triggered {r["triggers"]}/{r["runs"]} times)\n' + prompt += "\n" + + if false_triggers: + prompt += "FALSE TRIGGERS (triggered but shouldn't have):\n" + for r in false_triggers: + prompt += f' - "{r["query"]}" (triggered {r["triggers"]}/{r["runs"]} times)\n' + prompt += "\n" + + if history: + prompt += "PREVIOUS ATTEMPTS (do NOT repeat these — try something structurally different):\n\n" + for h in history: + train_s = f"{h.get('train_passed', h.get('passed', 0))}/{h.get('train_total', h.get('total', 0))}" + test_s = f"{h.get('test_passed', '?')}/{h.get('test_total', '?')}" if h.get('test_passed') is not None else None + score_str = f"train={train_s}" + (f", test={test_s}" if test_s else "") + prompt += f'\n' + prompt += f'Description: "{h["description"]}"\n' + if "results" in h: + prompt += "Train results:\n" + for r in h["results"]: + status = "PASS" if r["pass"] else "FAIL" + prompt += f' [{status}] "{r["query"][:80]}" (triggered {r["triggers"]}/{r["runs"]})\n' + if h.get("note"): + prompt += f'Note: {h["note"]}\n' + prompt += "\n\n" + + 
prompt += f""" + +Skill content (for context on what the skill does): + +{skill_content} + + +Based on the failures, write a new and improved description that is more likely to trigger correctly. When I say "based on the failures", it's a bit of a tricky line to walk because we don't want to overfit to the specific cases you're seeing. So what I DON'T want you to do is produce an ever-expanding list of specific queries that this skill should or shouldn't trigger for. Instead, try to generalize from the failures to broader categories of user intent and situations where this skill would be useful or not useful. The reason for this is twofold: + +1. Avoid overfitting +2. The list might get loooong and it's injected into ALL queries and there might be a lot of skills, so we don't want to blow too much space on any given description. + +Concretely, your description should not be more than about 100-200 words, even if that comes at the cost of accuracy. There is a hard limit of 1024 characters — descriptions over that will be truncated, so stay comfortably under it. + +Here are some tips that we've found to work well in writing these descriptions: +- The skill should be phrased in the imperative -- "Use this skill for" rather than "this skill does" +- The skill description should focus on the user's intent, what they are trying to achieve, vs. the implementation details of how the skill works. +- The description competes with other skills for Claude's attention — make it distinctive and immediately recognizable. +- If you're getting lots of failures after repeated attempts, change things up. Try different sentence structures or wordings. + +I'd encourage you to be creative and mix up the style in different iterations since you'll have multiple opportunities to try different approaches and we'll just grab the highest-scoring one at the end. 
+
+Please respond with only the new description text in <description> tags, nothing else."""
+
+    text = _call_claude(prompt, model)
+
+    match = re.search(r"<description>(.*?)</description>", text, re.DOTALL)
+    description = match.group(1).strip().strip('"') if match else text.strip().strip('"')
+
+    transcript: dict = {
+        "iteration": iteration,
+        "prompt": prompt,
+        "response": text,
+        "parsed_description": description,
+        "char_count": len(description),
+        "over_limit": len(description) > 1024,
+    }
+
+    # Safety net: the prompt already states the 1024-char hard limit, but if
+    # the model blew past it anyway, make one fresh single-turn call that
+    # quotes the too-long version and asks for a shorter rewrite. (The old
+    # SDK path did this as a true multi-turn; `claude -p` is one-shot, so we
+    # inline the prior output into the new prompt instead.)
+    if len(description) > 1024:
+        shorten_prompt = (
+            f"{prompt}\n\n"
+            f"---\n\n"
+            f"A previous attempt produced this description, which at "
+            f"{len(description)} characters is over the 1024-character hard limit:\n\n"
+            f'"{description}"\n\n'
+            f"Rewrite it to be under 1024 characters while keeping the most "
+            f"important trigger words and intent coverage. Respond with only "
+            f"the new description in <description> tags."
+        )
+        shorten_text = _call_claude(shorten_prompt, model)
+        match = re.search(r"<description>(.*?)</description>", shorten_text, re.DOTALL)
+        shortened = match.group(1).strip().strip('"') if match else shorten_text.strip().strip('"')
+
+        transcript["rewrite_prompt"] = shorten_prompt
+        transcript["rewrite_response"] = shorten_text
+        transcript["rewrite_description"] = shortened
+        transcript["rewrite_char_count"] = len(shortened)
+        description = shortened
+
+    transcript["final_description"] = description
+
+    if log_dir:
+        log_dir.mkdir(parents=True, exist_ok=True)
+        log_file = log_dir / f"improve_iter_{iteration or 'unknown'}.json"
+        log_file.write_text(json.dumps(transcript, indent=2))
+
+    return description
+
+
+def main():
+    parser = argparse.ArgumentParser(description="Improve a skill description based on eval results")
+    parser.add_argument("--eval-results", required=True, help="Path to eval results JSON (from run_eval.py)")
+    parser.add_argument("--skill-path", required=True, help="Path to skill directory")
+    parser.add_argument("--history", default=None, help="Path to history JSON (previous attempts)")
+    parser.add_argument("--model", required=True, help="Model for improvement")
+    parser.add_argument("--verbose", action="store_true", help="Print thinking to stderr")
+    args = parser.parse_args()
+
+    skill_path = Path(args.skill_path)
+    if not (skill_path / "SKILL.md").exists():
+        print(f"Error: No SKILL.md found at {skill_path}", file=sys.stderr)
+        sys.exit(1)
+
+    eval_results = json.loads(Path(args.eval_results).read_text())
+    history = []
+    if args.history:
+        history = json.loads(Path(args.history).read_text())
+
+    name, _, content = parse_skill_md(skill_path)
+    current_description = eval_results["description"]
+
+    if args.verbose:
+        print(f"Current: {current_description}", file=sys.stderr)
+        print(f"Score: {eval_results['summary']['passed']}/{eval_results['summary']['total']}", file=sys.stderr)
+
+    new_description = improve_description(
+        skill_name=name,
+        skill_content=content,
+        current_description=current_description,
+        eval_results=eval_results,
+        history=history,
+        model=args.model,
+    )
+
+    if args.verbose:
+        print(f"Improved: {new_description}", file=sys.stderr)
+
+    # Output as JSON with both the new description and updated history
+    output = {
+        "description": new_description,
+        "history": history + [{
+            "description": current_description,
+            "passed": eval_results["summary"]["passed"],
+            "failed": eval_results["summary"]["failed"],
+            "total": eval_results["summary"]["total"],
+            "results": eval_results["results"],
+        }],
+    }
+    print(json.dumps(output, indent=2))
+
+
+if __name__ == "__main__":
+    main()
diff --git a/.agents/skills/skill-creator/scripts/package_skill.py b/.agents/skills/skill-creator/scripts/package_skill.py
new file mode 100755
index 000000000000..f48eac444656
--- /dev/null
+++ b/.agents/skills/skill-creator/scripts/package_skill.py
@@ -0,0 +1,136 @@
+#!/usr/bin/env python3
+"""
+Skill Packager - Creates a distributable .skill file of a skill folder
+
+Usage:
+    python utils/package_skill.py <skill-path> [output-directory]
+
+Example:
+    python utils/package_skill.py skills/public/my-skill
+    python utils/package_skill.py skills/public/my-skill ./dist
+"""
+
+import fnmatch
+import sys
+import zipfile
+from pathlib import Path
+from scripts.quick_validate import validate_skill
+
+# Patterns to exclude when packaging skills.
+EXCLUDE_DIRS = {"__pycache__", "node_modules"}
+EXCLUDE_GLOBS = {"*.pyc"}
+EXCLUDE_FILES = {".DS_Store"}
+# Directories excluded only at the skill root (not when nested deeper).
+ROOT_EXCLUDE_DIRS = {"evals"}
+
+
+def should_exclude(rel_path: Path) -> bool:
+    """Check if a path should be excluded from packaging."""
+    parts = rel_path.parts
+    if any(part in EXCLUDE_DIRS for part in parts):
+        return True
+    # rel_path is relative to skill_path.parent, so parts[0] is the skill
+    # folder name and parts[1] (if present) is the first subdir.
+ if len(parts) > 1 and parts[1] in ROOT_EXCLUDE_DIRS: + return True + name = rel_path.name + if name in EXCLUDE_FILES: + return True + return any(fnmatch.fnmatch(name, pat) for pat in EXCLUDE_GLOBS) + + +def package_skill(skill_path, output_dir=None): + """ + Package a skill folder into a .skill file. + + Args: + skill_path: Path to the skill folder + output_dir: Optional output directory for the .skill file (defaults to current directory) + + Returns: + Path to the created .skill file, or None if error + """ + skill_path = Path(skill_path).resolve() + + # Validate skill folder exists + if not skill_path.exists(): + print(f"❌ Error: Skill folder not found: {skill_path}") + return None + + if not skill_path.is_dir(): + print(f"❌ Error: Path is not a directory: {skill_path}") + return None + + # Validate SKILL.md exists + skill_md = skill_path / "SKILL.md" + if not skill_md.exists(): + print(f"❌ Error: SKILL.md not found in {skill_path}") + return None + + # Run validation before packaging + print("🔍 Validating skill...") + valid, message = validate_skill(skill_path) + if not valid: + print(f"❌ Validation failed: {message}") + print(" Please fix the validation errors before packaging.") + return None + print(f"✅ {message}\n") + + # Determine output location + skill_name = skill_path.name + if output_dir: + output_path = Path(output_dir).resolve() + output_path.mkdir(parents=True, exist_ok=True) + else: + output_path = Path.cwd() + + skill_filename = output_path / f"{skill_name}.skill" + + # Create the .skill file (zip format) + try: + with zipfile.ZipFile(skill_filename, 'w', zipfile.ZIP_DEFLATED) as zipf: + # Walk through the skill directory, excluding build artifacts + for file_path in skill_path.rglob('*'): + if not file_path.is_file(): + continue + arcname = file_path.relative_to(skill_path.parent) + if should_exclude(arcname): + print(f" Skipped: {arcname}") + continue + zipf.write(file_path, arcname) + print(f" Added: {arcname}") + + print(f"\n✅ Successfully 
packaged skill to: {skill_filename}") + return skill_filename + + except Exception as e: + print(f"❌ Error creating .skill file: {e}") + return None + + +def main(): + if len(sys.argv) < 2: + print("Usage: python utils/package_skill.py [output-directory]") + print("\nExample:") + print(" python utils/package_skill.py skills/public/my-skill") + print(" python utils/package_skill.py skills/public/my-skill ./dist") + sys.exit(1) + + skill_path = sys.argv[1] + output_dir = sys.argv[2] if len(sys.argv) > 2 else None + + print(f"📦 Packaging skill: {skill_path}") + if output_dir: + print(f" Output directory: {output_dir}") + print() + + result = package_skill(skill_path, output_dir) + + if result: + sys.exit(0) + else: + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/.agents/skills/skill-creator/scripts/quick_validate.py b/.agents/skills/skill-creator/scripts/quick_validate.py new file mode 100755 index 000000000000..ed8e1dddce77 --- /dev/null +++ b/.agents/skills/skill-creator/scripts/quick_validate.py @@ -0,0 +1,103 @@ +#!/usr/bin/env python3 +""" +Quick validation script for skills - minimal version +""" + +import sys +import os +import re +import yaml +from pathlib import Path + +def validate_skill(skill_path): + """Basic validation of a skill""" + skill_path = Path(skill_path) + + # Check SKILL.md exists + skill_md = skill_path / 'SKILL.md' + if not skill_md.exists(): + return False, "SKILL.md not found" + + # Read and validate frontmatter + content = skill_md.read_text() + if not content.startswith('---'): + return False, "No YAML frontmatter found" + + # Extract frontmatter + match = re.match(r'^---\n(.*?)\n---', content, re.DOTALL) + if not match: + return False, "Invalid frontmatter format" + + frontmatter_text = match.group(1) + + # Parse YAML frontmatter + try: + frontmatter = yaml.safe_load(frontmatter_text) + if not isinstance(frontmatter, dict): + return False, "Frontmatter must be a YAML dictionary" + except yaml.YAMLError as e: + return 
False, f"Invalid YAML in frontmatter: {e}" + + # Define allowed properties + ALLOWED_PROPERTIES = {'name', 'description', 'license', 'allowed-tools', 'metadata', 'compatibility'} + + # Check for unexpected properties (excluding nested keys under metadata) + unexpected_keys = set(frontmatter.keys()) - ALLOWED_PROPERTIES + if unexpected_keys: + return False, ( + f"Unexpected key(s) in SKILL.md frontmatter: {', '.join(sorted(unexpected_keys))}. " + f"Allowed properties are: {', '.join(sorted(ALLOWED_PROPERTIES))}" + ) + + # Check required fields + if 'name' not in frontmatter: + return False, "Missing 'name' in frontmatter" + if 'description' not in frontmatter: + return False, "Missing 'description' in frontmatter" + + # Extract name for validation + name = frontmatter.get('name', '') + if not isinstance(name, str): + return False, f"Name must be a string, got {type(name).__name__}" + name = name.strip() + if name: + # Check naming convention (kebab-case: lowercase with hyphens) + if not re.match(r'^[a-z0-9-]+$', name): + return False, f"Name '{name}' should be kebab-case (lowercase letters, digits, and hyphens only)" + if name.startswith('-') or name.endswith('-') or '--' in name: + return False, f"Name '{name}' cannot start/end with hyphen or contain consecutive hyphens" + # Check name length (max 64 characters per spec) + if len(name) > 64: + return False, f"Name is too long ({len(name)} characters). Maximum is 64 characters." 
+ + # Extract and validate description + description = frontmatter.get('description', '') + if not isinstance(description, str): + return False, f"Description must be a string, got {type(description).__name__}" + description = description.strip() + if description: + # Check for angle brackets + if '<' in description or '>' in description: + return False, "Description cannot contain angle brackets (< or >)" + # Check description length (max 1024 characters per spec) + if len(description) > 1024: + return False, f"Description is too long ({len(description)} characters). Maximum is 1024 characters." + + # Validate compatibility field if present (optional) + compatibility = frontmatter.get('compatibility', '') + if compatibility: + if not isinstance(compatibility, str): + return False, f"Compatibility must be a string, got {type(compatibility).__name__}" + if len(compatibility) > 500: + return False, f"Compatibility is too long ({len(compatibility)} characters). Maximum is 500 characters." + + return True, "Skill is valid!" + +if __name__ == "__main__": + if len(sys.argv) != 2: + print("Usage: python quick_validate.py ") + sys.exit(1) + + valid, message = validate_skill(sys.argv[1]) + print(message) + sys.exit(0 if valid else 1) \ No newline at end of file diff --git a/.agents/skills/skill-creator/scripts/run_eval.py b/.agents/skills/skill-creator/scripts/run_eval.py new file mode 100755 index 000000000000..e58c70bea39d --- /dev/null +++ b/.agents/skills/skill-creator/scripts/run_eval.py @@ -0,0 +1,310 @@ +#!/usr/bin/env python3 +"""Run trigger evaluation for a skill description. + +Tests whether a skill's description causes Claude to trigger (read the skill) +for a set of queries. Outputs results as JSON. 
+""" + +import argparse +import json +import os +import select +import subprocess +import sys +import time +import uuid +from concurrent.futures import ProcessPoolExecutor, as_completed +from pathlib import Path + +from scripts.utils import parse_skill_md + + +def find_project_root() -> Path: + """Find the project root by walking up from cwd looking for .claude/. + + Mimics how Claude Code discovers its project root, so the command file + we create ends up where claude -p will look for it. + """ + current = Path.cwd() + for parent in [current, *current.parents]: + if (parent / ".claude").is_dir(): + return parent + return current + + +def run_single_query( + query: str, + skill_name: str, + skill_description: str, + timeout: int, + project_root: str, + model: str | None = None, +) -> bool: + """Run a single query and return whether the skill was triggered. + + Creates a command file in .claude/commands/ so it appears in Claude's + available_skills list, then runs `claude -p` with the raw query. + Uses --include-partial-messages to detect triggering early from + stream events (content_block_start) rather than waiting for the + full assistant message, which only arrives after tool execution. 
+ """ + unique_id = uuid.uuid4().hex[:8] + clean_name = f"{skill_name}-skill-{unique_id}" + project_commands_dir = Path(project_root) / ".claude" / "commands" + command_file = project_commands_dir / f"{clean_name}.md" + + try: + project_commands_dir.mkdir(parents=True, exist_ok=True) + # Use YAML block scalar to avoid breaking on quotes in description + indented_desc = "\n ".join(skill_description.split("\n")) + command_content = ( + f"---\n" + f"description: |\n" + f" {indented_desc}\n" + f"---\n\n" + f"# {skill_name}\n\n" + f"This skill handles: {skill_description}\n" + ) + command_file.write_text(command_content) + + cmd = [ + "claude", + "-p", query, + "--output-format", "stream-json", + "--verbose", + "--include-partial-messages", + ] + if model: + cmd.extend(["--model", model]) + + # Remove CLAUDECODE env var to allow nesting claude -p inside a + # Claude Code session. The guard is for interactive terminal conflicts; + # programmatic subprocess usage is safe. + env = {k: v for k, v in os.environ.items() if k != "CLAUDECODE"} + + process = subprocess.Popen( + cmd, + stdout=subprocess.PIPE, + stderr=subprocess.DEVNULL, + cwd=project_root, + env=env, + ) + + triggered = False + start_time = time.time() + buffer = "" + # Track state for stream event detection + pending_tool_name = None + accumulated_json = "" + + try: + while time.time() - start_time < timeout: + if process.poll() is not None: + remaining = process.stdout.read() + if remaining: + buffer += remaining.decode("utf-8", errors="replace") + break + + ready, _, _ = select.select([process.stdout], [], [], 1.0) + if not ready: + continue + + chunk = os.read(process.stdout.fileno(), 8192) + if not chunk: + break + buffer += chunk.decode("utf-8", errors="replace") + + while "\n" in buffer: + line, buffer = buffer.split("\n", 1) + line = line.strip() + if not line: + continue + + try: + event = json.loads(line) + except json.JSONDecodeError: + continue + + # Early detection via stream events + if 
event.get("type") == "stream_event": + se = event.get("event", {}) + se_type = se.get("type", "") + + if se_type == "content_block_start": + cb = se.get("content_block", {}) + if cb.get("type") == "tool_use": + tool_name = cb.get("name", "") + if tool_name in ("Skill", "Read"): + pending_tool_name = tool_name + accumulated_json = "" + else: + return False + + elif se_type == "content_block_delta" and pending_tool_name: + delta = se.get("delta", {}) + if delta.get("type") == "input_json_delta": + accumulated_json += delta.get("partial_json", "") + if clean_name in accumulated_json: + return True + + elif se_type in ("content_block_stop", "message_stop"): + if pending_tool_name: + return clean_name in accumulated_json + if se_type == "message_stop": + return False + + # Fallback: full assistant message + elif event.get("type") == "assistant": + message = event.get("message", {}) + for content_item in message.get("content", []): + if content_item.get("type") != "tool_use": + continue + tool_name = content_item.get("name", "") + tool_input = content_item.get("input", {}) + if tool_name == "Skill" and clean_name in tool_input.get("skill", ""): + triggered = True + elif tool_name == "Read" and clean_name in tool_input.get("file_path", ""): + triggered = True + return triggered + + elif event.get("type") == "result": + return triggered + finally: + # Clean up process on any exit path (return, exception, timeout) + if process.poll() is None: + process.kill() + process.wait() + + return triggered + finally: + if command_file.exists(): + command_file.unlink() + + +def run_eval( + eval_set: list[dict], + skill_name: str, + description: str, + num_workers: int, + timeout: int, + project_root: Path, + runs_per_query: int = 1, + trigger_threshold: float = 0.5, + model: str | None = None, +) -> dict: + """Run the full eval set and return results.""" + results = [] + + with ProcessPoolExecutor(max_workers=num_workers) as executor: + future_to_info = {} + for item in eval_set: + 
for run_idx in range(runs_per_query): + future = executor.submit( + run_single_query, + item["query"], + skill_name, + description, + timeout, + str(project_root), + model, + ) + future_to_info[future] = (item, run_idx) + + query_triggers: dict[str, list[bool]] = {} + query_items: dict[str, dict] = {} + for future in as_completed(future_to_info): + item, _ = future_to_info[future] + query = item["query"] + query_items[query] = item + if query not in query_triggers: + query_triggers[query] = [] + try: + query_triggers[query].append(future.result()) + except Exception as e: + print(f"Warning: query failed: {e}", file=sys.stderr) + query_triggers[query].append(False) + + for query, triggers in query_triggers.items(): + item = query_items[query] + trigger_rate = sum(triggers) / len(triggers) + should_trigger = item["should_trigger"] + if should_trigger: + did_pass = trigger_rate >= trigger_threshold + else: + did_pass = trigger_rate < trigger_threshold + results.append({ + "query": query, + "should_trigger": should_trigger, + "trigger_rate": trigger_rate, + "triggers": sum(triggers), + "runs": len(triggers), + "pass": did_pass, + }) + + passed = sum(1 for r in results if r["pass"]) + total = len(results) + + return { + "skill_name": skill_name, + "description": description, + "results": results, + "summary": { + "total": total, + "passed": passed, + "failed": total - passed, + }, + } + + +def main(): + parser = argparse.ArgumentParser(description="Run trigger evaluation for a skill description") + parser.add_argument("--eval-set", required=True, help="Path to eval set JSON file") + parser.add_argument("--skill-path", required=True, help="Path to skill directory") + parser.add_argument("--description", default=None, help="Override description to test") + parser.add_argument("--num-workers", type=int, default=10, help="Number of parallel workers") + parser.add_argument("--timeout", type=int, default=30, help="Timeout per query in seconds") + 
parser.add_argument("--runs-per-query", type=int, default=3, help="Number of runs per query") + parser.add_argument("--trigger-threshold", type=float, default=0.5, help="Trigger rate threshold") + parser.add_argument("--model", default=None, help="Model to use for claude -p (default: user's configured model)") + parser.add_argument("--verbose", action="store_true", help="Print progress to stderr") + args = parser.parse_args() + + eval_set = json.loads(Path(args.eval_set).read_text()) + skill_path = Path(args.skill_path) + + if not (skill_path / "SKILL.md").exists(): + print(f"Error: No SKILL.md found at {skill_path}", file=sys.stderr) + sys.exit(1) + + name, original_description, content = parse_skill_md(skill_path) + description = args.description or original_description + project_root = find_project_root() + + if args.verbose: + print(f"Evaluating: {description}", file=sys.stderr) + + output = run_eval( + eval_set=eval_set, + skill_name=name, + description=description, + num_workers=args.num_workers, + timeout=args.timeout, + project_root=project_root, + runs_per_query=args.runs_per_query, + trigger_threshold=args.trigger_threshold, + model=args.model, + ) + + if args.verbose: + summary = output["summary"] + print(f"Results: {summary['passed']}/{summary['total']} passed", file=sys.stderr) + for r in output["results"]: + status = "PASS" if r["pass"] else "FAIL" + rate_str = f"{r['triggers']}/{r['runs']}" + print(f" [{status}] rate={rate_str} expected={r['should_trigger']}: {r['query'][:70]}", file=sys.stderr) + + print(json.dumps(output, indent=2)) + + +if __name__ == "__main__": + main() diff --git a/.agents/skills/skill-creator/scripts/run_loop.py b/.agents/skills/skill-creator/scripts/run_loop.py new file mode 100755 index 000000000000..30a263d674ef --- /dev/null +++ b/.agents/skills/skill-creator/scripts/run_loop.py @@ -0,0 +1,328 @@ +#!/usr/bin/env python3 +"""Run the eval + improve loop until all pass or max iterations reached. 
+ +Combines run_eval.py and improve_description.py in a loop, tracking history +and returning the best description found. Supports train/test split to prevent +overfitting. +""" + +import argparse +import json +import random +import sys +import tempfile +import time +import webbrowser +from pathlib import Path + +from scripts.generate_report import generate_html +from scripts.improve_description import improve_description +from scripts.run_eval import find_project_root, run_eval +from scripts.utils import parse_skill_md + + +def split_eval_set(eval_set: list[dict], holdout: float, seed: int = 42) -> tuple[list[dict], list[dict]]: + """Split eval set into train and test sets, stratified by should_trigger.""" + random.seed(seed) + + # Separate by should_trigger + trigger = [e for e in eval_set if e["should_trigger"]] + no_trigger = [e for e in eval_set if not e["should_trigger"]] + + # Shuffle each group + random.shuffle(trigger) + random.shuffle(no_trigger) + + # Calculate split points + n_trigger_test = max(1, int(len(trigger) * holdout)) + n_no_trigger_test = max(1, int(len(no_trigger) * holdout)) + + # Split + test_set = trigger[:n_trigger_test] + no_trigger[:n_no_trigger_test] + train_set = trigger[n_trigger_test:] + no_trigger[n_no_trigger_test:] + + return train_set, test_set + + +def run_loop( + eval_set: list[dict], + skill_path: Path, + description_override: str | None, + num_workers: int, + timeout: int, + max_iterations: int, + runs_per_query: int, + trigger_threshold: float, + holdout: float, + model: str, + verbose: bool, + live_report_path: Path | None = None, + log_dir: Path | None = None, +) -> dict: + """Run the eval + improvement loop.""" + project_root = find_project_root() + name, original_description, content = parse_skill_md(skill_path) + current_description = description_override or original_description + + # Split into train/test if holdout > 0 + if holdout > 0: + train_set, test_set = split_eval_set(eval_set, holdout) + if verbose: + 
print(f"Split: {len(train_set)} train, {len(test_set)} test (holdout={holdout})", file=sys.stderr) + else: + train_set = eval_set + test_set = [] + + history = [] + exit_reason = "unknown" + + for iteration in range(1, max_iterations + 1): + if verbose: + print(f"\n{'='*60}", file=sys.stderr) + print(f"Iteration {iteration}/{max_iterations}", file=sys.stderr) + print(f"Description: {current_description}", file=sys.stderr) + print(f"{'='*60}", file=sys.stderr) + + # Evaluate train + test together in one batch for parallelism + all_queries = train_set + test_set + t0 = time.time() + all_results = run_eval( + eval_set=all_queries, + skill_name=name, + description=current_description, + num_workers=num_workers, + timeout=timeout, + project_root=project_root, + runs_per_query=runs_per_query, + trigger_threshold=trigger_threshold, + model=model, + ) + eval_elapsed = time.time() - t0 + + # Split results back into train/test by matching queries + train_queries_set = {q["query"] for q in train_set} + train_result_list = [r for r in all_results["results"] if r["query"] in train_queries_set] + test_result_list = [r for r in all_results["results"] if r["query"] not in train_queries_set] + + train_passed = sum(1 for r in train_result_list if r["pass"]) + train_total = len(train_result_list) + train_summary = {"passed": train_passed, "failed": train_total - train_passed, "total": train_total} + train_results = {"results": train_result_list, "summary": train_summary} + + if test_set: + test_passed = sum(1 for r in test_result_list if r["pass"]) + test_total = len(test_result_list) + test_summary = {"passed": test_passed, "failed": test_total - test_passed, "total": test_total} + test_results = {"results": test_result_list, "summary": test_summary} + else: + test_results = None + test_summary = None + + history.append({ + "iteration": iteration, + "description": current_description, + "train_passed": train_summary["passed"], + "train_failed": train_summary["failed"], + 
"train_total": train_summary["total"], + "train_results": train_results["results"], + "test_passed": test_summary["passed"] if test_summary else None, + "test_failed": test_summary["failed"] if test_summary else None, + "test_total": test_summary["total"] if test_summary else None, + "test_results": test_results["results"] if test_results else None, + # For backward compat with report generator + "passed": train_summary["passed"], + "failed": train_summary["failed"], + "total": train_summary["total"], + "results": train_results["results"], + }) + + # Write live report if path provided + if live_report_path: + partial_output = { + "original_description": original_description, + "best_description": current_description, + "best_score": "in progress", + "iterations_run": len(history), + "holdout": holdout, + "train_size": len(train_set), + "test_size": len(test_set), + "history": history, + } + live_report_path.write_text(generate_html(partial_output, auto_refresh=True, skill_name=name)) + + if verbose: + def print_eval_stats(label, results, elapsed): + pos = [r for r in results if r["should_trigger"]] + neg = [r for r in results if not r["should_trigger"]] + tp = sum(r["triggers"] for r in pos) + pos_runs = sum(r["runs"] for r in pos) + fn = pos_runs - tp + fp = sum(r["triggers"] for r in neg) + neg_runs = sum(r["runs"] for r in neg) + tn = neg_runs - fp + total = tp + tn + fp + fn + precision = tp / (tp + fp) if (tp + fp) > 0 else 1.0 + recall = tp / (tp + fn) if (tp + fn) > 0 else 1.0 + accuracy = (tp + tn) / total if total > 0 else 0.0 + print(f"{label}: {tp+tn}/{total} correct, precision={precision:.0%} recall={recall:.0%} accuracy={accuracy:.0%} ({elapsed:.1f}s)", file=sys.stderr) + for r in results: + status = "PASS" if r["pass"] else "FAIL" + rate_str = f"{r['triggers']}/{r['runs']}" + print(f" [{status}] rate={rate_str} expected={r['should_trigger']}: {r['query'][:60]}", file=sys.stderr) + + print_eval_stats("Train", train_results["results"], eval_elapsed) + 
if test_summary: + print_eval_stats("Test ", test_results["results"], 0) + + if train_summary["failed"] == 0: + exit_reason = f"all_passed (iteration {iteration})" + if verbose: + print(f"\nAll train queries passed on iteration {iteration}!", file=sys.stderr) + break + + if iteration == max_iterations: + exit_reason = f"max_iterations ({max_iterations})" + if verbose: + print(f"\nMax iterations reached ({max_iterations}).", file=sys.stderr) + break + + # Improve the description based on train results + if verbose: + print(f"\nImproving description...", file=sys.stderr) + + t0 = time.time() + # Strip test scores from history so improvement model can't see them + blinded_history = [ + {k: v for k, v in h.items() if not k.startswith("test_")} + for h in history + ] + new_description = improve_description( + skill_name=name, + skill_content=content, + current_description=current_description, + eval_results=train_results, + history=blinded_history, + model=model, + log_dir=log_dir, + iteration=iteration, + ) + improve_elapsed = time.time() - t0 + + if verbose: + print(f"Proposed ({improve_elapsed:.1f}s): {new_description}", file=sys.stderr) + + current_description = new_description + + # Find the best iteration by TEST score (or train if no test set) + if test_set: + best = max(history, key=lambda h: h["test_passed"] or 0) + best_score = f"{best['test_passed']}/{best['test_total']}" + else: + best = max(history, key=lambda h: h["train_passed"]) + best_score = f"{best['train_passed']}/{best['train_total']}" + + if verbose: + print(f"\nExit reason: {exit_reason}", file=sys.stderr) + print(f"Best score: {best_score} (iteration {best['iteration']})", file=sys.stderr) + + return { + "exit_reason": exit_reason, + "original_description": original_description, + "best_description": best["description"], + "best_score": best_score, + "best_train_score": f"{best['train_passed']}/{best['train_total']}", + "best_test_score": f"{best['test_passed']}/{best['test_total']}" if test_set 
else None, + "final_description": current_description, + "iterations_run": len(history), + "holdout": holdout, + "train_size": len(train_set), + "test_size": len(test_set), + "history": history, + } + + +def main(): + parser = argparse.ArgumentParser(description="Run eval + improve loop") + parser.add_argument("--eval-set", required=True, help="Path to eval set JSON file") + parser.add_argument("--skill-path", required=True, help="Path to skill directory") + parser.add_argument("--description", default=None, help="Override starting description") + parser.add_argument("--num-workers", type=int, default=10, help="Number of parallel workers") + parser.add_argument("--timeout", type=int, default=30, help="Timeout per query in seconds") + parser.add_argument("--max-iterations", type=int, default=5, help="Max improvement iterations") + parser.add_argument("--runs-per-query", type=int, default=3, help="Number of runs per query") + parser.add_argument("--trigger-threshold", type=float, default=0.5, help="Trigger rate threshold") + parser.add_argument("--holdout", type=float, default=0.4, help="Fraction of eval set to hold out for testing (0 to disable)") + parser.add_argument("--model", required=True, help="Model for improvement") + parser.add_argument("--verbose", action="store_true", help="Print progress to stderr") + parser.add_argument("--report", default="auto", help="Generate HTML report at this path (default: 'auto' for temp file, 'none' to disable)") + parser.add_argument("--results-dir", default=None, help="Save all outputs (results.json, report.html, log.txt) to a timestamped subdirectory here") + args = parser.parse_args() + + eval_set = json.loads(Path(args.eval_set).read_text()) + skill_path = Path(args.skill_path) + + if not (skill_path / "SKILL.md").exists(): + print(f"Error: No SKILL.md found at {skill_path}", file=sys.stderr) + sys.exit(1) + + name, _, _ = parse_skill_md(skill_path) + + # Set up live report path + if args.report != "none": + if args.report 
== "auto": + timestamp = time.strftime("%Y%m%d_%H%M%S") + live_report_path = Path(tempfile.gettempdir()) / f"skill_description_report_{skill_path.name}_{timestamp}.html" + else: + live_report_path = Path(args.report) + # Open the report immediately so the user can watch + live_report_path.write_text("
<html><body><p>Starting optimization loop...</p></body></html>
") + webbrowser.open(str(live_report_path)) + else: + live_report_path = None + + # Determine output directory (create before run_loop so logs can be written) + if args.results_dir: + timestamp = time.strftime("%Y-%m-%d_%H%M%S") + results_dir = Path(args.results_dir) / timestamp + results_dir.mkdir(parents=True, exist_ok=True) + else: + results_dir = None + + log_dir = results_dir / "logs" if results_dir else None + + output = run_loop( + eval_set=eval_set, + skill_path=skill_path, + description_override=args.description, + num_workers=args.num_workers, + timeout=args.timeout, + max_iterations=args.max_iterations, + runs_per_query=args.runs_per_query, + trigger_threshold=args.trigger_threshold, + holdout=args.holdout, + model=args.model, + verbose=args.verbose, + live_report_path=live_report_path, + log_dir=log_dir, + ) + + # Save JSON output + json_output = json.dumps(output, indent=2) + print(json_output) + if results_dir: + (results_dir / "results.json").write_text(json_output) + + # Write final HTML report (without auto-refresh) + if live_report_path: + live_report_path.write_text(generate_html(output, auto_refresh=False, skill_name=name)) + print(f"\nReport: {live_report_path}", file=sys.stderr) + + if results_dir and live_report_path: + (results_dir / "report.html").write_text(generate_html(output, auto_refresh=False, skill_name=name)) + + if results_dir: + print(f"Results saved to: {results_dir}", file=sys.stderr) + + +if __name__ == "__main__": + main() diff --git a/.agents/skills/skill-creator/scripts/utils.py b/.agents/skills/skill-creator/scripts/utils.py new file mode 100644 index 000000000000..51b6a07dd571 --- /dev/null +++ b/.agents/skills/skill-creator/scripts/utils.py @@ -0,0 +1,47 @@ +"""Shared utilities for skill-creator scripts.""" + +from pathlib import Path + + + +def parse_skill_md(skill_path: Path) -> tuple[str, str, str]: + """Parse a SKILL.md file, returning (name, description, full_content).""" + content = (skill_path / 
"SKILL.md").read_text() + lines = content.split("\n") + + if lines[0].strip() != "---": + raise ValueError("SKILL.md missing frontmatter (no opening ---)") + + end_idx = None + for i, line in enumerate(lines[1:], start=1): + if line.strip() == "---": + end_idx = i + break + + if end_idx is None: + raise ValueError("SKILL.md missing frontmatter (no closing ---)") + + name = "" + description = "" + frontmatter_lines = lines[1:end_idx] + i = 0 + while i < len(frontmatter_lines): + line = frontmatter_lines[i] + if line.startswith("name:"): + name = line[len("name:"):].strip().strip('"').strip("'") + elif line.startswith("description:"): + value = line[len("description:"):].strip() + # Handle YAML multiline indicators (>, |, >-, |-) + if value in (">", "|", ">-", "|-"): + continuation_lines: list[str] = [] + i += 1 + while i < len(frontmatter_lines) and (frontmatter_lines[i].startswith(" ") or frontmatter_lines[i].startswith("\t")): + continuation_lines.append(frontmatter_lines[i].strip()) + i += 1 + description = " ".join(continuation_lines) + continue + else: + description = value.strip('"').strip("'") + i += 1 + + return name, description, content diff --git a/.agents/skills/skill-scanner/references/permission-analysis.md b/.agents/skills/skill-scanner/references/permission-analysis.md index 8d06d3c67d24..e7b7ae5d8c9f 100644 --- a/.agents/skills/skill-scanner/references/permission-analysis.md +++ b/.agents/skills/skill-scanner/references/permission-analysis.md @@ -49,7 +49,7 @@ Expected tool sets by skill type: - **Expected**: `Read, Grep, Glob, Bash` - **Bash justification**: Git operations, CI commands, gh CLI -- **Examples**: commit, create-pr, iterate-pr +- **Examples**: commit, pr-writer, iterate-pr ### Content Generation Skills diff --git a/.craft.yml b/.craft.yml index aa9119014db4..f64414ea19a4 100644 --- a/.craft.yml +++ b/.craft.yml @@ -100,6 +100,9 @@ targets: - name: npm id: '@sentry/nestjs' includeNames: /^sentry-nestjs-\d.*\.tgz$/ + - name: npm + id: 
'@sentry/effect' + includeNames: /^sentry-effect-\d.*\.tgz$/ ## 6. Fullstack/Meta Frameworks (depending on Node and Browser or Framework SDKs) - name: npm @@ -237,3 +240,8 @@ targets: onlyIfPresent: /^sentry-vue-\d.*\.tgz$/ 'npm:@sentry/wasm': onlyIfPresent: /^sentry-wasm-\d.*\.tgz$/ + 'npm:@sentry/effect': + name: 'Sentry Effect SDK' + sdkName: 'sentry.javascript.effect' + packageUrl: 'https://www.npmjs.com/package/@sentry/effect' + onlyIfPresent: /^sentry-effect-\d.*\.tgz$/ diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index ad71500f1193..ea3bf6f9afc6 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -27,3 +27,9 @@ c88ff463a5566194a454b58bc555f183cf9ee813 # chore: Unignore HTML files and reformat with oxfmt (#19311) b1d25bb2462feb02defcdb28221759e26c115d99 + +# chore: migrate to oxlint (#19134) +413041a34e748582af38c90067cd573f15c85add + +# chore(lint): Rule adjustments and fix warnings (#19612) +a0c7d4029bc6f1100ea7e901e78fb5374dc308e8 diff --git a/.github/ISSUE_TEMPLATE/bug.yml b/.github/ISSUE_TEMPLATE/bug.yml index c09984de5c3b..305e975b48fd 100644 --- a/.github/ISSUE_TEMPLATE/bug.yml +++ b/.github/ISSUE_TEMPLATE/bug.yml @@ -45,6 +45,7 @@ body: - '@sentry/cloudflare' - '@sentry/cloudflare - hono' - '@sentry/deno' + - '@sentry/effect' - '@sentry/ember' - '@sentry/gatsby' - '@sentry/google-cloud-serverless' diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 5b84a70ffbd6..3228c64e6059 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -65,59 +65,11 @@ env: jobs: job_get_metadata: - name: Get Metadata - runs-on: ubuntu-24.04 + uses: ./.github/workflows/ci-metadata.yml + with: + head_commit: ${{ github.event.inputs.commit || github.sha }} permissions: pull-requests: read - steps: - - name: Check out current commit - uses: actions/checkout@v6 - with: - ref: ${{ env.HEAD_COMMIT }} - # We need to check out not only the fake merge commit between the PR and the base branch which GH 
creates, but - # also its parents, so that we can pull the commit message from the head commit of the PR - fetch-depth: 2 - - - name: Get metadata - id: get_metadata - # We need to try a number of different options for finding the head commit, because each kind of trigger event - # stores it in a different location - run: | - COMMIT_SHA=$(git rev-parse --short ${{ github.event.pull_request.head.sha || github.event.head_commit.id || env.HEAD_COMMIT }}) - echo "COMMIT_SHA=$COMMIT_SHA" >> $GITHUB_ENV - echo "COMMIT_MESSAGE=$(git log -n 1 --pretty=format:%s $COMMIT_SHA)" >> $GITHUB_ENV - - # Most changed packages are determined in job_build via Nx - - name: Determine changed packages - uses: dorny/paths-filter@v3.0.1 - id: changed - with: - filters: | - workflow: - - '.github/**' - any_code: - - '!**/*.md' - - - name: Get PR labels - id: pr-labels - uses: mydea/pr-labels-action@fn/bump-node20 - - outputs: - commit_label: '${{ env.COMMIT_SHA }}: ${{ env.COMMIT_MESSAGE }}' - # Note: These next three have to be checked as strings ('true'/'false')! 
- is_base_branch: - ${{ github.ref == 'refs/heads/develop' || github.ref == 'refs/heads/v9' || github.ref == 'refs/heads/v8'}} - is_release: ${{ startsWith(github.ref, 'refs/heads/release/') }} - changed_ci: ${{ steps.changed.outputs.workflow == 'true' }} - changed_any_code: ${{ steps.changed.outputs.any_code == 'true' }} - - # When merging into master, or from master - is_gitflow_sync: ${{ github.head_ref == 'master' || github.ref == 'refs/heads/master' }} - has_gitflow_label: - ${{ github.event_name == 'pull_request' && contains(steps.pr-labels.outputs.labels, ' Gitflow ') }} - force_skip_cache: - ${{ github.event_name == 'schedule' || (github.event_name == 'pull_request' && - contains(steps.pr-labels.outputs.labels, ' ci-skip-cache ')) }} job_build: name: Build @@ -308,7 +260,7 @@ jobs: with: dependency_cache_key: ${{ needs.job_build.outputs.dependency_cache_key }} - name: Lint source files - run: yarn lint:oxlint + run: yarn lint - name: Lint for ES compatibility run: yarn lint:es-compatibility diff --git a/.github/workflows/ci-metadata.yml b/.github/workflows/ci-metadata.yml new file mode 100644 index 000000000000..c4fca988d724 --- /dev/null +++ b/.github/workflows/ci-metadata.yml @@ -0,0 +1,89 @@ +name: 'Get Metadata' +on: + workflow_call: + inputs: + head_commit: + description: 'The commit SHA to check out and test' + type: string + required: true + outputs: + commit_label: + description: 'Short SHA + commit message for display' + value: ${{ jobs.get_metadata.outputs.commit_label }} + is_base_branch: + description: 'Whether the ref is develop, v9, or v8' + value: ${{ jobs.get_metadata.outputs.is_base_branch }} + is_release: + description: 'Whether the ref is a release branch' + value: ${{ jobs.get_metadata.outputs.is_release }} + changed_ci: + description: 'Whether .github/** files changed' + value: ${{ jobs.get_metadata.outputs.changed_ci }} + changed_any_code: + description: 'Whether any non-markdown files changed' + value: ${{ 
jobs.get_metadata.outputs.changed_any_code }} + is_gitflow_sync: + description: 'Whether this is a gitflow sync (master merge)' + value: ${{ jobs.get_metadata.outputs.is_gitflow_sync }} + has_gitflow_label: + description: 'Whether the PR has the Gitflow label' + value: ${{ jobs.get_metadata.outputs.has_gitflow_label }} + force_skip_cache: + description: 'Whether to skip caching (schedule or ci-skip-cache label)' + value: ${{ jobs.get_metadata.outputs.force_skip_cache }} + +jobs: + get_metadata: + name: Get Metadata + runs-on: ubuntu-24.04 + permissions: + pull-requests: read + steps: + - name: Check out current commit + uses: actions/checkout@v6 + with: + ref: ${{ inputs.head_commit }} + # We need to check out not only the fake merge commit between the PR and the base branch which GH creates, but + # also its parents, so that we can pull the commit message from the head commit of the PR + fetch-depth: 2 + + - name: Get metadata + id: get_metadata + # We need to try a number of different options for finding the head commit, because each kind of trigger event + # stores it in a different location + run: | + COMMIT_SHA=$(git rev-parse --short ${{ github.event.pull_request.head.sha || github.event.head_commit.id || inputs.head_commit }}) + echo "COMMIT_SHA=$COMMIT_SHA" >> $GITHUB_ENV + echo "COMMIT_MESSAGE=$(git log -n 1 --pretty=format:%s $COMMIT_SHA)" >> $GITHUB_ENV + + # Most changed packages are determined in job_build via Nx + - name: Determine changed packages + uses: dorny/paths-filter@v3.0.1 + id: changed + with: + filters: | + workflow: + - '.github/**' + any_code: + - '!**/*.md' + + - name: Get PR labels + id: pr-labels + uses: mydea/pr-labels-action@fn/bump-node20 + + outputs: + commit_label: '${{ env.COMMIT_SHA }}: ${{ env.COMMIT_MESSAGE }}' + # Note: These next three have to be checked as strings ('true'/'false')! 
+ is_base_branch: + ${{ github.ref == 'refs/heads/develop' || github.ref == 'refs/heads/v9' || github.ref == 'refs/heads/v8'}} + is_release: ${{ startsWith(github.ref, 'refs/heads/release/') }} + changed_ci: ${{ steps.changed.outputs.workflow == 'true' }} + changed_any_code: ${{ steps.changed.outputs.any_code == 'true' }} + + # When merging into master, or from master + is_gitflow_sync: ${{ github.head_ref == 'master' || github.ref == 'refs/heads/master' }} + has_gitflow_label: + ${{ github.event_name == 'pull_request' && contains(steps.pr-labels.outputs.labels, ' Gitflow ') }} + force_skip_cache: + ${{ github.event_name == 'schedule' || (github.event_name == 'pull_request' && + contains(steps.pr-labels.outputs.labels, ' ci-skip-cache ')) }} diff --git a/.oxlintrc.json b/.oxlintrc.json index 6a11fcc33977..ef23a888fab8 100644 --- a/.oxlintrc.json +++ b/.oxlintrc.json @@ -1,6 +1,6 @@ { "$schema": "./node_modules/oxlint/configuration_schema.json", - "plugins": ["typescript", "import", "jsdoc", "jest", "vitest"], + "plugins": ["typescript", "import", "jsdoc", "vitest"], "jsPlugins": [ { "name": "sdk", @@ -9,6 +9,11 @@ ], "categories": {}, "rules": { + "no-unused-vars": [ + "warn", + { "argsIgnorePattern": "^_", "varsIgnorePattern": "^_", "caughtErrorsIgnorePattern": "^_" } + ], + // === Base rules from eslint-config-sdk/base.js === "no-console": "error", "no-alert": "error", @@ -27,28 +32,18 @@ "import/namespace": "off", "import/no-unresolved": "off", - // === Jest/Vitest rules === - "jest/no-focused-tests": "error", - "jest/no-disabled-tests": "error", - // === Rules turned off (not enforced in ESLint or causing false positives) === "no-control-regex": "off", "jsdoc/check-tag-names": "off", "jsdoc/require-yields": "off", "no-useless-rename": "off", "no-constant-binary-expression": "off", - "jest/no-conditional-expect": "off", - "jest/expect-expect": "off", - "jest/no-standalone-expect": "off", - "jest/require-to-throw-message": "off", - "jest/valid-title": "off", - 
"jest/no-export": "off", - "jest/valid-describe-callback": "off", "vitest/hoisted-apis-on-top": "off", "vitest/no-conditional-tests": "off", "no-unsafe-optional-chaining": "off", "no-eval": "off", "no-import-assign": "off", + "typescript/no-duplicate-type-constituents": "off", // === Custom SDK rules (via JS plugin) === "sdk/no-eq-empty": "error" @@ -61,17 +56,17 @@ "typescript/consistent-type-imports": "error", "typescript/no-unnecessary-type-assertion": "error", "typescript/prefer-for-of": "error", - // "typescript/no-floating-promises": ["error", { "ignoreVoid": false }], + "typescript/no-floating-promises": ["error", { "ignoreVoid": true }], "typescript/no-dynamic-delete": "error", - // "typescript/no-unsafe-member-access": "error", + "typescript/no-unsafe-member-access": "error", "typescript/unbound-method": "error", "typescript/no-explicit-any": "error", "typescript/no-empty-function": "off", - - // === FIXME: Rules to turn back as error === - "typescript/prefer-optional-chain": "warn", - "typescript/no-floating-promises": "warn", - "typescript/no-unsafe-member-access": "warn" + "typescript/prefer-optional-chain": ["error"], + "typescript/no-redundant-type-constituents": "off", + "typescript/restrict-template-expressions": "off", + "typescript/await-thenable": "warn", + "typescript/no-base-to-string": "warn" } }, { @@ -111,7 +106,12 @@ "typescript/no-floating-promises": "off", "typescript/unbound-method": "off", "max-lines": "off", - "complexity": "off" + "complexity": "off", + "typescript/prefer-optional-chain": "off", + "typescript/no-misused-spread": "off", + "typescript/require-array-sort-compare": "off", + "typescript/no-base-to-string": "off", + "typescript/await-thenable": "off" } }, { diff --git a/.size-limit.js b/.size-limit.js index 38a83445d021..750e7ce8f7fd 100644 --- a/.size-limit.js +++ b/.size-limit.js @@ -82,7 +82,7 @@ module.exports = [ path: 'packages/browser/build/npm/esm/prod/index.js', import: createImport('init', 
'browserTracingIntegration', 'replayIntegration', 'replayCanvasIntegration'), gzip: true, - limit: '86 KB', + limit: '87 KB', }, { name: '@sentry/browser (incl. Tracing, Replay, Feedback)', @@ -124,7 +124,7 @@ module.exports = [ path: 'packages/browser/build/npm/esm/prod/index.js', import: createImport('init', 'logger'), gzip: true, - limit: '27 KB', + limit: '28 KB', }, { name: '@sentry/browser (incl. Metrics & Logs)', @@ -255,21 +255,21 @@ module.exports = [ path: createCDNPath('bundle.tracing.logs.metrics.min.js'), gzip: false, brotli: false, - limit: '131 KB', + limit: '132 KB', }, { name: 'CDN Bundle (incl. Replay, Logs, Metrics) - uncompressed', path: createCDNPath('bundle.replay.logs.metrics.min.js'), gzip: false, brotli: false, - limit: '209 KB', + limit: '210 KB', }, { name: 'CDN Bundle (incl. Tracing, Replay) - uncompressed', path: createCDNPath('bundle.tracing.replay.min.js'), gzip: false, brotli: false, - limit: '245 KB', + limit: '246 KB', }, { name: 'CDN Bundle (incl. Tracing, Replay, Logs, Metrics) - uncompressed', @@ -308,7 +308,7 @@ module.exports = [ import: createImport('init'), ignore: ['$app/stores'], gzip: true, - limit: '43 KB', + limit: '44 KB', }, // Node-Core SDK (ESM) { @@ -317,7 +317,7 @@ module.exports = [ import: createImport('init'), ignore: [...builtinModules, ...nodePrefixedBuiltinModules], gzip: true, - limit: '53 KB', + limit: '57 KB', }, // Node SDK (ESM) { @@ -326,7 +326,7 @@ module.exports = [ import: createImport('init'), ignore: [...builtinModules, ...nodePrefixedBuiltinModules], gzip: true, - limit: '175 KB', + limit: '176 KB', }, { name: '@sentry/node - without tracing', diff --git a/.vscode/settings.json b/.vscode/settings.json index 43c91d3fc4af..37ff1f20dd2e 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -28,5 +28,6 @@ "editor.defaultFormatter": "oxc.oxc-vscode", "[typescript]": { "editor.defaultFormatter": "oxc.oxc-vscode" - } + }, + "oxc.suppressProgramErrors": true } diff --git a/AGENTS.md 
b/AGENTS.md index 08d5d2bac779..bea38f66e2d9 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -20,9 +20,12 @@ Use **yarn**: `yarn install`, `yarn build:dev`, `yarn test`, `yarn lint` | `yarn build:dev:filter @sentry/` | Build one package + deps | | `yarn build:bundle` | Browser bundles only | | `yarn test` | All unit tests | -| `yarn lint` | Oxlint + Oxfmt | -| `yarn fix` | Auto-fix lint + format | -| `yarn format` | Auto-fix formatting (Oxfmt) | +| `yarn verify` | Lint + format check | +| `yarn fix` | Format + lint fix | +| `yarn lint` | Lint (Oxlint) | +| `yarn lint:fix` | Lint + auto-fix (Oxlint) | +| `yarn format` | Format files (Oxfmt) | +| `yarn format:check` | Check formatting (Oxfmt) | Single package: `cd packages/ && yarn test` @@ -91,6 +94,14 @@ Uses **Git Flow** (see `docs/gitflow.md`). - `test-utils/` — Shared test utilities - `rollup-utils/` — Build utilities +## Linting & Formatting + +- This project uses **Oxlint** and **Oxfmt** — NOT ESLint or Prettier +- Never run `eslint`, `npx eslint`, or any ESLint CLI — use `yarn lint` (Oxlint) instead +- Never run `prettier` — use `yarn format` (Oxfmt) instead +- ESLint packages in the repo are legacy/e2e test app dependencies — ignore them +- Do not create, modify, or suggest `.eslintrc`, `eslint.config.*`, or `.prettierrc` files + ## Coding Standards - Follow existing conventions — check neighboring files diff --git a/CHANGELOG.md b/CHANGELOG.md index 9e1aa175f9bc..1f85d90681d4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,141 @@ - "You miss 100 percent of the chances you don't take. — Wayne Gretzky" — Michael Scott +## 10.44.0 + +### Important Changes + +- **feat(effect): Add `@sentry/effect` SDK (Alpha) ([#19644](https://github.com/getsentry/sentry-javascript/pull/19644))** + + This release introduces `@sentry/effect`, a new SDK for [Effect.ts](https://effect.website/) applications. The SDK provides Sentry integration via composable Effect layers for both Node.js and browser environments. 
+ + Compose the `effectLayer` with optional tracing, logging, and metrics layers to instrument your Effect application: + + ```typescript + import * as Sentry from '@sentry/effect'; + import * as Layer from 'effect/Layer'; + import * as Logger from 'effect/Logger'; + + const SentryLive = Layer.mergeAll( + Sentry.effectLayer({ dsn: '__DSN__', tracesSampleRate: 1.0, enableLogs: true }), + Layer.setTracer(Sentry.SentryEffectTracer), + Logger.replace(Logger.defaultLogger, Sentry.SentryEffectLogger), + Sentry.SentryEffectMetricsLayer, + ); + ``` + + Alpha features are still in progress, may have bugs and might include breaking changes. Please reach out on GitHub if you have any feedback or concerns. + +- **feat(astro): Add Astro 6 support ([#19745](https://github.com/getsentry/sentry-javascript/pull/19745))** + + This release enables full support for Astro v6 by adjusting our Astro SDK's middleware to some Astro-internal + changes. We cannot yet guarantee full support for server-islands, due to a [bug in Astro v6](https://github.com/withastro/astro/issues/15753) + but we'll follow up on this once the bug is fixed. + +- **feat(hono): Add basic instrumentation for Node runtime ([#19817](https://github.com/getsentry/sentry-javascript/pull/19817))** + + Adds a new package `@sentry/hono/node` (alpha) with basic instrumentation for Hono applications running in Node.js. + The Hono middleware for Cloudflare (`@sentry/hono/cloudflare` - alpha) comes with fixes, and it's now possible to access the Cloudflare Worker Bindings (`env`) from the options' callback. + + Start using the new Hono middlewares by installing `@sentry/hono` and importing the respective middleware for your runtime. + More instructions can be found in the [Hono readme](https://github.com/getsentry/sentry-javascript/blob/develop/packages/hono/README.md). + + Alpha features are still in progress, may have bugs and might include breaking changes. Please reach out on GitHub if you have any feedback or concerns. 
+ +- **feat(nestjs): Instrument `@nestjs/bullmq` `@Processor` decorator ([#19759](https://github.com/getsentry/sentry-javascript/pull/19759))** + + Automatically capture exceptions and create transactions for BullMQ queue processors in NestJS applications. + + When using the `@Processor` decorator from `@nestjs/bullmq`, the SDK now automatically wraps the `process()` method + to create `queue.process` transactions with proper isolation scopes, preventing breadcrumb and scope leakage between + jobs and HTTP requests. Errors thrown in processors are captured with the `auto.queue.nestjs.bullmq` mechanism type. + + Requires `@nestjs/bullmq` v10.0.0 or later. + +- **feat(nestjs): Instrument `@nestjs/schedule` decorators ([#19735](https://github.com/getsentry/sentry-javascript/pull/19735))** + + Automatically capture exceptions thrown in `@Cron`, `@Interval`, and `@Timeout` decorated methods. + + Previously, exceptions in `@Cron` methods were only captured if you used the `SentryCron` decorator. Now they are + captured automatically. The exception mechanism type changed from `auto.cron.nestjs.async` to + `auto.function.nestjs.cron`. If you have Sentry queries or alerts that filter on the old mechanism type, update them + accordingly. + +- **feat(node): Expose `headersToSpanAttributes` option on `nativeNodeFetchIntegration()` ([#19770](https://github.com/getsentry/sentry-javascript/pull/19770))** + + Response headers like `http.response.header.content-length` were previously captured automatically on outgoing + fetch spans but are now opt-in since `@opentelemetry/instrumentation-undici@0.22.0`. You can now configure which + headers to capture via the `headersToSpanAttributes` option. 
+ + ```js + Sentry.init({ + integrations: [ + Sentry.nativeNodeFetchIntegration({ + headersToSpanAttributes: { + requestHeaders: ['x-custom-header'], + responseHeaders: ['content-length', 'content-type'], + }, + }), + ], + }); + ``` + +### Other Changes + +- feat(browser/cloudflare): Export conversation id from browser and cloudflare runtimes ([#19820](https://github.com/getsentry/sentry-javascript/pull/19820)) +- feat(bun): Set http response header attributes instead of response context headers ([#19821](https://github.com/getsentry/sentry-javascript/pull/19821)) +- feat(core): Add `sentry.timestamp.sequence` attribute for timestamp tie-breaking ([#19421](https://github.com/getsentry/sentry-javascript/pull/19421)) +- feat(deno): Set http response header attributes instead of response context headers ([#19822](https://github.com/getsentry/sentry-javascript/pull/19822)) +- feat(deps): Bump OpenTelemetry dependencies ([#19682](https://github.com/getsentry/sentry-javascript/pull/19682)) +- feat(nestjs): Use more specific span origins for NestJS guards, pipes, interceptors, and exception filters ([#19751](https://github.com/getsentry/sentry-javascript/pull/19751)) +- feat(nextjs): Vercel queue instrumentation ([#19799](https://github.com/getsentry/sentry-javascript/pull/19799)) +- feat(node): Avoid OTEL instrumentation for outgoing requests on Node 22+ ([#17355](https://github.com/getsentry/sentry-javascript/pull/17355)) +- feat(deps): bump hono from 4.12.5 to 4.12.7 ([#19747](https://github.com/getsentry/sentry-javascript/pull/19747)) +- feat(deps): bump mysql2 from 3.14.4 to 3.19.1 ([#19787](https://github.com/getsentry/sentry-javascript/pull/19787)) +- feat(deps): bump simple-git from 3.30.0 to 3.33.0 ([#19744](https://github.com/getsentry/sentry-javascript/pull/19744)) +- feat(deps): bump yauzl from 3.2.0 to 3.2.1 ([#19809](https://github.com/getsentry/sentry-javascript/pull/19809)) +- fix(browser): Skip browserTracingIntegration setup for bot user agents 
([#19708](https://github.com/getsentry/sentry-javascript/pull/19708)) +- fix(cloudflare): Recreate client when previous one was disposed ([#19727](https://github.com/getsentry/sentry-javascript/pull/19727)) +- fix(core): Align Vercel embedding spans with semantic conventions ([#19795](https://github.com/getsentry/sentry-javascript/pull/19795)) +- fix(core): Fallback to `sendDefaultPii` setting in langchain and langgraph in non-node environments ([#19813](https://github.com/getsentry/sentry-javascript/pull/19813)) +- fix(core): Improve Vercel AI SDK instrumentation attributes ([#19717](https://github.com/getsentry/sentry-javascript/pull/19717)) +- fix(hono): Align error mechanism ([#19831](https://github.com/getsentry/sentry-javascript/pull/19831)) +- fix(hono): Allow passing env and fix type issues ([#19825](https://github.com/getsentry/sentry-javascript/pull/19825)) +- fix(nestjs): Fork isolation scope in `@nestjs/event-emitter` instrumentation ([#19725](https://github.com/getsentry/sentry-javascript/pull/19725)) +- fix(nextjs): Log correct `lastEventId` when error is thrown in component render ([#19764](https://github.com/getsentry/sentry-javascript/pull/19764)) +- fix(nextjs): Strip sourceMappingURL comments after deleting source maps in turbopack builds ([#19814](https://github.com/getsentry/sentry-javascript/pull/19814)) +- fix(nuxt): Upload client source maps ([#19805](https://github.com/getsentry/sentry-javascript/pull/19805)) +- fix(profiling-node): Fix NODE_VERSION rendered as [object Object] in warning ([#19788](https://github.com/getsentry/sentry-javascript/pull/19788)) + +
+ Internal Changes + +- chore: Add oxlint migration commits to blame ignore ([#19784](https://github.com/getsentry/sentry-javascript/pull/19784)) +- chore: add oxlint typescript program suppression to workspace settings ([#19692](https://github.com/getsentry/sentry-javascript/pull/19692)) +- chore: Bump oxlint and oxfmt ([#19771](https://github.com/getsentry/sentry-javascript/pull/19771)) +- chore: Clean up lint and format script names ([#19719](https://github.com/getsentry/sentry-javascript/pull/19719)) +- chore(agents): Be more explicit on linting and formatting ([#19803](https://github.com/getsentry/sentry-javascript/pull/19803)) +- chore(ci): Extract metadata workflow ([#19680](https://github.com/getsentry/sentry-javascript/pull/19680)) +- chore(deps): bump tedious from 18.6.1 to 19.2.1 ([#19786](https://github.com/getsentry/sentry-javascript/pull/19786)) +- chore(deps-dev): bump file-type from 20.5.0 to 21.3.1 ([#19748](https://github.com/getsentry/sentry-javascript/pull/19748)) +- chore(effect): Add Effect to craft, README and issue templates ([#19837](https://github.com/getsentry/sentry-javascript/pull/19837)) +- chore(lint): Rule adjustments and fix warnings ([#19612](https://github.com/getsentry/sentry-javascript/pull/19612)) +- chore(skills): Add `skill-creator` and update managed agent skills ([#19713](https://github.com/getsentry/sentry-javascript/pull/19713)) +- docs(changelog): Add entry for `@sentry/hono` alpha release ([#19828](https://github.com/getsentry/sentry-javascript/pull/19828)) +- docs(hono): Document usage without `"*"` ([#19756](https://github.com/getsentry/sentry-javascript/pull/19756)) +- docs(new-release): Document `sdkName` for craft ([#19736](https://github.com/getsentry/sentry-javascript/pull/19736)) +- docs(new-release): Update docs based on new Craft flow ([#19731](https://github.com/getsentry/sentry-javascript/pull/19731)) +- ref(cloudflare): Prepare for WorkerEntrypoint 
([#19742](https://github.com/getsentry/sentry-javascript/pull/19742)) +- ref(nestjs): Move event instrumentation unit tests to separate file ([#19738](https://github.com/getsentry/sentry-javascript/pull/19738)) +- style: Auto changes made from "yarn fix" ([#19710](https://github.com/getsentry/sentry-javascript/pull/19710)) +- test(astro,cloudflare): Add an E2E test for Astro 6 on Cloudflare ([#19781](https://github.com/getsentry/sentry-javascript/pull/19781)) +- test(browser): Add simulated mfe integration test ([#19768](https://github.com/getsentry/sentry-javascript/pull/19768)) +- test(e2e): Add MFE e2e test using `vite-plugin-federation` ([#19778](https://github.com/getsentry/sentry-javascript/pull/19778)) +- test(nextjs): Add vercel queue tests to next-16 ([#19798](https://github.com/getsentry/sentry-javascript/pull/19798)) +- tests(core): Fix flaky metric sequence number test ([#19754](https://github.com/getsentry/sentry-javascript/pull/19754)) + +
+ ## 10.43.0 ### Important Changes diff --git a/README.md b/README.md index 5a3453dbe004..5ac7fefc3b81 100644 --- a/README.md +++ b/README.md @@ -69,6 +69,7 @@ package. Please refer to the README and instructions of those SDKs for more deta native crashes - [`@sentry/capacitor`](https://github.com/getsentry/sentry-capacitor): SDK for Capacitor Apps and Ionic with support for native crashes +- [`@sentry/effect`](https://github.com/getsentry/sentry-javascript/tree/master/packages/effect): SDK for Effect (Alpha) - [`@sentry/bun`](https://github.com/getsentry/sentry-javascript/tree/master/packages/bun): SDK for Bun - [`@sentry/deno`](https://github.com/getsentry/sentry-javascript/tree/master/packages/deno): SDK for Deno - [`@sentry/cloudflare`](https://github.com/getsentry/sentry-javascript/tree/master/packages/cloudflare): SDK for diff --git a/agents.lock b/agents.lock index 20d1e2edb731..56004eb4c829 100644 --- a/agents.lock +++ b/agents.lock @@ -13,8 +13,8 @@ integrity = "sha256-EWfTlMvQtawp0i453jhIozX6pGNld+5fKXwMHhIa1KQ=" source = "getsentry/dotagents" resolved_url = "https://github.com/getsentry/dotagents.git" resolved_path = "skills/dotagents" -commit = "84ec01d363fdd50b47f2baefed742d27a564c210" -integrity = "sha256-bVx96wBmjIF6NPfPH7GMDWUJLulbAHWZhRWi1UAZ6Ws=" +commit = "a71b92d3cca4d0ed7bd763b611771ff9c81d50c8" +integrity = "sha256-/KBw6Ea4epWV0EiNUF32dEQKd7vcBhns31rSI6D4R1o=" [skills.e2e] source = "path:.agents/skills/e2e" @@ -28,6 +28,20 @@ integrity = "sha256-J5OmnVv+u8fjERNeDkaxgLuM3c/rrHKfpEe9gIedeZk=" source = "path:.agents/skills/release" integrity = "sha256-/5xBn5M/VGzyi18Q1Llui5aASIsYsvE7sdMSUf1dm4Q=" +[skills.skill-creator] +source = "anthropics/skills" +resolved_url = "https://github.com/anthropics/skills.git" +resolved_path = "skills/skill-creator" +commit = "b0cbd3df1533b396d281a6886d5132f623393a9c" +integrity = "sha256-vR9WjFQ+qi7pH+1usde0vuZwbMr4pPVVEu8UR3jn/NA=" + +[skills.skill-scanner] +source = "getsentry/skills" +resolved_url = 
"https://github.com/getsentry/skills.git" +resolved_path = ".agents/skills/skill-scanner" +commit = "360070a6ed7a7530ff1df26adb285807f4483ffa" +integrity = "sha256-IlmfhBZzW5gl9117KXUqjAekn4iORqDpjM3+EPS6Ods=" + [skills.triage-issue] source = "path:.agents/skills/triage-issue" integrity = "sha256-Oxwx2zTEr0UY3JnOw7l0O2pa7/CunntqZTUtSeWJvh0=" @@ -39,10 +53,3 @@ integrity = "sha256-IMo0XcsfNtduSQzNZLsrXD/Qg0aE6loetoM0qIqYatA=" [skills.upgrade-otel] source = "path:.agents/skills/upgrade-otel" integrity = "sha256-PnfUymsVK2zWTGNPOvL2XkIXLWta0RpVTVDcvQC5q8w=" - -[skills.skill-scanner] -source = "getsentry/skills" -resolved_url = "https://github.com/getsentry/skills.git" -resolved_path = ".agents/skills/skill-scanner" -commit = "b68ac5ce82c981ac3235dd6f2037c1109baaf0f2" -integrity = "sha256-IleKDxGpne+9g/048+q4wBv7MkfZPIYFX78TnLjeGyQ=" diff --git a/agents.toml b/agents.toml index c256e72df073..6aa08a89bcf2 100644 --- a/agents.toml +++ b/agents.toml @@ -7,7 +7,7 @@ agents = ["claude", "cursor"] [trust] github_orgs = ["getsentry"] -github_repos = ["getsentry/skills"] +github_repos = ["getsentry/skills", "anthropics/skills"] [[skills]] @@ -53,3 +53,7 @@ source = "path:.agents/skills/upgrade-otel" [[skills]] name = "skill-scanner" source = "getsentry/skills" + +[[skills]] +name = "skill-creator" +source = "anthropics/skills" diff --git a/dev-packages/.oxlintrc.json b/dev-packages/.oxlintrc.json index f44c8f60b0db..72497867a535 100644 --- a/dev-packages/.oxlintrc.json +++ b/dev-packages/.oxlintrc.json @@ -4,6 +4,10 @@ "rules": { "typescript/no-explicit-any": "off", "max-lines": "off", - "no-unused-expressions": "off" + "no-unused-expressions": "off", + "typescript/require-array-sort-compare": "off", + "typescript/no-misused-spread": "off", + "typescript/no-base-to-string": "off", + "typescript/await-thenable": "off" } } diff --git a/dev-packages/browser-integration-tests/package.json b/dev-packages/browser-integration-tests/package.json index e8257a0a9a85..fecaecf4c4f1 100644 
--- a/dev-packages/browser-integration-tests/package.json +++ b/dev-packages/browser-integration-tests/package.json @@ -10,8 +10,8 @@ "scripts": { "clean": "rimraf -g suites/**/dist loader-suites/**/dist tmp", "install-browsers": "[[ -z \"$SKIP_PLAYWRIGHT_BROWSER_INSTALL\" ]] && npx playwright install --with-deps || echo 'Skipping browser installation'", - "lint": "oxlint .", - "fix": "oxlint . --fix", + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --type-aware", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --fix --type-aware", "type-check": "tsc", "postinstall": "yarn install-browsers", "pretest": "yarn clean && yarn type-check", diff --git a/dev-packages/browser-integration-tests/suites/public-api/logger/integration/test.ts b/dev-packages/browser-integration-tests/suites/public-api/logger/integration/test.ts index 40c2d18d29bd..7315e8cf4f36 100644 --- a/dev-packages/browser-integration-tests/suites/public-api/logger/integration/test.ts +++ b/dev-packages/browser-integration-tests/suites/public-api/logger/integration/test.ts @@ -34,6 +34,7 @@ sentryTest('should capture console object calls', async ({ getLocalTestUrl, page 'sentry.origin': { value: 'auto.log.console', type: 'string' }, 'sentry.sdk.name': { value: 'sentry.javascript.browser', type: 'string' }, 'sentry.sdk.version': { value: expect.any(String), type: 'string' }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, 'sentry.message.template': { value: 'console.trace {} {}', type: 'string' }, 'sentry.message.parameter.0': { value: 123, type: 'integer' }, 'sentry.message.parameter.1': { value: false, type: 'boolean' }, @@ -49,6 +50,7 @@ sentryTest('should capture console object calls', async ({ getLocalTestUrl, page 'sentry.origin': { value: 'auto.log.console', type: 'string' }, 'sentry.sdk.name': { value: 'sentry.javascript.browser', type: 'string' }, 'sentry.sdk.version': { value: expect.any(String), 
type: 'string' }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, 'sentry.message.template': { value: 'console.debug {} {}', type: 'string' }, 'sentry.message.parameter.0': { value: 123, type: 'integer' }, 'sentry.message.parameter.1': { value: false, type: 'boolean' }, @@ -64,6 +66,7 @@ sentryTest('should capture console object calls', async ({ getLocalTestUrl, page 'sentry.origin': { value: 'auto.log.console', type: 'string' }, 'sentry.sdk.name': { value: 'sentry.javascript.browser', type: 'string' }, 'sentry.sdk.version': { value: expect.any(String), type: 'string' }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, 'sentry.message.template': { value: 'console.log {} {}', type: 'string' }, 'sentry.message.parameter.0': { value: 123, type: 'integer' }, 'sentry.message.parameter.1': { value: false, type: 'boolean' }, @@ -79,6 +82,7 @@ sentryTest('should capture console object calls', async ({ getLocalTestUrl, page 'sentry.origin': { value: 'auto.log.console', type: 'string' }, 'sentry.sdk.name': { value: 'sentry.javascript.browser', type: 'string' }, 'sentry.sdk.version': { value: expect.any(String), type: 'string' }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, 'sentry.message.template': { value: 'console.info {} {}', type: 'string' }, 'sentry.message.parameter.0': { value: 123, type: 'integer' }, 'sentry.message.parameter.1': { value: false, type: 'boolean' }, @@ -94,6 +98,7 @@ sentryTest('should capture console object calls', async ({ getLocalTestUrl, page 'sentry.origin': { value: 'auto.log.console', type: 'string' }, 'sentry.sdk.name': { value: 'sentry.javascript.browser', type: 'string' }, 'sentry.sdk.version': { value: expect.any(String), type: 'string' }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, 'sentry.message.template': { value: 'console.warn {} {}', type: 'string' }, 'sentry.message.parameter.0': { value: 123, type: 
'integer' }, 'sentry.message.parameter.1': { value: false, type: 'boolean' }, @@ -109,6 +114,7 @@ sentryTest('should capture console object calls', async ({ getLocalTestUrl, page 'sentry.origin': { value: 'auto.log.console', type: 'string' }, 'sentry.sdk.name': { value: 'sentry.javascript.browser', type: 'string' }, 'sentry.sdk.version': { value: expect.any(String), type: 'string' }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, 'sentry.message.template': { value: 'console.error {} {}', type: 'string' }, 'sentry.message.parameter.0': { value: 123, type: 'integer' }, 'sentry.message.parameter.1': { value: false, type: 'boolean' }, @@ -124,6 +130,7 @@ sentryTest('should capture console object calls', async ({ getLocalTestUrl, page 'sentry.origin': { value: 'auto.log.console', type: 'string' }, 'sentry.sdk.name': { value: 'sentry.javascript.browser', type: 'string' }, 'sentry.sdk.version': { value: expect.any(String), type: 'string' }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, }, }, { @@ -136,6 +143,7 @@ sentryTest('should capture console object calls', async ({ getLocalTestUrl, page 'sentry.origin': { value: 'auto.log.console', type: 'string' }, 'sentry.sdk.name': { value: 'sentry.javascript.browser', type: 'string' }, 'sentry.sdk.version': { value: expect.any(String), type: 'string' }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, 'sentry.message.template': { value: 'Object: {}', type: 'string' }, 'sentry.message.parameter.0': { value: '{"key":"value","nested":{"prop":123}}', type: 'string' }, }, @@ -150,6 +158,7 @@ sentryTest('should capture console object calls', async ({ getLocalTestUrl, page 'sentry.origin': { value: 'auto.log.console', type: 'string' }, 'sentry.sdk.name': { value: 'sentry.javascript.browser', type: 'string' }, 'sentry.sdk.version': { value: expect.any(String), type: 'string' }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 
'integer' }, 'sentry.message.template': { value: 'Array: {}', type: 'string' }, 'sentry.message.parameter.0': { value: '[1,2,3,"string"]', type: 'string' }, }, @@ -164,6 +173,7 @@ sentryTest('should capture console object calls', async ({ getLocalTestUrl, page 'sentry.origin': { value: 'auto.log.console', type: 'string' }, 'sentry.sdk.name': { value: 'sentry.javascript.browser', type: 'string' }, 'sentry.sdk.version': { value: expect.any(String), type: 'string' }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, 'sentry.message.template': { value: 'Mixed: {} {} {} {}', type: 'string' }, 'sentry.message.parameter.0': { value: 'prefix', type: 'string' }, 'sentry.message.parameter.1': { value: '{"obj":true}', type: 'string' }, @@ -181,6 +191,7 @@ sentryTest('should capture console object calls', async ({ getLocalTestUrl, page 'sentry.origin': { value: 'auto.log.console', type: 'string' }, 'sentry.sdk.name': { value: 'sentry.javascript.browser', type: 'string' }, 'sentry.sdk.version': { value: expect.any(String), type: 'string' }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, }, }, { @@ -193,6 +204,7 @@ sentryTest('should capture console object calls', async ({ getLocalTestUrl, page 'sentry.origin': { value: 'auto.log.console', type: 'string' }, 'sentry.sdk.name': { value: 'sentry.javascript.browser', type: 'string' }, 'sentry.sdk.version': { value: expect.any(String), type: 'string' }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, }, }, { @@ -205,6 +217,7 @@ sentryTest('should capture console object calls', async ({ getLocalTestUrl, page 'sentry.origin': { value: 'auto.log.console', type: 'string' }, 'sentry.sdk.name': { value: 'sentry.javascript.browser', type: 'string' }, 'sentry.sdk.version': { value: expect.any(String), type: 'string' }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, }, }, { @@ -217,6 +230,7 @@ sentryTest('should capture 
console object calls', async ({ getLocalTestUrl, page 'sentry.origin': { value: 'auto.log.console', type: 'string' }, 'sentry.sdk.name': { value: 'sentry.javascript.browser', type: 'string' }, 'sentry.sdk.version': { value: expect.any(String), type: 'string' }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, 'sentry.message.template': { value: 'first {} {} {}', type: 'string' }, 'sentry.message.parameter.0': { value: 0, type: 'integer' }, 'sentry.message.parameter.1': { value: 1, type: 'integer' }, @@ -233,6 +247,7 @@ sentryTest('should capture console object calls', async ({ getLocalTestUrl, page 'sentry.origin': { value: 'auto.log.console', type: 'string' }, 'sentry.sdk.name': { value: 'sentry.javascript.browser', type: 'string' }, 'sentry.sdk.version': { value: expect.any(String), type: 'string' }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, 'sentry.message.template': { value: 'hello {} {} {}', type: 'string' }, 'sentry.message.parameter.0': { value: true, type: 'boolean' }, 'sentry.message.parameter.1': { value: 'null', type: 'string' }, diff --git a/dev-packages/browser-integration-tests/suites/public-api/logger/scopeAttributes/test.ts b/dev-packages/browser-integration-tests/suites/public-api/logger/scopeAttributes/test.ts index c02a110046dd..4d7970945436 100644 --- a/dev-packages/browser-integration-tests/suites/public-api/logger/scopeAttributes/test.ts +++ b/dev-packages/browser-integration-tests/suites/public-api/logger/scopeAttributes/test.ts @@ -32,6 +32,7 @@ sentryTest('captures logs with scope attributes', async ({ getLocalTestUrl, page attributes: { 'sentry.sdk.name': { value: 'sentry.javascript.browser', type: 'string' }, 'sentry.sdk.version': { value: expect.any(String), type: 'string' }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, log_attr: { value: 'log_attr_1', type: 'string' }, }, }, @@ -44,6 +45,7 @@ sentryTest('captures logs with scope 
attributes', async ({ getLocalTestUrl, page attributes: { 'sentry.sdk.name': { value: 'sentry.javascript.browser', type: 'string' }, 'sentry.sdk.version': { value: expect.any(String), type: 'string' }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, global_scope_attr: { value: true, type: 'boolean' }, log_attr: { value: 'log_attr_2', type: 'string' }, }, @@ -57,6 +59,7 @@ sentryTest('captures logs with scope attributes', async ({ getLocalTestUrl, page attributes: { 'sentry.sdk.name': { value: 'sentry.javascript.browser', type: 'string' }, 'sentry.sdk.version': { value: expect.any(String), type: 'string' }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, global_scope_attr: { value: true, type: 'boolean' }, isolation_scope_1_attr: { value: 100, unit: 'millisecond', type: 'integer' }, log_attr: { value: 'log_attr_3', type: 'string' }, @@ -71,6 +74,7 @@ sentryTest('captures logs with scope attributes', async ({ getLocalTestUrl, page attributes: { 'sentry.sdk.name': { value: 'sentry.javascript.browser', type: 'string' }, 'sentry.sdk.version': { value: expect.any(String), type: 'string' }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, global_scope_attr: { value: true, type: 'boolean' }, isolation_scope_1_attr: { value: 100, unit: 'millisecond', type: 'integer' }, scope_attr: { value: 200, unit: 'millisecond', type: 'integer' }, @@ -86,6 +90,7 @@ sentryTest('captures logs with scope attributes', async ({ getLocalTestUrl, page attributes: { 'sentry.sdk.name': { value: 'sentry.javascript.browser', type: 'string' }, 'sentry.sdk.version': { value: expect.any(String), type: 'string' }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, global_scope_attr: { value: true, type: 'boolean' }, isolation_scope_1_attr: { value: 100, unit: 'millisecond', type: 'integer' }, scope_2_attr: { value: 300, unit: 'millisecond', type: 'integer' }, diff --git 
a/dev-packages/browser-integration-tests/suites/public-api/logger/simple/test.ts b/dev-packages/browser-integration-tests/suites/public-api/logger/simple/test.ts index aa2159d13bc1..db6d174820d7 100644 --- a/dev-packages/browser-integration-tests/suites/public-api/logger/simple/test.ts +++ b/dev-packages/browser-integration-tests/suites/public-api/logger/simple/test.ts @@ -33,6 +33,7 @@ sentryTest('should capture all logging methods', async ({ getLocalTestUrl, page attributes: { 'sentry.sdk.name': { value: 'sentry.javascript.browser', type: 'string' }, 'sentry.sdk.version': { value: expect.any(String), type: 'string' }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, }, }, { @@ -44,6 +45,7 @@ sentryTest('should capture all logging methods', async ({ getLocalTestUrl, page attributes: { 'sentry.sdk.name': { value: 'sentry.javascript.browser', type: 'string' }, 'sentry.sdk.version': { value: expect.any(String), type: 'string' }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, }, }, { @@ -55,6 +57,7 @@ sentryTest('should capture all logging methods', async ({ getLocalTestUrl, page attributes: { 'sentry.sdk.name': { value: 'sentry.javascript.browser', type: 'string' }, 'sentry.sdk.version': { value: expect.any(String), type: 'string' }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, }, }, { @@ -66,6 +69,7 @@ sentryTest('should capture all logging methods', async ({ getLocalTestUrl, page attributes: { 'sentry.sdk.name': { value: 'sentry.javascript.browser', type: 'string' }, 'sentry.sdk.version': { value: expect.any(String), type: 'string' }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, }, }, { @@ -77,6 +81,7 @@ sentryTest('should capture all logging methods', async ({ getLocalTestUrl, page attributes: { 'sentry.sdk.name': { value: 'sentry.javascript.browser', type: 'string' }, 'sentry.sdk.version': { value: expect.any(String), type: 'string' }, 
+ 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, }, }, { @@ -88,6 +93,7 @@ sentryTest('should capture all logging methods', async ({ getLocalTestUrl, page attributes: { 'sentry.sdk.name': { value: 'sentry.javascript.browser', type: 'string' }, 'sentry.sdk.version': { value: expect.any(String), type: 'string' }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, }, }, { @@ -99,6 +105,7 @@ sentryTest('should capture all logging methods', async ({ getLocalTestUrl, page attributes: { 'sentry.sdk.name': { value: 'sentry.javascript.browser', type: 'string' }, 'sentry.sdk.version': { value: expect.any(String), type: 'string' }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, 'sentry.message.template': { value: 'test %s %s %s %s', type: 'string' }, 'sentry.message.parameter.0': { value: 'trace', type: 'string' }, 'sentry.message.parameter.1': { value: 'stringArg', type: 'string' }, @@ -115,6 +122,7 @@ sentryTest('should capture all logging methods', async ({ getLocalTestUrl, page attributes: { 'sentry.sdk.name': { value: 'sentry.javascript.browser', type: 'string' }, 'sentry.sdk.version': { value: expect.any(String), type: 'string' }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, 'sentry.message.template': { value: 'test %s %s %s %s', type: 'string' }, 'sentry.message.parameter.0': { value: 'debug', type: 'string' }, 'sentry.message.parameter.1': { value: 'stringArg', type: 'string' }, @@ -131,6 +139,7 @@ sentryTest('should capture all logging methods', async ({ getLocalTestUrl, page attributes: { 'sentry.sdk.name': { value: 'sentry.javascript.browser', type: 'string' }, 'sentry.sdk.version': { value: expect.any(String), type: 'string' }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, 'sentry.message.template': { value: 'test %s %s %s %s', type: 'string' }, 'sentry.message.parameter.0': { value: 'info', type: 'string' }, 
'sentry.message.parameter.1': { value: 'stringArg', type: 'string' }, @@ -147,6 +156,7 @@ sentryTest('should capture all logging methods', async ({ getLocalTestUrl, page attributes: { 'sentry.sdk.name': { value: 'sentry.javascript.browser', type: 'string' }, 'sentry.sdk.version': { value: expect.any(String), type: 'string' }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, 'sentry.message.template': { value: 'test %s %s %s %s', type: 'string' }, 'sentry.message.parameter.0': { value: 'warn', type: 'string' }, 'sentry.message.parameter.1': { value: 'stringArg', type: 'string' }, @@ -163,6 +173,7 @@ sentryTest('should capture all logging methods', async ({ getLocalTestUrl, page attributes: { 'sentry.sdk.name': { value: 'sentry.javascript.browser', type: 'string' }, 'sentry.sdk.version': { value: expect.any(String), type: 'string' }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, 'sentry.message.template': { value: 'test %s %s %s %s', type: 'string' }, 'sentry.message.parameter.0': { value: 'error', type: 'string' }, 'sentry.message.parameter.1': { value: 'stringArg', type: 'string' }, @@ -179,6 +190,7 @@ sentryTest('should capture all logging methods', async ({ getLocalTestUrl, page attributes: { 'sentry.sdk.name': { value: 'sentry.javascript.browser', type: 'string' }, 'sentry.sdk.version': { value: expect.any(String), type: 'string' }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, 'sentry.message.template': { value: 'test %s %s %s %s', type: 'string' }, 'sentry.message.parameter.0': { value: 'fatal', type: 'string' }, 'sentry.message.parameter.1': { value: 'stringArg', type: 'string' }, diff --git a/dev-packages/browser-integration-tests/suites/public-api/metrics/simple/test.ts b/dev-packages/browser-integration-tests/suites/public-api/metrics/simple/test.ts index f9722fc0bec8..66f44878ac86 100644 --- 
a/dev-packages/browser-integration-tests/suites/public-api/metrics/simple/test.ts +++ b/dev-packages/browser-integration-tests/suites/public-api/metrics/simple/test.ts @@ -40,6 +40,7 @@ sentryTest('should capture all metric types', async ({ getLocalTestUrl, page }) 'sentry.environment': { value: 'test', type: 'string' }, 'sentry.sdk.name': { value: 'sentry.javascript.browser', type: 'string' }, 'sentry.sdk.version': { value: expect.any(String), type: 'string' }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, }, }, { @@ -55,6 +56,7 @@ sentryTest('should capture all metric types', async ({ getLocalTestUrl, page }) 'sentry.environment': { value: 'test', type: 'string' }, 'sentry.sdk.name': { value: 'sentry.javascript.browser', type: 'string' }, 'sentry.sdk.version': { value: expect.any(String), type: 'string' }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, }, }, { @@ -70,6 +72,7 @@ sentryTest('should capture all metric types', async ({ getLocalTestUrl, page }) 'sentry.environment': { value: 'test', type: 'string' }, 'sentry.sdk.name': { value: 'sentry.javascript.browser', type: 'string' }, 'sentry.sdk.version': { value: expect.any(String), type: 'string' }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, }, }, { @@ -85,6 +88,7 @@ sentryTest('should capture all metric types', async ({ getLocalTestUrl, page }) 'sentry.environment': { value: 'test', type: 'string' }, 'sentry.sdk.name': { value: 'sentry.javascript.browser', type: 'string' }, 'sentry.sdk.version': { value: expect.any(String), type: 'string' }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, }, }, { @@ -102,6 +106,7 @@ sentryTest('should capture all metric types', async ({ getLocalTestUrl, page }) 'sentry.environment': { value: 'test', type: 'string' }, 'sentry.sdk.name': { value: 'sentry.javascript.browser', type: 'string' }, 'sentry.sdk.version': { value: expect.any(String), 
type: 'string' }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, }, }, { @@ -144,6 +149,10 @@ sentryTest('should capture all metric types', async ({ getLocalTestUrl, page }) type: 'string', value: expect.any(String), }, + 'sentry.timestamp.sequence': { + type: 'integer', + value: expect.any(Number), + }, 'user.email': { type: 'string', value: 'test@example.com', diff --git a/dev-packages/browser-integration-tests/suites/tracing/microfrontend-span-attribution/init.js b/dev-packages/browser-integration-tests/suites/tracing/microfrontend-span-attribution/init.js new file mode 100644 index 000000000000..b8f186bc2896 --- /dev/null +++ b/dev-packages/browser-integration-tests/suites/tracing/microfrontend-span-attribution/init.js @@ -0,0 +1,22 @@ +import * as Sentry from '@sentry/browser'; + +window.Sentry = Sentry; + +Sentry.init({ + dsn: 'https://public@dsn.ingest.sentry.io/1337', + integrations: [Sentry.browserTracingIntegration()], + tracePropagationTargets: ['http://sentry-test-site.example'], + tracesSampleRate: 1, + autoSessionTracking: false, +}); + +// Propagate MFE identity from current scope to span attributes. +// withScope() forks the current scope, so tags set on the fork are +// visible when fetch/XHR instrumentation creates spans synchronously. 
+const client = Sentry.getClient(); +client.on('spanStart', span => { + const mfeName = Sentry.getCurrentScope().getScopeData().tags['mfe.name']; + if (mfeName) { + span.setAttribute('mfe.name', mfeName); + } +}); diff --git a/dev-packages/browser-integration-tests/suites/tracing/microfrontend-span-attribution/mfe-header.js b/dev-packages/browser-integration-tests/suites/tracing/microfrontend-span-attribution/mfe-header.js new file mode 100644 index 000000000000..16bfecfd15eb --- /dev/null +++ b/dev-packages/browser-integration-tests/suites/tracing/microfrontend-span-attribution/mfe-header.js @@ -0,0 +1,8 @@ +import * as Sentry from '@sentry/browser'; + +export function mount() { + Sentry.withScope(scope => { + scope.setTag('mfe.name', 'mfe-header'); + fetch('http://sentry-test-site.example/api/todos/1'); + }); +} diff --git a/dev-packages/browser-integration-tests/suites/tracing/microfrontend-span-attribution/mfe-one.js b/dev-packages/browser-integration-tests/suites/tracing/microfrontend-span-attribution/mfe-one.js new file mode 100644 index 000000000000..fdd091af4801 --- /dev/null +++ b/dev-packages/browser-integration-tests/suites/tracing/microfrontend-span-attribution/mfe-one.js @@ -0,0 +1,8 @@ +import * as Sentry from '@sentry/browser'; + +export function mount() { + Sentry.withScope(scope => { + scope.setTag('mfe.name', 'mfe-one'); + fetch('http://sentry-test-site.example/api/todos/2'); + }); +} diff --git a/dev-packages/browser-integration-tests/suites/tracing/microfrontend-span-attribution/mfe-two.js b/dev-packages/browser-integration-tests/suites/tracing/microfrontend-span-attribution/mfe-two.js new file mode 100644 index 000000000000..ff6c23d374e0 --- /dev/null +++ b/dev-packages/browser-integration-tests/suites/tracing/microfrontend-span-attribution/mfe-two.js @@ -0,0 +1,8 @@ +import * as Sentry from '@sentry/browser'; + +export function mount() { + Sentry.withScope(scope => { + scope.setTag('mfe.name', 'mfe-two'); + 
fetch('http://sentry-test-site.example/api/todos/3'); + }); +} diff --git a/dev-packages/browser-integration-tests/suites/tracing/microfrontend-span-attribution/subject.js b/dev-packages/browser-integration-tests/suites/tracing/microfrontend-span-attribution/subject.js new file mode 100644 index 000000000000..b36ad956e913 --- /dev/null +++ b/dev-packages/browser-integration-tests/suites/tracing/microfrontend-span-attribution/subject.js @@ -0,0 +1,9 @@ +// Simulates a microfrontend architecture where MFEs are lazy-loaded + +// Lazy-load each MFE (kinda like React.lazy + Module Federation) +import('./mfe-header').then(m => m.mount()); +import('./mfe-one').then(m => m.mount()); +import('./mfe-two').then(m => m.mount()); + +// Shell makes its own request, no MFE scope +fetch('http://sentry-test-site.example/api/shell-config'); diff --git a/dev-packages/browser-integration-tests/suites/tracing/microfrontend-span-attribution/template.html b/dev-packages/browser-integration-tests/suites/tracing/microfrontend-span-attribution/template.html new file mode 100644 index 000000000000..acc42eb2480f --- /dev/null +++ b/dev-packages/browser-integration-tests/suites/tracing/microfrontend-span-attribution/template.html @@ -0,0 +1,9 @@ + + + + + + +
+ + diff --git a/dev-packages/browser-integration-tests/suites/tracing/microfrontend-span-attribution/test.ts b/dev-packages/browser-integration-tests/suites/tracing/microfrontend-span-attribution/test.ts new file mode 100644 index 000000000000..e49b258b4ff2 --- /dev/null +++ b/dev-packages/browser-integration-tests/suites/tracing/microfrontend-span-attribution/test.ts @@ -0,0 +1,37 @@ +import { expect } from '@playwright/test'; +import { sentryTest } from '../../../utils/fixtures'; +import { envelopeRequestParser, shouldSkipTracingTest, waitForTransactionRequest } from '../../../utils/helpers'; + +sentryTest('should attribute spans to their originating microfrontend', async ({ getLocalTestUrl, page }) => { + if (shouldSkipTracingTest()) { + sentryTest.skip(); + } + + await page.route('http://sentry-test-site.example/*', route => route.fulfill({ body: '{}' })); + + const url = await getLocalTestUrl({ testDir: __dirname }); + + const reqPromise = waitForTransactionRequest(page, event => { + const spans = event.spans || []; + return ( + spans.some(s => s.description?.includes('/api/todos/1')) && + spans.some(s => s.description?.includes('/api/todos/2')) && + spans.some(s => s.description?.includes('/api/todos/3')) && + spans.some(s => s.description?.includes('/api/shell-config')) + ); + }); + + await page.goto(url); + + const req = await reqPromise; + const event = envelopeRequestParser(req); + const httpSpans = event.spans?.filter(({ op }) => op === 'http.client') || []; + + // Each MFE's fetch is attributed via withScope + spanStart hook + expect(httpSpans.find(s => s.description?.includes('/api/todos/1'))?.data?.['mfe.name']).toBe('mfe-header'); + expect(httpSpans.find(s => s.description?.includes('/api/todos/2'))?.data?.['mfe.name']).toBe('mfe-one'); + expect(httpSpans.find(s => s.description?.includes('/api/todos/3'))?.data?.['mfe.name']).toBe('mfe-two'); + + // Shell span has no MFE tag + expect(httpSpans.find(s => 
s.description?.includes('/api/shell-config'))?.data?.['mfe.name']).toBeUndefined(); +}); diff --git a/dev-packages/browser-integration-tests/utils/replayHelpers.ts b/dev-packages/browser-integration-tests/utils/replayHelpers.ts index 36af7740047e..408a3aadc933 100644 --- a/dev-packages/browser-integration-tests/utils/replayHelpers.ts +++ b/dev-packages/browser-integration-tests/utils/replayHelpers.ts @@ -364,7 +364,7 @@ export function replayEnvelopeIsCompressed(resOrReq: Request | Response): boolea const lines: boolean[] = envelopeString.split('\n').map(line => { try { JSON.parse(line); - } catch (error) { + } catch { // If we fail to parse a line, we _might_ have found a compressed payload, // so let's check if this is actually the case. // This is quite hacky but we can't go through `line` because the prior operations @@ -394,7 +394,7 @@ export const replayEnvelopeParser = (request: Request | null): unknown[] => { const lines = envelopeString.split('\n').map(line => { try { return JSON.parse(line); - } catch (error) { + } catch { // If we fail to parse a line, we _might_ have found a compressed payload, // so let's check if this is actually the case. // This is quite hacky but we can't go through `line` because the prior operations diff --git a/dev-packages/clear-cache-gh-action/package.json b/dev-packages/clear-cache-gh-action/package.json index 5f1b47743cb8..a68d7c17980d 100644 --- a/dev-packages/clear-cache-gh-action/package.json +++ b/dev-packages/clear-cache-gh-action/package.json @@ -10,8 +10,8 @@ "main": "index.mjs", "type": "module", "scripts": { - "lint": "oxlint .", - "fix": "oxlint . --fix" + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --type-aware", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . 
--fix --type-aware" }, "dependencies": { "@actions/core": "1.10.1", diff --git a/dev-packages/cloudflare-integration-tests/expect.ts b/dev-packages/cloudflare-integration-tests/expect.ts index b33926ffce11..c3e2bd007436 100644 --- a/dev-packages/cloudflare-integration-tests/expect.ts +++ b/dev-packages/cloudflare-integration-tests/expect.ts @@ -28,6 +28,7 @@ function getSdk(sdk: 'cloudflare' | 'hono'): SdkInfo { name: `npm:@sentry/${sdk}`, version: SDK_VERSION, }, + ...(sdk === 'hono' ? [{ name: 'npm:@sentry/cloudflare', version: SDK_VERSION }] : []), ], version: SDK_VERSION, }; diff --git a/dev-packages/cloudflare-integration-tests/package.json b/dev-packages/cloudflare-integration-tests/package.json index efb6064e2f85..daa8268e3e7a 100644 --- a/dev-packages/cloudflare-integration-tests/package.json +++ b/dev-packages/cloudflare-integration-tests/package.json @@ -7,8 +7,8 @@ }, "private": true, "scripts": { - "lint": "oxlint .", - "fix": "oxlint . --fix", + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --type-aware", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . 
--fix --type-aware", "test": "vitest run", "test:watch": "yarn test --watch" }, @@ -16,7 +16,7 @@ "@langchain/langgraph": "^1.0.1", "@sentry/cloudflare": "10.43.0", "@sentry/hono": "10.43.0", - "hono": "^4.12.5" + "hono": "^4.12.7" }, "devDependencies": { "@cloudflare/workers-types": "^4.20250922.0", diff --git a/dev-packages/cloudflare-integration-tests/suites/hono-sdk/index.ts b/dev-packages/cloudflare-integration-tests/suites/hono-sdk/index.ts index 27dfdafbc7a8..ce5e0ab46512 100644 --- a/dev-packages/cloudflare-integration-tests/suites/hono-sdk/index.ts +++ b/dev-packages/cloudflare-integration-tests/suites/hono-sdk/index.ts @@ -8,7 +8,6 @@ interface Env { const app = new Hono<{ Bindings: Env }>(); app.use( - '*', sentry(app, { dsn: process.env.SENTRY_DSN, tracesSampleRate: 1.0, diff --git a/dev-packages/cloudflare-integration-tests/suites/hono-sdk/test.ts b/dev-packages/cloudflare-integration-tests/suites/hono-sdk/test.ts index 4f8472ee8164..c1f17ddb6d19 100644 --- a/dev-packages/cloudflare-integration-tests/suites/hono-sdk/test.ts +++ b/dev-packages/cloudflare-integration-tests/suites/hono-sdk/test.ts @@ -17,7 +17,7 @@ it('Hono app captures parametrized errors (Hono SDK)', async ({ signal }) => { stacktrace: { frames: expect.any(Array), }, - mechanism: { type: 'auto.faas.hono.error_handler', handled: false }, + mechanism: { type: 'auto.http.hono.context_error', handled: false }, }, ], }, diff --git a/dev-packages/cloudflare-integration-tests/suites/public-api/metrics/server-address/test.ts b/dev-packages/cloudflare-integration-tests/suites/public-api/metrics/server-address/test.ts index 5ee5b0954e59..013bf552d772 100644 --- a/dev-packages/cloudflare-integration-tests/suites/public-api/metrics/server-address/test.ts +++ b/dev-packages/cloudflare-integration-tests/suites/public-api/metrics/server-address/test.ts @@ -36,6 +36,10 @@ it('should add server.address attribute to metrics when serverName is set', asyn type: 'string', value: expect.any(String), }, + 
'sentry.timestamp.sequence': { + type: 'integer', + value: expect.any(Number), + }, 'server.address': { type: 'string', value: 'mi-servidor.com', diff --git a/dev-packages/e2e-tests/package.json b/dev-packages/e2e-tests/package.json index 4b106ecf9d64..55e8a937d6b3 100644 --- a/dev-packages/e2e-tests/package.json +++ b/dev-packages/e2e-tests/package.json @@ -4,8 +4,8 @@ "license": "MIT", "private": true, "scripts": { - "fix": "oxlint . --fix", - "lint": "oxlint .", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --fix --type-aware", + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --type-aware", "lint:ts": "tsc --noEmit", "test:e2e": "run-s test:validate-configuration test:validate-test-app-setups test:run", "test:run": "ts-node run.ts", diff --git a/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/.gitignore b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/.gitignore new file mode 100644 index 000000000000..560782d47d98 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/.gitignore @@ -0,0 +1,26 @@ +# build output +dist/ + +# generated types +.astro/ + +# dependencies +node_modules/ + +# logs +npm-debug.log* +yarn-debug.log* +yarn-error.log* +pnpm-debug.log* + +# environment variables +.env +.env.production + +# macOS-specific files +.DS_Store + +# jetbrains setting folder +.idea/ + +test-results diff --git a/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/.npmrc b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/.npmrc new file mode 100644 index 000000000000..070f80f05092 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/.npmrc @@ -0,0 +1,2 @@ +@sentry:registry=http://127.0.0.1:4873 +@sentry-internal:registry=http://127.0.0.1:4873 diff --git a/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/README.md b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/README.md new 
file mode 100644 index 000000000000..ff19a3e7ece8 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/README.md @@ -0,0 +1,48 @@ +# Astro Starter Kit: Basics + +```sh +npm create astro@latest -- --template basics +``` + +[![Open in StackBlitz](https://developer.stackblitz.com/img/open_in_stackblitz.svg)](https://stackblitz.com/github/withastro/astro/tree/latest/examples/basics) +[![Open with CodeSandbox](https://assets.codesandbox.io/github/button-edit-lime.svg)](https://codesandbox.io/p/sandbox/github/withastro/astro/tree/latest/examples/basics) +[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/withastro/astro?devcontainer_path=.devcontainer/basics/devcontainer.json) + +> 🧑‍🚀 **Seasoned astronaut?** Delete this file. Have fun! + +![just-the-basics](https://github.com/withastro/astro/assets/2244813/a0a5533c-a856-4198-8470-2d67b1d7c554) + +## 🚀 Project Structure + +Inside of your Astro project, you'll see the following folders and files: + +```text +/ +├── public/ +│ └── favicon.svg +├── src/ +│ ├── layouts/ +│ │ └── Layout.astro +│ └── pages/ +│ └── index.astro +└── package.json +``` + +To learn more about the folder structure of an Astro project, refer to [our guide on project structure](https://docs.astro.build/en/basics/project-structure/). + +## 🧞 Commands + +All commands are run from the root of the project, from a terminal: + +| Command | Action | +| :------------------------ | :----------------------------------------------- | +| `npm install` | Installs dependencies | +| `npm run dev` | Starts local dev server at `localhost:4321` | +| `npm run build` | Build your production site to `./dist/` | +| `npm run preview` | Preview your build locally, before deploying | +| `npm run astro ...` | Run CLI commands like `astro add`, `astro check` | +| `npm run astro -- --help` | Get help using the Astro CLI | + +## 👀 Want to learn more? 
+ +Feel free to check [our documentation](https://docs.astro.build) or jump into our [Discord server](https://astro.build/chat). diff --git a/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/astro.config.mjs b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/astro.config.mjs new file mode 100644 index 000000000000..f19212596282 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/astro.config.mjs @@ -0,0 +1,21 @@ +import sentry from '@sentry/astro'; +// @ts-check +import { defineConfig } from 'astro/config'; +import cloudflare from '@astrojs/cloudflare'; + +// https://astro.build/config +export default defineConfig({ + integrations: [ + sentry({ + debug: true, + sourceMapsUploadOptions: { + enabled: false, + }, + }), + ], + output: 'server', + security: { + allowedDomains: [{ hostname: 'localhost' }], + }, + adapter: cloudflare(), +}); diff --git a/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/package.json b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/package.json new file mode 100644 index 000000000000..2e7cfa8e62dd --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/package.json @@ -0,0 +1,28 @@ +{ + "name": "astro-6-cf-workers", + "type": "module", + "version": "0.0.1", + "scripts": { + "dev": "astro dev", + "build": "astro build", + "preview": "astro preview", + "astro": "astro", + "start": "wrangler dev --var \"E2E_TEST_DSN:$E2E_TEST_DSN\" --port 3030", + "test:build": "pnpm install && pnpm build", + "test:assert": "TEST_ENV=production playwright test", + "generate-types": "wrangler types" + }, + "dependencies": { + "@astrojs/cloudflare": "^13.0.2", + "@playwright/test": "~1.56.0", + "@sentry-internal/test-utils": "link:../../../test-utils", + "@sentry/astro": "latest || *", + "@sentry/cloudflare": "latest || *", + "astro": "^6.0.0", + "wrangler": "^4.72.0" + }, + "volta": { + "node": "22.22.0", + "extends": "../../package.json" + } +} diff --git 
a/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/playwright.config.mjs b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/playwright.config.mjs new file mode 100644 index 000000000000..ae58e4ff3ddc --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/playwright.config.mjs @@ -0,0 +1,13 @@ +import { getPlaywrightConfig } from '@sentry-internal/test-utils'; + +const testEnv = process.env.TEST_ENV; + +if (!testEnv) { + throw new Error('No test env defined'); +} + +const config = getPlaywrightConfig({ + startCommand: 'pnpm start', +}); + +export default config; diff --git a/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/public/favicon.svg b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/public/favicon.svg new file mode 100644 index 000000000000..f157bd1c5e28 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/public/favicon.svg @@ -0,0 +1,9 @@ + + + + diff --git a/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/sentry.client.config.js b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/sentry.client.config.js new file mode 100644 index 000000000000..83573d36d0be --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/sentry.client.config.js @@ -0,0 +1,22 @@ +import * as Sentry from '@sentry/astro'; + +Sentry.init({ + dsn: import.meta.env.PUBLIC_E2E_TEST_DSN, + environment: 'qa', + tracesSampleRate: 1.0, + tunnel: 'http://localhost:3031/', // proxy server + integrations: [ + Sentry.browserTracingIntegration({ + beforeStartSpan: opts => { + if (opts.name.startsWith('/blog/')) { + return { + ...opts, + name: window.location.pathname, + }; + } + return opts; + }, + }), + ], + debug: true, +}); diff --git a/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/sentry.server.config.js b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/sentry.server.config.js new file mode 100644 index 
000000000000..8f1a839bc05f --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/sentry.server.config.js @@ -0,0 +1,13 @@ +import * as Sentry from '@sentry/cloudflare'; +import handler from '@astrojs/cloudflare/entrypoints/server'; + +export default Sentry.withSentry( + env => ({ + dsn: env.E2E_TEST_DSN, + environment: 'qa', + tracesSampleRate: 1.0, + tunnel: 'http://localhost:3031/', // proxy server + debug: true, + }), + handler, +); diff --git a/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/assets/astro.svg b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/assets/astro.svg new file mode 100644 index 000000000000..8cf8fb0c7da6 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/assets/astro.svg @@ -0,0 +1 @@ + diff --git a/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/assets/background.svg b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/assets/background.svg new file mode 100644 index 000000000000..4b2be0ac0e47 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/assets/background.svg @@ -0,0 +1 @@ + diff --git a/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/components/Avatar.astro b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/components/Avatar.astro new file mode 100644 index 000000000000..5611579efaf1 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/components/Avatar.astro @@ -0,0 +1,5 @@ +--- + +--- + +User avatar diff --git a/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/components/Welcome.astro b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/components/Welcome.astro new file mode 100644 index 000000000000..6f862e767574 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/components/Welcome.astro @@ -0,0 +1,205 @@ +--- +import astroLogo from '../assets/astro.svg'; 
+import background from '../assets/background.svg'; +--- + + + + diff --git a/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/layouts/Layout.astro b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/layouts/Layout.astro new file mode 100644 index 000000000000..6105f48ffd35 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/layouts/Layout.astro @@ -0,0 +1,22 @@ + + + + + + + + Astro Basics + + + + + + + diff --git a/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/pages/api/user/[userId].json.js b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/pages/api/user/[userId].json.js new file mode 100644 index 000000000000..481c8979dc89 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/pages/api/user/[userId].json.js @@ -0,0 +1,8 @@ +export function GET({ params }) { + return new Response( + JSON.stringify({ + greeting: `Hello ${params.userId}`, + userId: params.userId, + }), + ); +} diff --git a/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/pages/blog/[slug].astro b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/pages/blog/[slug].astro new file mode 100644 index 000000000000..b776fa25c494 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/pages/blog/[slug].astro @@ -0,0 +1,11 @@ +--- +import Layout from '../../layouts/Layout.astro'; + +export const prerender = false; + +const { slug } = Astro.params; +--- + + +

Blog post: {slug}

+
diff --git a/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/pages/catchAll/[...path].astro b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/pages/catchAll/[...path].astro new file mode 100644 index 000000000000..9fe2bdab5c15 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/pages/catchAll/[...path].astro @@ -0,0 +1,11 @@ +--- +import Layout from '../../layouts/Layout.astro'; + +export const prerender = false; + +const params = Astro.params; +--- + + +

params: {params}

+
diff --git a/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/pages/client-error/index.astro b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/pages/client-error/index.astro new file mode 100644 index 000000000000..492524e2a713 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/pages/client-error/index.astro @@ -0,0 +1,7 @@ +--- +import Layout from '../../layouts/Layout.astro'; +--- + + + + diff --git a/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/pages/endpoint-error/api.ts b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/pages/endpoint-error/api.ts new file mode 100644 index 000000000000..a76accdba010 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/pages/endpoint-error/api.ts @@ -0,0 +1,15 @@ +import type { APIRoute } from 'astro'; + +export const prerender = false; + +export const GET: APIRoute = ({ request, url }) => { + if (url.searchParams.has('error')) { + throw new Error('Endpoint Error'); + } + return new Response( + JSON.stringify({ + search: url.search, + sp: url.searchParams, + }), + ); +}; diff --git a/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/pages/endpoint-error/index.astro b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/pages/endpoint-error/index.astro new file mode 100644 index 000000000000..ecfb0641144e --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/pages/endpoint-error/index.astro @@ -0,0 +1,9 @@ +--- +import Layout from '../../layouts/Layout.astro'; + +export const prerender = false; +--- + + + + diff --git a/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/pages/index.astro b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/pages/index.astro new file mode 100644 index 000000000000..7032437764f8 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/pages/index.astro @@ -0,0 
+1,23 @@ +--- +import Welcome from '../components/Welcome.astro'; +import Layout from '../layouts/Layout.astro'; + +// Welcome to Astro! Wondering what to do next? Check out the Astro documentation at https://docs.astro.build +// Don't want to use any of this? Delete everything in this file, the `assets`, `components`, and `layouts` directories, and start fresh. +--- + + +
+

Astro E2E Test App

+ +
+
diff --git a/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/pages/server-island/index.astro b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/pages/server-island/index.astro new file mode 100644 index 000000000000..0e922af4667f --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/pages/server-island/index.astro @@ -0,0 +1,14 @@ +--- +import Avatar from '../../components/Avatar.astro'; +import Layout from '../../layouts/Layout.astro'; + +export const prerender = true; +--- + + +

This page is static, except for the avatar which is loaded dynamically from the server

+ + +

Fallback

+
+
diff --git a/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/pages/ssr-error/index.astro b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/pages/ssr-error/index.astro new file mode 100644 index 000000000000..fc42bcbae4f7 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/pages/ssr-error/index.astro @@ -0,0 +1,11 @@ +--- +import Layout from '../../layouts/Layout.astro'; + +const a = {} as any; +console.log(a.foo.x); +export const prerender = false; +--- + + +

Page with SSR error

+
diff --git a/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/pages/test-ssr/index.astro b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/pages/test-ssr/index.astro new file mode 100644 index 000000000000..4531c20c05ad --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/pages/test-ssr/index.astro @@ -0,0 +1,11 @@ +--- +import Layout from '../../layouts/Layout.astro'; + +export const prerender = false; +--- + + +

This is a server page

+ + +
diff --git a/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/pages/test-static/index.astro b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/pages/test-static/index.astro new file mode 100644 index 000000000000..c0fd701d4a2a --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/pages/test-static/index.astro @@ -0,0 +1,11 @@ +--- +import Layout from '../../layouts/Layout.astro'; + +export const prerender = true; +--- + + +

This is a static page

+ + +
diff --git a/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/pages/user-page/[userId].astro b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/pages/user-page/[userId].astro new file mode 100644 index 000000000000..8050e386a39f --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/pages/user-page/[userId].astro @@ -0,0 +1,16 @@ +--- +import Layout from '../../layouts/Layout.astro'; + +export const prerender = false; + +const { userId } = Astro.params; + +const response = await fetch(Astro.url.origin + `/api/user/${userId}.json`); +const data = await response.json(); +--- + + +

{data.greeting}

+ +

data: {JSON.stringify(data)}

+
diff --git a/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/pages/user-page/settings.astro b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/pages/user-page/settings.astro new file mode 100644 index 000000000000..8260e632c07b --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/src/pages/user-page/settings.astro @@ -0,0 +1,7 @@ +--- +import Layout from '../../layouts/Layout.astro'; +--- + + +

User Settings

+
diff --git a/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/start-event-proxy.mjs b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/start-event-proxy.mjs new file mode 100644 index 000000000000..818e677ba3da --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/start-event-proxy.mjs @@ -0,0 +1,6 @@ +import { startEventProxyServer } from '@sentry-internal/test-utils'; + +startEventProxyServer({ + port: 3031, + proxyServerName: 'astro-6-cf-workers', +}); diff --git a/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/tests/errors.client.test.ts b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/tests/errors.client.test.ts new file mode 100644 index 000000000000..44a1e95dc7da --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/tests/errors.client.test.ts @@ -0,0 +1,79 @@ +import { expect, test } from '@playwright/test'; +import { waitForError } from '@sentry-internal/test-utils'; + +test.describe('client-side errors', () => { + test('captures error thrown on click', async ({ page }) => { + const errorEventPromise = waitForError('astro-6-cf-workers', errorEvent => { + return errorEvent?.exception?.values?.[0]?.value === 'client error'; + }); + + await page.goto('/client-error'); + + await page.getByText('Throw Error').click(); + + const errorEvent = await errorEventPromise; + + const errorEventFrames = errorEvent.exception?.values?.[0]?.stacktrace?.frames; + + expect(errorEventFrames?.[errorEventFrames?.length - 1]).toEqual( + expect.objectContaining({ + colno: expect.any(Number), + lineno: expect.any(Number), + filename: expect.stringContaining('/client-error'), + function: 'HTMLButtonElement.onclick', + in_app: true, + }), + ); + + expect(errorEvent).toMatchObject({ + exception: { + values: [ + { + mechanism: { + handled: false, + type: 'auto.browser.global_handlers.onerror', + }, + type: 'Error', + value: 'client error', + stacktrace: expect.any(Object), // detailed 
check above + }, + ], + }, + level: 'error', + platform: 'javascript', + request: { + url: expect.stringContaining('/client-error'), + headers: { + 'User-Agent': expect.any(String), + }, + }, + event_id: expect.stringMatching(/[a-f0-9]{32}/), + timestamp: expect.any(Number), + sdk: { + integrations: expect.arrayContaining([ + 'InboundFilters', + 'FunctionToString', + 'BrowserApiErrors', + 'Breadcrumbs', + 'GlobalHandlers', + 'LinkedErrors', + 'Dedupe', + 'HttpContext', + 'BrowserSession', + 'BrowserTracing', + ]), + name: 'sentry.javascript.astro', + version: expect.any(String), + packages: expect.any(Array), + }, + transaction: '/client-error', + contexts: { + trace: { + trace_id: expect.stringMatching(/[a-f0-9]{32}/), + span_id: expect.stringMatching(/[a-f0-9]{16}/), + }, + }, + environment: 'qa', + }); + }); +}); diff --git a/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/tests/errors.server.test.ts b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/tests/errors.server.test.ts new file mode 100644 index 000000000000..07a22096b1ec --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/tests/errors.server.test.ts @@ -0,0 +1,154 @@ +import { expect, test } from '@playwright/test'; +import { waitForError, waitForTransaction } from '@sentry-internal/test-utils'; + +test.describe('server-side errors', () => { + test('captures SSR error', async ({ page }) => { + const errorEventPromise = waitForError('astro-6-cf-workers', errorEvent => { + return errorEvent?.exception?.values?.[0]?.value === "Cannot read properties of undefined (reading 'x')"; + }); + + const transactionEventPromise = waitForTransaction('astro-6-cf-workers', transactionEvent => { + return transactionEvent.transaction === 'GET /ssr-error'; + }); + + // This page returns an error status code, so we need to catch the navigation error + await page.goto('/ssr-error').catch(() => { + // Expected to fail with net::ERR_HTTP_RESPONSE_CODE_FAILURE in newer 
Chromium versions + }); + + const errorEvent = await errorEventPromise; + const transactionEvent = await transactionEventPromise; + + expect(transactionEvent).toMatchObject({ + transaction: 'GET /ssr-error', + spans: [], + }); + + const traceId = transactionEvent.contexts?.trace?.trace_id; + const spanId = transactionEvent.contexts?.trace?.span_id; + + expect(traceId).toMatch(/[a-f0-9]{32}/); + expect(spanId).toMatch(/[a-f0-9]{16}/); + expect(transactionEvent.contexts?.trace?.parent_span_id).toBeUndefined(); + + expect(errorEvent).toMatchObject({ + contexts: { + cloud_resource: expect.any(Object), + culture: expect.any(Object), + runtime: expect.any(Object), + trace: { + span_id: spanId, + trace_id: traceId, + }, + }, + environment: 'qa', + event_id: expect.stringMatching(/[a-f0-9]{32}/), + exception: { + values: [ + { + mechanism: { + handled: false, + type: 'auto.middleware.astro', + }, + stacktrace: expect.any(Object), + type: 'TypeError', + value: "Cannot read properties of undefined (reading 'x')", + }, + ], + }, + platform: 'javascript', + request: { + headers: expect.objectContaining({ + host: 'localhost:3030', + 'user-agent': expect.any(String), + }), + method: 'GET', + url: expect.stringContaining('/ssr-error'), + }, + sdk: { + integrations: expect.any(Array), + name: 'sentry.javascript.cloudflare', + packages: expect.any(Array), + version: expect.any(String), + }, + timestamp: expect.any(Number), + transaction: 'GET /ssr-error', + }); + }); + + test('captures endpoint error', async ({ page }) => { + const errorEventPromise = waitForError('astro-6-cf-workers', errorEvent => { + return errorEvent?.exception?.values?.[0]?.value === 'Endpoint Error'; + }); + const transactionEventApiPromise = waitForTransaction('astro-6-cf-workers', transactionEvent => { + return transactionEvent.transaction === 'GET /endpoint-error/api'; + }); + const transactionEventEndpointPromise = waitForTransaction('astro-6-cf-workers', transactionEvent => { + return 
transactionEvent.transaction === 'GET /endpoint-error'; + }); + + await page.goto('/endpoint-error'); + await page.getByText('Get Data').click(); + + const errorEvent = await errorEventPromise; + const transactionEventApi = await transactionEventApiPromise; + const transactionEventEndpoint = await transactionEventEndpointPromise; + + expect(transactionEventEndpoint).toMatchObject({ + transaction: 'GET /endpoint-error', + spans: [], + }); + + const traceId = transactionEventEndpoint.contexts?.trace?.trace_id; + const endpointSpanId = transactionEventApi.contexts?.trace?.span_id; + + expect(traceId).toMatch(/[a-f0-9]{32}/); + expect(endpointSpanId).toMatch(/[a-f0-9]{16}/); + + expect(transactionEventApi).toMatchObject({ + transaction: 'GET /endpoint-error/api', + spans: [], + }); + + const spanId = transactionEventApi.contexts?.trace?.span_id; + const parentSpanId = transactionEventApi.contexts?.trace?.parent_span_id; + + expect(spanId).toMatch(/[a-f0-9]{16}/); + // TODO: This is incorrect, for whatever reason, it should be the endpointSpanId ideally + expect(parentSpanId).toMatch(/[a-f0-9]{16}/); + expect(parentSpanId).not.toEqual(endpointSpanId); + + expect(errorEvent).toMatchObject({ + contexts: { + trace: { + parent_span_id: parentSpanId, + span_id: spanId, + trace_id: traceId, + }, + }, + exception: { + values: [ + { + mechanism: { + handled: false, + type: 'auto.middleware.astro', + }, + stacktrace: expect.any(Object), + type: 'Error', + value: 'Endpoint Error', + }, + ], + }, + platform: 'javascript', + request: { + headers: expect.objectContaining({ + accept: expect.any(String), + }), + method: 'GET', + query_string: 'error=1', + url: expect.stringContaining('endpoint-error/api?error=1'), + }, + transaction: 'GET /endpoint-error/api', + }); + }); +}); diff --git a/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/tests/tracing.dynamic.test.ts b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/tests/tracing.dynamic.test.ts new file mode 
100644 index 000000000000..a5bbc408862c --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/tests/tracing.dynamic.test.ts @@ -0,0 +1,393 @@ +import { expect, test } from '@playwright/test'; +import { waitForTransaction } from '@sentry-internal/test-utils'; + +test.describe('tracing in dynamically rendered (ssr) routes', () => { + test('sends server and client pageload spans with the same trace id', async ({ page }) => { + const clientPageloadTxnPromise = waitForTransaction('astro-6-cf-workers', txnEvent => { + return txnEvent?.transaction === '/test-ssr'; + }); + + const serverPageRequestTxnPromise = waitForTransaction('astro-6-cf-workers', txnEvent => { + return txnEvent?.transaction === 'GET /test-ssr'; + }); + + await page.goto('/test-ssr'); + + const clientPageloadTxn = await clientPageloadTxnPromise; + const serverPageRequestTxn = await serverPageRequestTxnPromise; + + const clientPageloadTraceId = clientPageloadTxn.contexts?.trace?.trace_id; + const clientPageloadParentSpanId = clientPageloadTxn.contexts?.trace?.parent_span_id; + + const serverPageRequestTraceId = serverPageRequestTxn.contexts?.trace?.trace_id; + const serverPageloadSpanId = serverPageRequestTxn.contexts?.trace?.span_id; + + expect(clientPageloadTraceId).toEqual(serverPageRequestTraceId); + expect(clientPageloadParentSpanId).toEqual(serverPageloadSpanId); + + expect(clientPageloadTxn).toMatchObject({ + contexts: { + trace: { + data: expect.objectContaining({ + 'sentry.op': 'pageload', + 'sentry.origin': 'auto.pageload.astro', + 'sentry.source': 'route', + }), + op: 'pageload', + origin: 'auto.pageload.astro', + span_id: expect.stringMatching(/[a-f0-9]{16}/), + parent_span_id: expect.stringMatching(/[a-f0-9]{16}/), + trace_id: expect.stringMatching(/[a-f0-9]{32}/), + }, + }, + environment: 'qa', + event_id: expect.stringMatching(/[a-f0-9]{32}/), + measurements: expect.any(Object), + platform: 'javascript', + request: expect.any(Object), + sdk: { + integrations: 
expect.any(Array), + name: 'sentry.javascript.astro', + packages: expect.any(Array), + version: expect.any(String), + }, + spans: expect.any(Array), + start_timestamp: expect.any(Number), + timestamp: expect.any(Number), + transaction: '/test-ssr', + transaction_info: { + source: 'route', + }, + type: 'transaction', + }); + + expect(serverPageRequestTxn).toMatchObject({ + contexts: { + cloud_resource: expect.any(Object), + culture: expect.any(Object), + runtime: expect.any(Object), + trace: { + data: { + 'http.response.status_code': 200, + method: 'GET', + 'sentry.op': 'http.server', + 'sentry.origin': 'auto.http.astro', + 'sentry.sample_rate': 1, + 'sentry.source': 'route', + 'http.request.header.accept': expect.any(String), + 'http.request.header.accept_encoding': 'br, gzip', + 'http.request.header.accept_language': 'en-US', + 'http.request.header.sec_fetch_mode': 'navigate', + 'http.request.header.user_agent': expect.any(String), + }, + op: 'http.server', + origin: 'auto.http.astro', + status: 'ok', + span_id: expect.stringMatching(/[a-f0-9]{16}/), + trace_id: expect.stringMatching(/[a-f0-9]{32}/), + }, + }, + environment: 'qa', + event_id: expect.stringMatching(/[a-f0-9]{32}/), + platform: 'javascript', + request: { + headers: expect.objectContaining({ + accept: expect.any(String), + 'accept-encoding': expect.any(String), + 'user-agent': expect.any(String), + }), + method: 'GET', + url: expect.stringContaining('/test-ssr'), + }, + sdk: { + integrations: expect.any(Array), + name: 'sentry.javascript.cloudflare', + packages: expect.any(Array), + version: expect.any(String), + }, + spans: expect.any(Array), + start_timestamp: expect.any(Number), + timestamp: expect.any(Number), + transaction: 'GET /test-ssr', + transaction_info: { + source: 'route', + }, + type: 'transaction', + }); + }); +}); + +test.describe('nested SSR routes (client, server, server request)', () => { + /** The user-page route fetches from an endpoint and creates a deeply nested span structure: 
+ * pageload — /user-page/myUsername123 + * ├── browser.** — multiple browser spans + * └── browser.request — /user-page/myUsername123 + * └── http.server — GET /user-page/[userId] (SSR page request) + * └── http.client — GET /api/user/myUsername123.json (executing fetch call from SSR page - span) + * └── http.server — GET /api/user/myUsername123.json (server request) + */ + test('sends connected server and client pageload and request spans with the same trace id', async ({ page }) => { + const clientPageloadTxnPromise = waitForTransaction('astro-6-cf-workers', txnEvent => { + return txnEvent?.transaction?.startsWith('/user-page/') ?? false; + }); + + const serverPageRequestTxnPromise = waitForTransaction('astro-6-cf-workers', txnEvent => { + return txnEvent?.transaction?.startsWith('GET /user-page/') ?? false; + }); + + const serverHTTPServerRequestTxnPromise = waitForTransaction('astro-6-cf-workers', txnEvent => { + return txnEvent?.transaction?.startsWith('GET /api/user/') ?? false; + }); + + await page.goto('/user-page/myUsername123'); + + const clientPageloadTxn = await clientPageloadTxnPromise; + const serverPageRequestTxn = await serverPageRequestTxnPromise; + const serverHTTPServerRequestTxn = await serverHTTPServerRequestTxnPromise; + const serverRequestHTTPClientSpan = serverPageRequestTxn.spans?.find( + span => span.op === 'http.client' && span.description?.includes('/api/user/'), + ); + + const clientPageloadTraceId = clientPageloadTxn.contexts?.trace?.trace_id; + + // Verify all spans have the same trace ID + expect(clientPageloadTraceId).toEqual(serverPageRequestTxn.contexts?.trace?.trace_id); + expect(clientPageloadTraceId).toEqual(serverHTTPServerRequestTxn.contexts?.trace?.trace_id); + expect(clientPageloadTraceId).toEqual(serverRequestHTTPClientSpan?.trace_id); + + // serverPageRequest has no parent (root span) + expect(serverPageRequestTxn.contexts?.trace?.parent_span_id).toBeUndefined(); + + // clientPageload's parent and 
serverRequestHTTPClient's parent is serverPageRequest + const serverPageRequestSpanId = serverPageRequestTxn.contexts?.trace?.span_id; + expect(clientPageloadTxn.contexts?.trace?.parent_span_id).toEqual(serverPageRequestSpanId); + expect(serverRequestHTTPClientSpan?.parent_span_id).toEqual(serverPageRequestSpanId); + + // serverHTTPServerRequest's parent is serverRequestHTTPClient + expect(serverHTTPServerRequestTxn.contexts?.trace?.parent_span_id).toEqual(serverRequestHTTPClientSpan?.span_id); + }); + + test('sends parametrized pageload, server and API request transaction names', async ({ page }) => { + const clientPageloadTxnPromise = waitForTransaction('astro-6-cf-workers', txnEvent => { + return txnEvent?.transaction?.startsWith('/user-page/') ?? false; + }); + + const serverPageRequestTxnPromise = waitForTransaction('astro-6-cf-workers', txnEvent => { + return txnEvent?.transaction?.startsWith('GET /user-page/') ?? false; + }); + + const serverHTTPServerRequestTxnPromise = waitForTransaction('astro-6-cf-workers', txnEvent => { + return txnEvent?.transaction?.startsWith('GET /api/user/') ?? 
false; + }); + + await page.goto('/user-page/myUsername123'); + + const clientPageloadTxn = await clientPageloadTxnPromise; + const serverPageRequestTxn = await serverPageRequestTxnPromise; + const serverHTTPServerRequestTxn = await serverHTTPServerRequestTxnPromise; + + const serverRequestHTTPClientSpan = serverPageRequestTxn.spans?.find( + span => span.op === 'http.client' && span.description?.includes('/api/user/'), + ); + + const routeNameMetaContent = await page.locator('meta[name="sentry-route-name"]').getAttribute('content'); + expect(routeNameMetaContent).toBe('%2Fuser-page%2F%5BuserId%5D'); + + // Client pageload transaction - actual URL with pageload operation + expect(clientPageloadTxn).toMatchObject({ + transaction: '/user-page/[userId]', + transaction_info: { source: 'route' }, + contexts: { + trace: { + op: 'pageload', + origin: 'auto.pageload.astro', + data: { + 'sentry.op': 'pageload', + 'sentry.origin': 'auto.pageload.astro', + 'sentry.source': 'route', + }, + }, + }, + }); + + // Server page request transaction - parametrized transaction name with actual URL in data + expect(serverPageRequestTxn).toMatchObject({ + transaction: 'GET /user-page/myUsername123', + transaction_info: { source: 'route' }, + contexts: { + trace: { + op: 'http.server', + origin: 'auto.http.astro', + data: { + 'sentry.op': 'http.server', + 'sentry.origin': 'auto.http.astro', + 'sentry.source': 'route', + 'http.request.header.accept': expect.any(String), + 'http.request.header.accept_encoding': 'br, gzip', + 'http.request.header.accept_language': 'en-US', + 'http.request.header.sec_fetch_mode': 'navigate', + 'http.request.header.user_agent': expect.any(String), + }, + }, + }, + request: { url: expect.stringContaining('/user-page/myUsername123') }, + }); + + // HTTP client span - actual API URL with client operation + expect(serverRequestHTTPClientSpan).toMatchObject({ + op: 'http.client', + origin: 'auto.http.fetch', + description: 'GET 
http://localhost:3030/api/user/myUsername123.json', // http.client does not need to be parametrized + data: { + 'sentry.op': 'http.client', + 'sentry.origin': 'auto.http.fetch', + url: expect.stringContaining('/api/user/myUsername123.json'), + }, + }); + + // Server HTTP request transaction + expect(serverHTTPServerRequestTxn).toMatchObject({ + transaction: 'GET /api/user/myUsername123.json', + transaction_info: { source: 'route' }, + contexts: { + trace: { + op: 'http.server', + origin: 'auto.http.astro', + data: { + 'sentry.op': 'http.server', + 'sentry.origin': 'auto.http.astro', + 'sentry.source': 'route', + 'http.request.header.accept_encoding': 'br, gzip', + }, + }, + }, + request: { url: expect.stringContaining('/api/user/myUsername123.json') }, + }); + }); + + test('sends parametrized pageload and server transaction names for catch-all routes', async ({ page }) => { + const clientPageloadTxnPromise = waitForTransaction('astro-6-cf-workers', txnEvent => { + return txnEvent?.transaction?.startsWith('/catchAll/') ?? false; + }); + + const serverPageRequestTxnPromise = waitForTransaction('astro-6-cf-workers', txnEvent => { + return txnEvent?.transaction?.startsWith('GET /catchAll/') ?? 
false; + }); + + await page.goto('/catchAll/hell0/whatever-do'); + + const routeNameMetaContent = await page.locator('meta[name="sentry-route-name"]').getAttribute('content'); + expect(routeNameMetaContent).toBe('%2FcatchAll%2F%5B...path%5D'); + + const clientPageloadTxn = await clientPageloadTxnPromise; + const serverPageRequestTxn = await serverPageRequestTxnPromise; + + expect(clientPageloadTxn).toMatchObject({ + transaction: '/catchAll/[...path]', + transaction_info: { source: 'route' }, + contexts: { + trace: { + op: 'pageload', + origin: 'auto.pageload.astro', + data: { + 'sentry.op': 'pageload', + 'sentry.origin': 'auto.pageload.astro', + 'sentry.source': 'route', + }, + }, + }, + }); + + expect(serverPageRequestTxn).toMatchObject({ + transaction: 'GET /catchAll/hell0/whatever-do', + transaction_info: { source: 'route' }, + contexts: { + trace: { + op: 'http.server', + origin: 'auto.http.astro', + data: { + 'sentry.op': 'http.server', + 'sentry.origin': 'auto.http.astro', + 'sentry.source': 'route', + 'http.request.header.accept': expect.any(String), + 'http.request.header.accept_encoding': 'br, gzip', + 'http.request.header.accept_language': 'en-US', + 'http.request.header.sec_fetch_mode': 'navigate', + 'http.request.header.user_agent': expect.any(String), + }, + }, + }, + request: { url: expect.stringContaining('/catchAll/hell0/whatever-do') }, + }); + }); +}); + +// Case for `user-page/[id]` vs. `user-page/settings` static routes +test.describe('parametrized vs static paths', () => { + test('should use static route name for static route in parametrized path', async ({ page }) => { + const clientPageloadTxnPromise = waitForTransaction('astro-6-cf-workers', txnEvent => { + return txnEvent?.transaction?.startsWith('/user-page/') ?? false; + }); + + const serverPageRequestTxnPromise = waitForTransaction('astro-6-cf-workers', txnEvent => { + return txnEvent?.transaction?.startsWith('GET /user-page/') ?? 
false; + }); + + await page.goto('/user-page/settings'); + + const clientPageloadTxn = await clientPageloadTxnPromise; + const serverPageRequestTxn = await serverPageRequestTxnPromise; + + expect(clientPageloadTxn).toMatchObject({ + transaction: '/user-page/settings', + transaction_info: { source: 'route' }, + contexts: { + trace: { + op: 'pageload', + origin: 'auto.pageload.astro', + data: { + 'sentry.op': 'pageload', + 'sentry.origin': 'auto.pageload.astro', + 'sentry.source': 'route', + }, + }, + }, + }); + + expect(serverPageRequestTxn).toMatchObject({ + transaction: 'GET /user-page/settings', + transaction_info: { source: 'route' }, + contexts: { + trace: { + op: 'http.server', + origin: 'auto.http.astro', + data: { + 'sentry.op': 'http.server', + 'sentry.origin': 'auto.http.astro', + 'sentry.source': 'route', + 'http.request.header.accept': expect.any(String), + 'http.request.header.accept_encoding': 'br, gzip', + 'http.request.header.accept_language': 'en-US', + 'http.request.header.sec_fetch_mode': 'navigate', + 'http.request.header.user_agent': expect.any(String), + }, + }, + }, + request: { url: expect.stringContaining('/user-page/settings') }, + }); + }); + + test('allows for span name override via beforeStartSpan', async ({ page }) => { + const clientPageloadTxnPromise = waitForTransaction('astro-6-cf-workers', txnEvent => { + return txnEvent?.transaction?.startsWith('/blog/') ?? 
false; + }); + + await page.goto('/blog/my-post'); + + const clientPageloadTxn = await clientPageloadTxnPromise; + expect(clientPageloadTxn).toMatchObject({ + transaction: '/blog/my-post', + transaction_info: { source: 'custom' }, + }); + }); +}); diff --git a/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/tsconfig.json b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/tsconfig.json new file mode 100644 index 000000000000..153f97ffb6c8 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/tsconfig.json @@ -0,0 +1,5 @@ +{ + "extends": "astro/tsconfigs/strict", + "include": [".astro/types.d.ts", "**/*", "./worker-configuration.d.ts"], + "exclude": ["dist"] +} diff --git a/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/worker-configuration.d.ts b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/worker-configuration.d.ts new file mode 100644 index 000000000000..afdcb4729237 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/worker-configuration.d.ts @@ -0,0 +1,12248 @@ +/* eslint-disable */ +// Generated by Wrangler by running `wrangler types` (hash: 187132f48ddf0f604882ba8213fe386f) +// Runtime types generated with workerd@1.20260310.1 2026-01-24 global_fetch_strictly_public +declare namespace Cloudflare { + interface Env { + ASSETS: Fetcher; + } +} +interface Env extends Cloudflare.Env {} + +// Begin runtime types +/*! ***************************************************************************** +Copyright (c) Cloudflare. All rights reserved. +Copyright (c) Microsoft Corporation. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); you may not use +this file except in compliance with the License. 
You may obtain a copy of the +License at http://www.apache.org/licenses/LICENSE-2.0 +THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED +WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, +MERCHANTABLITY OR NON-INFRINGEMENT. +See the Apache Version 2.0 License for specific language governing permissions +and limitations under the License. +***************************************************************************** */ +/* eslint-disable */ +// noinspection JSUnusedGlobalSymbols +declare var onmessage: never; +/** + * The **`DOMException`** interface represents an abnormal event (called an **exception**) that occurs as a result of calling a method or accessing a property of a web API. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/DOMException) + */ +declare class DOMException extends Error { + constructor(message?: string, name?: string); + /** + * The **`message`** read-only property of the a message or description associated with the given error name. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/DOMException/message) + */ + readonly message: string; + /** + * The **`name`** read-only property of the one of the strings associated with an error name. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/DOMException/name) + */ + readonly name: string; + /** + * The **`code`** read-only property of the DOMException interface returns one of the legacy error code constants, or `0` if none match. 
+ * @deprecated + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/DOMException/code) + */ + readonly code: number; + static readonly INDEX_SIZE_ERR: number; + static readonly DOMSTRING_SIZE_ERR: number; + static readonly HIERARCHY_REQUEST_ERR: number; + static readonly WRONG_DOCUMENT_ERR: number; + static readonly INVALID_CHARACTER_ERR: number; + static readonly NO_DATA_ALLOWED_ERR: number; + static readonly NO_MODIFICATION_ALLOWED_ERR: number; + static readonly NOT_FOUND_ERR: number; + static readonly NOT_SUPPORTED_ERR: number; + static readonly INUSE_ATTRIBUTE_ERR: number; + static readonly INVALID_STATE_ERR: number; + static readonly SYNTAX_ERR: number; + static readonly INVALID_MODIFICATION_ERR: number; + static readonly NAMESPACE_ERR: number; + static readonly INVALID_ACCESS_ERR: number; + static readonly VALIDATION_ERR: number; + static readonly TYPE_MISMATCH_ERR: number; + static readonly SECURITY_ERR: number; + static readonly NETWORK_ERR: number; + static readonly ABORT_ERR: number; + static readonly URL_MISMATCH_ERR: number; + static readonly QUOTA_EXCEEDED_ERR: number; + static readonly TIMEOUT_ERR: number; + static readonly INVALID_NODE_TYPE_ERR: number; + static readonly DATA_CLONE_ERR: number; + get stack(): any; + set stack(value: any); +} +type WorkerGlobalScopeEventMap = { + fetch: FetchEvent; + scheduled: ScheduledEvent; + queue: QueueEvent; + unhandledrejection: PromiseRejectionEvent; + rejectionhandled: PromiseRejectionEvent; +}; +declare abstract class WorkerGlobalScope extends EventTarget { + EventTarget: typeof EventTarget; +} +/* The **`console`** object provides access to the debugging console (e.g., the Web console in Firefox). * + * The **`console`** object provides access to the debugging console (e.g., the Web console in Firefox). 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console) + */ +interface Console { + 'assert'(condition?: boolean, ...data: any[]): void; + /** + * The **`console.clear()`** static method clears the console if possible. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/clear_static) + */ + clear(): void; + /** + * The **`console.count()`** static method logs the number of times that this particular call to `count()` has been called. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/count_static) + */ + count(label?: string): void; + /** + * The **`console.countReset()`** static method resets counter used with console/count_static. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/countReset_static) + */ + countReset(label?: string): void; + /** + * The **`console.debug()`** static method outputs a message to the console at the 'debug' log level. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/debug_static) + */ + debug(...data: any[]): void; + /** + * The **`console.dir()`** static method displays a list of the properties of the specified JavaScript object. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/dir_static) + */ + dir(item?: any, options?: any): void; + /** + * The **`console.dirxml()`** static method displays an interactive tree of the descendant elements of the specified XML/HTML element. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/dirxml_static) + */ + dirxml(...data: any[]): void; + /** + * The **`console.error()`** static method outputs a message to the console at the 'error' log level. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/error_static) + */ + error(...data: any[]): void; + /** + * The **`console.group()`** static method creates a new inline group in the Web console log, causing any subsequent console messages to be indented by an additional level, until console/groupEnd_static is called. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/group_static) + */ + group(...data: any[]): void; + /** + * The **`console.groupCollapsed()`** static method creates a new inline group in the console. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/groupCollapsed_static) + */ + groupCollapsed(...data: any[]): void; + /** + * The **`console.groupEnd()`** static method exits the current inline group in the console. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/groupEnd_static) + */ + groupEnd(): void; + /** + * The **`console.info()`** static method outputs a message to the console at the 'info' log level. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/info_static) + */ + info(...data: any[]): void; + /** + * The **`console.log()`** static method outputs a message to the console. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/log_static) + */ + log(...data: any[]): void; + /** + * The **`console.table()`** static method displays tabular data as a table. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/table_static) + */ + table(tabularData?: any, properties?: string[]): void; + /** + * The **`console.time()`** static method starts a timer you can use to track how long an operation takes. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/time_static) + */ + time(label?: string): void; + /** + * The **`console.timeEnd()`** static method stops a timer that was previously started by calling console/time_static. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/timeEnd_static) + */ + timeEnd(label?: string): void; + /** + * The **`console.timeLog()`** static method logs the current value of a timer that was previously started by calling console/time_static. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/timeLog_static) + */ + timeLog(label?: string, ...data: any[]): void; + timeStamp(label?: string): void; + /** + * The **`console.trace()`** static method outputs a stack trace to the console. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/trace_static) + */ + trace(...data: any[]): void; + /** + * The **`console.warn()`** static method outputs a warning message to the console at the 'warning' log level. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/console/warn_static) + */ + warn(...data: any[]): void; +} +declare const console: Console; +type BufferSource = ArrayBufferView | ArrayBuffer; +type TypedArray = + | Int8Array + | Uint8Array + | Uint8ClampedArray + | Int16Array + | Uint16Array + | Int32Array + | Uint32Array + | Float32Array + | Float64Array + | BigInt64Array + | BigUint64Array; +declare namespace WebAssembly { + class CompileError extends Error { + constructor(message?: string); + } + class RuntimeError extends Error { + constructor(message?: string); + } + type ValueType = 'anyfunc' | 'externref' | 'f32' | 'f64' | 'i32' | 'i64' | 'v128'; + interface GlobalDescriptor { + value: ValueType; + mutable?: boolean; + } + class Global { + constructor(descriptor: GlobalDescriptor, value?: any); + value: any; + valueOf(): any; + } + type ImportValue = ExportValue | number; + type ModuleImports = Record; + type Imports = Record; + type ExportValue = Function | Global | Memory | Table; + type Exports = Record; + class Instance { + constructor(module: Module, imports?: Imports); + readonly exports: Exports; + } + interface MemoryDescriptor { + initial: number; + maximum?: 
number; + shared?: boolean; + } + class Memory { + constructor(descriptor: MemoryDescriptor); + readonly buffer: ArrayBuffer; + grow(delta: number): number; + } + type ImportExportKind = 'function' | 'global' | 'memory' | 'table'; + interface ModuleExportDescriptor { + kind: ImportExportKind; + name: string; + } + interface ModuleImportDescriptor { + kind: ImportExportKind; + module: string; + name: string; + } + abstract class Module { + static customSections(module: Module, sectionName: string): ArrayBuffer[]; + static exports(module: Module): ModuleExportDescriptor[]; + static imports(module: Module): ModuleImportDescriptor[]; + } + type TableKind = 'anyfunc' | 'externref'; + interface TableDescriptor { + element: TableKind; + initial: number; + maximum?: number; + } + class Table { + constructor(descriptor: TableDescriptor, value?: any); + readonly length: number; + get(index: number): any; + grow(delta: number, value?: any): number; + set(index: number, value?: any): void; + } + function instantiate(module: Module, imports?: Imports): Promise; + function validate(bytes: BufferSource): boolean; +} +/** + * The **`ServiceWorkerGlobalScope`** interface of the Service Worker API represents the global execution context of a service worker. + * Available only in secure contexts. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ServiceWorkerGlobalScope) + */ +interface ServiceWorkerGlobalScope extends WorkerGlobalScope { + DOMException: typeof DOMException; + WorkerGlobalScope: typeof WorkerGlobalScope; + btoa(data: string): string; + atob(data: string): string; + setTimeout(callback: (...args: any[]) => void, msDelay?: number): number; + setTimeout(callback: (...args: Args) => void, msDelay?: number, ...args: Args): number; + clearTimeout(timeoutId: number | null): void; + setInterval(callback: (...args: any[]) => void, msDelay?: number): number; + setInterval(callback: (...args: Args) => void, msDelay?: number, ...args: Args): number; + clearInterval(timeoutId: number | null): void; + queueMicrotask(task: Function): void; + structuredClone(value: T, options?: StructuredSerializeOptions): T; + reportError(error: any): void; + fetch(input: RequestInfo | URL, init?: RequestInit): Promise; + self: ServiceWorkerGlobalScope; + crypto: Crypto; + caches: CacheStorage; + scheduler: Scheduler; + performance: Performance; + Cloudflare: Cloudflare; + readonly origin: string; + Event: typeof Event; + ExtendableEvent: typeof ExtendableEvent; + CustomEvent: typeof CustomEvent; + PromiseRejectionEvent: typeof PromiseRejectionEvent; + FetchEvent: typeof FetchEvent; + TailEvent: typeof TailEvent; + TraceEvent: typeof TailEvent; + ScheduledEvent: typeof ScheduledEvent; + MessageEvent: typeof MessageEvent; + CloseEvent: typeof CloseEvent; + ReadableStreamDefaultReader: typeof ReadableStreamDefaultReader; + ReadableStreamBYOBReader: typeof ReadableStreamBYOBReader; + ReadableStream: typeof ReadableStream; + WritableStream: typeof WritableStream; + WritableStreamDefaultWriter: typeof WritableStreamDefaultWriter; + TransformStream: typeof TransformStream; + ByteLengthQueuingStrategy: typeof ByteLengthQueuingStrategy; + CountQueuingStrategy: typeof CountQueuingStrategy; + ErrorEvent: typeof ErrorEvent; + MessageChannel: typeof MessageChannel; + 
MessagePort: typeof MessagePort; + EventSource: typeof EventSource; + ReadableStreamBYOBRequest: typeof ReadableStreamBYOBRequest; + ReadableStreamDefaultController: typeof ReadableStreamDefaultController; + ReadableByteStreamController: typeof ReadableByteStreamController; + WritableStreamDefaultController: typeof WritableStreamDefaultController; + TransformStreamDefaultController: typeof TransformStreamDefaultController; + CompressionStream: typeof CompressionStream; + DecompressionStream: typeof DecompressionStream; + TextEncoderStream: typeof TextEncoderStream; + TextDecoderStream: typeof TextDecoderStream; + Headers: typeof Headers; + Body: typeof Body; + Request: typeof Request; + Response: typeof Response; + WebSocket: typeof WebSocket; + WebSocketPair: typeof WebSocketPair; + WebSocketRequestResponsePair: typeof WebSocketRequestResponsePair; + AbortController: typeof AbortController; + AbortSignal: typeof AbortSignal; + TextDecoder: typeof TextDecoder; + TextEncoder: typeof TextEncoder; + navigator: Navigator; + Navigator: typeof Navigator; + URL: typeof URL; + URLSearchParams: typeof URLSearchParams; + URLPattern: typeof URLPattern; + Blob: typeof Blob; + File: typeof File; + FormData: typeof FormData; + Crypto: typeof Crypto; + SubtleCrypto: typeof SubtleCrypto; + CryptoKey: typeof CryptoKey; + CacheStorage: typeof CacheStorage; + Cache: typeof Cache; + FixedLengthStream: typeof FixedLengthStream; + IdentityTransformStream: typeof IdentityTransformStream; + HTMLRewriter: typeof HTMLRewriter; +} +declare function addEventListener( + type: Type, + handler: EventListenerOrEventListenerObject, + options?: EventTargetAddEventListenerOptions | boolean, +): void; +declare function removeEventListener( + type: Type, + handler: EventListenerOrEventListenerObject, + options?: EventTargetEventListenerOptions | boolean, +): void; +/** + * The **`dispatchEvent()`** method of the EventTarget sends an Event to the object, (synchronously) invoking the affected event 
listeners in the appropriate order. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventTarget/dispatchEvent) + */ +declare function dispatchEvent(event: WorkerGlobalScopeEventMap[keyof WorkerGlobalScopeEventMap]): boolean; +/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Window/btoa) */ +declare function btoa(data: string): string; +/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Window/atob) */ +declare function atob(data: string): string; +/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Window/setTimeout) */ +declare function setTimeout(callback: (...args: any[]) => void, msDelay?: number): number; +/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Window/setTimeout) */ +declare function setTimeout( + callback: (...args: Args) => void, + msDelay?: number, + ...args: Args +): number; +/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Window/clearTimeout) */ +declare function clearTimeout(timeoutId: number | null): void; +/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Window/setInterval) */ +declare function setInterval(callback: (...args: any[]) => void, msDelay?: number): number; +/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Window/setInterval) */ +declare function setInterval( + callback: (...args: Args) => void, + msDelay?: number, + ...args: Args +): number; +/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Window/clearInterval) */ +declare function clearInterval(timeoutId: number | null): void; +/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Window/queueMicrotask) */ +declare function queueMicrotask(task: Function): void; +/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Window/structuredClone) */ +declare function structuredClone(value: T, options?: StructuredSerializeOptions): T; +/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Window/reportError) */ +declare function reportError(error: 
any): void; +/* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Window/fetch) */ +declare function fetch(input: RequestInfo | URL, init?: RequestInit): Promise; +declare const self: ServiceWorkerGlobalScope; +/** + * The Web Crypto API provides a set of low-level functions for common cryptographic tasks. + * The Workers runtime implements the full surface of this API, but with some differences in + * the [supported algorithms](https://developers.cloudflare.com/workers/runtime-apis/web-crypto/#supported-algorithms) + * compared to those implemented in most browsers. + * + * [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/web-crypto/) + */ +declare const crypto: Crypto; +/** + * The Cache API allows fine grained control of reading and writing from the Cloudflare global network cache. + * + * [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/cache/) + */ +declare const caches: CacheStorage; +declare const scheduler: Scheduler; +/** + * The Workers runtime supports a subset of the Performance API, used to measure timing and performance, + * as well as timing of subrequests and other operations. 
+ * + * [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/performance/) + */ +declare const performance: Performance; +declare const Cloudflare: Cloudflare; +declare const origin: string; +declare const navigator: Navigator; +interface TestController {} +interface ExecutionContext { + waitUntil(promise: Promise): void; + passThroughOnException(): void; + readonly exports: Cloudflare.Exports; + readonly props: Props; +} +type ExportedHandlerFetchHandler = ( + request: Request>, + env: Env, + ctx: ExecutionContext, +) => Response | Promise; +type ExportedHandlerTailHandler = ( + events: TraceItem[], + env: Env, + ctx: ExecutionContext, +) => void | Promise; +type ExportedHandlerTraceHandler = ( + traces: TraceItem[], + env: Env, + ctx: ExecutionContext, +) => void | Promise; +type ExportedHandlerTailStreamHandler = ( + event: TailStream.TailEvent, + env: Env, + ctx: ExecutionContext, +) => TailStream.TailEventHandlerType | Promise; +type ExportedHandlerScheduledHandler = ( + controller: ScheduledController, + env: Env, + ctx: ExecutionContext, +) => void | Promise; +type ExportedHandlerQueueHandler = ( + batch: MessageBatch, + env: Env, + ctx: ExecutionContext, +) => void | Promise; +type ExportedHandlerTestHandler = ( + controller: TestController, + env: Env, + ctx: ExecutionContext, +) => void | Promise; +interface ExportedHandler { + fetch?: ExportedHandlerFetchHandler; + tail?: ExportedHandlerTailHandler; + trace?: ExportedHandlerTraceHandler; + tailStream?: ExportedHandlerTailStreamHandler; + scheduled?: ExportedHandlerScheduledHandler; + test?: ExportedHandlerTestHandler; + email?: EmailExportedHandler; + queue?: ExportedHandlerQueueHandler; +} +interface StructuredSerializeOptions { + transfer?: any[]; +} +declare abstract class Navigator { + sendBeacon(url: string, body?: BodyInit): boolean; + readonly userAgent: string; + readonly hardwareConcurrency: number; + readonly language: string; + readonly languages: string[]; +} 
+interface AlarmInvocationInfo { + readonly isRetry: boolean; + readonly retryCount: number; +} +interface Cloudflare { + readonly compatibilityFlags: Record; +} +interface DurableObject { + fetch(request: Request): Response | Promise; + alarm?(alarmInfo?: AlarmInvocationInfo): void | Promise; + webSocketMessage?(ws: WebSocket, message: string | ArrayBuffer): void | Promise; + webSocketClose?(ws: WebSocket, code: number, reason: string, wasClean: boolean): void | Promise; + webSocketError?(ws: WebSocket, error: unknown): void | Promise; +} +type DurableObjectStub = Fetcher< + T, + 'alarm' | 'webSocketMessage' | 'webSocketClose' | 'webSocketError' +> & { + readonly id: DurableObjectId; + readonly name?: string; +}; +interface DurableObjectId { + toString(): string; + equals(other: DurableObjectId): boolean; + readonly name?: string; +} +declare abstract class DurableObjectNamespace { + newUniqueId(options?: DurableObjectNamespaceNewUniqueIdOptions): DurableObjectId; + idFromName(name: string): DurableObjectId; + idFromString(id: string): DurableObjectId; + get(id: DurableObjectId, options?: DurableObjectNamespaceGetDurableObjectOptions): DurableObjectStub; + getByName(name: string, options?: DurableObjectNamespaceGetDurableObjectOptions): DurableObjectStub; + jurisdiction(jurisdiction: DurableObjectJurisdiction): DurableObjectNamespace; +} +type DurableObjectJurisdiction = 'eu' | 'fedramp' | 'fedramp-high'; +interface DurableObjectNamespaceNewUniqueIdOptions { + jurisdiction?: DurableObjectJurisdiction; +} +type DurableObjectLocationHint = 'wnam' | 'enam' | 'sam' | 'weur' | 'eeur' | 'apac' | 'oc' | 'afr' | 'me'; +type DurableObjectRoutingMode = 'primary-only'; +interface DurableObjectNamespaceGetDurableObjectOptions { + locationHint?: DurableObjectLocationHint; + routingMode?: DurableObjectRoutingMode; +} +interface DurableObjectClass<_T extends Rpc.DurableObjectBranded | undefined = undefined> {} +interface DurableObjectState { + waitUntil(promise: Promise): void; 
+ readonly exports: Cloudflare.Exports; + readonly props: Props; + readonly id: DurableObjectId; + readonly storage: DurableObjectStorage; + container?: Container; + blockConcurrencyWhile(callback: () => Promise): Promise; + acceptWebSocket(ws: WebSocket, tags?: string[]): void; + getWebSockets(tag?: string): WebSocket[]; + setWebSocketAutoResponse(maybeReqResp?: WebSocketRequestResponsePair): void; + getWebSocketAutoResponse(): WebSocketRequestResponsePair | null; + getWebSocketAutoResponseTimestamp(ws: WebSocket): Date | null; + setHibernatableWebSocketEventTimeout(timeoutMs?: number): void; + getHibernatableWebSocketEventTimeout(): number | null; + getTags(ws: WebSocket): string[]; + abort(reason?: string): void; +} +interface DurableObjectTransaction { + get(key: string, options?: DurableObjectGetOptions): Promise; + get(keys: string[], options?: DurableObjectGetOptions): Promise>; + list(options?: DurableObjectListOptions): Promise>; + put(key: string, value: T, options?: DurableObjectPutOptions): Promise; + put(entries: Record, options?: DurableObjectPutOptions): Promise; + delete(key: string, options?: DurableObjectPutOptions): Promise; + delete(keys: string[], options?: DurableObjectPutOptions): Promise; + rollback(): void; + getAlarm(options?: DurableObjectGetAlarmOptions): Promise; + setAlarm(scheduledTime: number | Date, options?: DurableObjectSetAlarmOptions): Promise; + deleteAlarm(options?: DurableObjectSetAlarmOptions): Promise; +} +interface DurableObjectStorage { + get(key: string, options?: DurableObjectGetOptions): Promise; + get(keys: string[], options?: DurableObjectGetOptions): Promise>; + list(options?: DurableObjectListOptions): Promise>; + put(key: string, value: T, options?: DurableObjectPutOptions): Promise; + put(entries: Record, options?: DurableObjectPutOptions): Promise; + delete(key: string, options?: DurableObjectPutOptions): Promise; + delete(keys: string[], options?: DurableObjectPutOptions): Promise; + deleteAll(options?: 
DurableObjectPutOptions): Promise; + transaction(closure: (txn: DurableObjectTransaction) => Promise): Promise; + getAlarm(options?: DurableObjectGetAlarmOptions): Promise; + setAlarm(scheduledTime: number | Date, options?: DurableObjectSetAlarmOptions): Promise; + deleteAlarm(options?: DurableObjectSetAlarmOptions): Promise; + sync(): Promise; + sql: SqlStorage; + kv: SyncKvStorage; + transactionSync(closure: () => T): T; + getCurrentBookmark(): Promise; + getBookmarkForTime(timestamp: number | Date): Promise; + onNextSessionRestoreBookmark(bookmark: string): Promise; +} +interface DurableObjectListOptions { + start?: string; + startAfter?: string; + end?: string; + prefix?: string; + reverse?: boolean; + limit?: number; + allowConcurrency?: boolean; + noCache?: boolean; +} +interface DurableObjectGetOptions { + allowConcurrency?: boolean; + noCache?: boolean; +} +interface DurableObjectGetAlarmOptions { + allowConcurrency?: boolean; +} +interface DurableObjectPutOptions { + allowConcurrency?: boolean; + allowUnconfirmed?: boolean; + noCache?: boolean; +} +interface DurableObjectSetAlarmOptions { + allowConcurrency?: boolean; + allowUnconfirmed?: boolean; +} +declare class WebSocketRequestResponsePair { + constructor(request: string, response: string); + get request(): string; + get response(): string; +} +interface AnalyticsEngineDataset { + writeDataPoint(event?: AnalyticsEngineDataPoint): void; +} +interface AnalyticsEngineDataPoint { + indexes?: ((ArrayBuffer | string) | null)[]; + doubles?: number[]; + blobs?: ((ArrayBuffer | string) | null)[]; +} +/** + * The **`Event`** interface represents an event which takes place on an `EventTarget`. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event) + */ +declare class Event { + constructor(type: string, init?: EventInit); + /** + * The **`type`** read-only property of the Event interface returns a string containing the event's type. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/type) + */ + get type(): string; + /** + * The **`eventPhase`** read-only property of the being evaluated. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/eventPhase) + */ + get eventPhase(): number; + /** + * The read-only **`composed`** property of the or not the event will propagate across the shadow DOM boundary into the standard DOM. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/composed) + */ + get composed(): boolean; + /** + * The **`bubbles`** read-only property of the Event interface indicates whether the event bubbles up through the DOM tree or not. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/bubbles) + */ + get bubbles(): boolean; + /** + * The **`cancelable`** read-only property of the Event interface indicates whether the event can be canceled, and therefore prevented as if the event never happened. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/cancelable) + */ + get cancelable(): boolean; + /** + * The **`defaultPrevented`** read-only property of the Event interface returns a boolean value indicating whether or not the call to Event.preventDefault() canceled the event. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/defaultPrevented) + */ + get defaultPrevented(): boolean; + /** + * The Event property **`returnValue`** indicates whether the default action for this event has been prevented or not. + * @deprecated + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/returnValue) + */ + get returnValue(): boolean; + /** + * The **`currentTarget`** read-only property of the Event interface identifies the element to which the event handler has been attached. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/currentTarget) + */ + get currentTarget(): EventTarget | undefined; + /** + * The read-only **`target`** property of the dispatched. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/target) + */ + get target(): EventTarget | undefined; + /** + * The deprecated **`Event.srcElement`** is an alias for the Event.target property. + * @deprecated + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/srcElement) + */ + get srcElement(): EventTarget | undefined; + /** + * The **`timeStamp`** read-only property of the Event interface returns the time (in milliseconds) at which the event was created. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/timeStamp) + */ + get timeStamp(): number; + /** + * The **`isTrusted`** read-only property of the when the event was generated by the user agent (including via user actions and programmatic methods such as HTMLElement.focus()), and `false` when the event was dispatched via The only exception is the `click` event, which initializes the `isTrusted` property to `false` in user agents. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/isTrusted) + */ + get isTrusted(): boolean; + /** + * The **`cancelBubble`** property of the Event interface is deprecated. + * @deprecated + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/cancelBubble) + */ + get cancelBubble(): boolean; + /** + * The **`cancelBubble`** property of the Event interface is deprecated. + * @deprecated + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/cancelBubble) + */ + set cancelBubble(value: boolean); + /** + * The **`stopImmediatePropagation()`** method of the If several listeners are attached to the same element for the same event type, they are called in the order in which they were added. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/stopImmediatePropagation) + */ + stopImmediatePropagation(): void; + /** + * The **`preventDefault()`** method of the Event interface tells the user agent that if the event does not get explicitly handled, its default action should not be taken as it normally would be. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/preventDefault) + */ + preventDefault(): void; + /** + * The **`stopPropagation()`** method of the Event interface prevents further propagation of the current event in the capturing and bubbling phases. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/stopPropagation) + */ + stopPropagation(): void; + /** + * The **`composedPath()`** method of the Event interface returns the event's path which is an array of the objects on which listeners will be invoked. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Event/composedPath) + */ + composedPath(): EventTarget[]; + static readonly NONE: number; + static readonly CAPTURING_PHASE: number; + static readonly AT_TARGET: number; + static readonly BUBBLING_PHASE: number; +} +interface EventInit { + bubbles?: boolean; + cancelable?: boolean; + composed?: boolean; +} +type EventListener = (event: EventType) => void; +interface EventListenerObject { + handleEvent(event: EventType): void; +} +type EventListenerOrEventListenerObject = + | EventListener + | EventListenerObject; +/** + * The **`EventTarget`** interface is implemented by objects that can receive events and may have listeners for them. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventTarget) + */ +declare class EventTarget = Record> { + constructor(); + /** + * The **`addEventListener()`** method of the EventTarget interface sets up a function that will be called whenever the specified event is delivered to the target. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventTarget/addEventListener) + */ + addEventListener( + type: Type, + handler: EventListenerOrEventListenerObject, + options?: EventTargetAddEventListenerOptions | boolean, + ): void; + /** + * The **`removeEventListener()`** method of the EventTarget interface removes an event listener previously registered with EventTarget.addEventListener() from the target. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventTarget/removeEventListener) + */ + removeEventListener( + type: Type, + handler: EventListenerOrEventListenerObject, + options?: EventTargetEventListenerOptions | boolean, + ): void; + /** + * The **`dispatchEvent()`** method of the EventTarget sends an Event to the object, (synchronously) invoking the affected event listeners in the appropriate order. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventTarget/dispatchEvent) + */ + dispatchEvent(event: EventMap[keyof EventMap]): boolean; +} +interface EventTargetEventListenerOptions { + capture?: boolean; +} +interface EventTargetAddEventListenerOptions { + capture?: boolean; + passive?: boolean; + once?: boolean; + signal?: AbortSignal; +} +interface EventTargetHandlerObject { + handleEvent: (event: Event) => any | undefined; +} +/** + * The **`AbortController`** interface represents a controller object that allows you to abort one or more Web requests as and when desired. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortController) + */ +declare class AbortController { + constructor(); + /** + * The **`signal`** read-only property of the AbortController interface returns an AbortSignal object instance, which can be used to communicate with/abort an asynchronous operation as desired. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortController/signal) + */ + get signal(): AbortSignal; + /** + * The **`abort()`** method of the AbortController interface aborts an asynchronous operation before it has completed. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortController/abort) + */ + abort(reason?: any): void; +} +/** + * The **`AbortSignal`** interface represents a signal object that allows you to communicate with an asynchronous operation (such as a fetch request) and abort it if required via an AbortController object. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortSignal) + */ +declare abstract class AbortSignal extends EventTarget { + /** + * The **`AbortSignal.abort()`** static method returns an AbortSignal that is already set as aborted (and which does not trigger an AbortSignal/abort_event event). + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortSignal/abort_static) + */ + static abort(reason?: any): AbortSignal; + /** + * The **`AbortSignal.timeout()`** static method returns an AbortSignal that will automatically abort after a specified time. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortSignal/timeout_static) + */ + static timeout(delay: number): AbortSignal; + /** + * The **`AbortSignal.any()`** static method takes an iterable of abort signals and returns an AbortSignal. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortSignal/any_static) + */ + static any(signals: AbortSignal[]): AbortSignal; + /** + * The **`aborted`** read-only property returns a value that indicates whether the asynchronous operations the signal is communicating with are aborted (`true`) or not (`false`). + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortSignal/aborted) + */ + get aborted(): boolean; + /** + * The **`reason`** read-only property returns a JavaScript value that indicates the abort reason. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortSignal/reason) + */ + get reason(): any; + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortSignal/abort_event) */ + get onabort(): any | null; + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortSignal/abort_event) */ + set onabort(value: any | null); + /** + * The **`throwIfAborted()`** method throws the signal's abort AbortSignal.reason if the signal has been aborted; otherwise it does nothing. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/AbortSignal/throwIfAborted) + */ + throwIfAborted(): void; +} +interface Scheduler { + wait(delay: number, maybeOptions?: SchedulerWaitOptions): Promise; +} +interface SchedulerWaitOptions { + signal?: AbortSignal; +} +/** + * The **`ExtendableEvent`** interface extends the lifetime of the `install` and `activate` events dispatched on the global scope as part of the service worker lifecycle. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ExtendableEvent) + */ +declare abstract class ExtendableEvent extends Event { + /** + * The **`ExtendableEvent.waitUntil()`** method tells the event dispatcher that work is ongoing. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ExtendableEvent/waitUntil) + */ + waitUntil(promise: Promise): void; +} +/** + * The **`CustomEvent`** interface represents events initialized by an application for any purpose. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CustomEvent) + */ +declare class CustomEvent extends Event { + constructor(type: string, init?: CustomEventCustomEventInit); + /** + * The read-only **`detail`** property of the CustomEvent interface returns any data passed when initializing the event. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CustomEvent/detail) + */ + get detail(): T; +} +interface CustomEventCustomEventInit { + bubbles?: boolean; + cancelable?: boolean; + composed?: boolean; + detail?: any; +} +/** + * The **`Blob`** interface represents a blob, which is a file-like object of immutable, raw data; they can be read as text or binary data, or converted into a ReadableStream so its methods can be used for processing the data. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob) + */ +declare class Blob { + constructor(type?: ((ArrayBuffer | ArrayBufferView) | string | Blob)[], options?: BlobOptions); + /** + * The **`size`** read-only property of the Blob interface returns the size of the Blob or File in bytes. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/size) + */ + get size(): number; + /** + * The **`type`** read-only property of the Blob interface returns the MIME type of the file. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/type) + */ + get type(): string; + /** + * The **`slice()`** method of the Blob interface creates and returns a new `Blob` object which contains data from a subset of the blob on which it's called. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/slice) + */ + slice(start?: number, end?: number, type?: string): Blob; + /** + * The **`arrayBuffer()`** method of the Blob interface returns a Promise that resolves with the contents of the blob as binary data contained in an ArrayBuffer. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/arrayBuffer) + */ + arrayBuffer(): Promise; + /** + * The **`bytes()`** method of the Blob interface returns a Promise that resolves with a Uint8Array containing the contents of the blob as an array of bytes. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/bytes) + */ + bytes(): Promise; + /** + * The **`text()`** method of the string containing the contents of the blob, interpreted as UTF-8. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/text) + */ + text(): Promise; + /** + * The **`stream()`** method of the Blob interface returns a ReadableStream which upon reading returns the data contained within the `Blob`. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Blob/stream) + */ + stream(): ReadableStream; +} +interface BlobOptions { + type?: string; +} +/** + * The **`File`** interface provides information about files and allows JavaScript in a web page to access their content. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/File) + */ +declare class File extends Blob { + constructor( + bits: ((ArrayBuffer | ArrayBufferView) | string | Blob)[] | undefined, + name: string, + options?: FileOptions, + ); + /** + * The **`name`** read-only property of the File interface returns the name of the file represented by a File object. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/File/name) + */ + get name(): string; + /** + * The **`lastModified`** read-only property of the File interface provides the last modified date of the file as the number of milliseconds since the Unix epoch (January 1, 1970 at midnight). + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/File/lastModified) + */ + get lastModified(): number; +} +interface FileOptions { + type?: string; + lastModified?: number; +} +/** + * The Cache API allows fine grained control of reading and writing from the Cloudflare global network cache. + * + * [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/cache/) + */ +declare abstract class CacheStorage { + /** + * The **`open()`** method of the the Cache object matching the `cacheName`. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CacheStorage/open) + */ + open(cacheName: string): Promise; + readonly default: Cache; +} +/** + * The Cache API allows fine grained control of reading and writing from the Cloudflare global network cache. + * + * [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/cache/) + */ +declare abstract class Cache { + /* [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/cache/#delete) */ + delete(request: RequestInfo | URL, options?: CacheQueryOptions): Promise; + /* [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/cache/#match) */ + match(request: RequestInfo | URL, options?: CacheQueryOptions): Promise; + /* [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/cache/#put) */ + put(request: RequestInfo | URL, response: Response): Promise; +} +interface CacheQueryOptions { + ignoreMethod?: boolean; +} +/** + * The Web Crypto API provides a set of low-level functions for common cryptographic tasks. + * The Workers runtime implements the full surface of this API, but with some differences in + * the [supported algorithms](https://developers.cloudflare.com/workers/runtime-apis/web-crypto/#supported-algorithms) + * compared to those implemented in most browsers. + * + * [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/web-crypto/) + */ +declare abstract class Crypto { + /** + * The **`Crypto.subtle`** read-only property returns a cryptographic operations. + * Available only in secure contexts. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Crypto/subtle) + */ + get subtle(): SubtleCrypto; + /** + * The **`Crypto.getRandomValues()`** method lets you get cryptographically strong random values. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Crypto/getRandomValues) + */ + getRandomValues< + T extends + | Int8Array + | Uint8Array + | Int16Array + | Uint16Array + | Int32Array + | Uint32Array + | BigInt64Array + | BigUint64Array, + >(buffer: T): T; + /** + * The **`randomUUID()`** method of the Crypto interface is used to generate a v4 UUID using a cryptographically secure random number generator. + * Available only in secure contexts. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Crypto/randomUUID) + */ + randomUUID(): string; + DigestStream: typeof DigestStream; +} +/** + * The **`SubtleCrypto`** interface of the Web Crypto API provides a number of low-level cryptographic functions. + * Available only in secure contexts. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto) + */ +declare abstract class SubtleCrypto { + /** + * The **`encrypt()`** method of the SubtleCrypto interface encrypts data. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/encrypt) + */ + encrypt( + algorithm: string | SubtleCryptoEncryptAlgorithm, + key: CryptoKey, + plainText: ArrayBuffer | ArrayBufferView, + ): Promise; + /** + * The **`decrypt()`** method of the SubtleCrypto interface decrypts some encrypted data. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/decrypt) + */ + decrypt( + algorithm: string | SubtleCryptoEncryptAlgorithm, + key: CryptoKey, + cipherText: ArrayBuffer | ArrayBufferView, + ): Promise; + /** + * The **`sign()`** method of the SubtleCrypto interface generates a digital signature. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/sign) + */ + sign( + algorithm: string | SubtleCryptoSignAlgorithm, + key: CryptoKey, + data: ArrayBuffer | ArrayBufferView, + ): Promise; + /** + * The **`verify()`** method of the SubtleCrypto interface verifies a digital signature. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/verify) + */ + verify( + algorithm: string | SubtleCryptoSignAlgorithm, + key: CryptoKey, + signature: ArrayBuffer | ArrayBufferView, + data: ArrayBuffer | ArrayBufferView, + ): Promise; + /** + * The **`digest()`** method of the SubtleCrypto interface generates a _digest_ of the given data, using the specified hash function. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/digest) + */ + digest(algorithm: string | SubtleCryptoHashAlgorithm, data: ArrayBuffer | ArrayBufferView): Promise; + /** + * The **`generateKey()`** method of the SubtleCrypto interface is used to generate a new key (for symmetric algorithms) or key pair (for public-key algorithms). + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/generateKey) + */ + generateKey( + algorithm: string | SubtleCryptoGenerateKeyAlgorithm, + extractable: boolean, + keyUsages: string[], + ): Promise; + /** + * The **`deriveKey()`** method of the SubtleCrypto interface can be used to derive a secret key from a master key. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/deriveKey) + */ + deriveKey( + algorithm: string | SubtleCryptoDeriveKeyAlgorithm, + baseKey: CryptoKey, + derivedKeyAlgorithm: string | SubtleCryptoImportKeyAlgorithm, + extractable: boolean, + keyUsages: string[], + ): Promise; + /** + * The **`deriveBits()`** method of the key. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/deriveBits) + */ + deriveBits( + algorithm: string | SubtleCryptoDeriveKeyAlgorithm, + baseKey: CryptoKey, + length?: number | null, + ): Promise; + /** + * The **`importKey()`** method of the SubtleCrypto interface imports a key: that is, it takes as input a key in an external, portable format and gives you a CryptoKey object that you can use in the Web Crypto API. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/importKey) + */ + importKey( + format: string, + keyData: (ArrayBuffer | ArrayBufferView) | JsonWebKey, + algorithm: string | SubtleCryptoImportKeyAlgorithm, + extractable: boolean, + keyUsages: string[], + ): Promise; + /** + * The **`exportKey()`** method of the SubtleCrypto interface exports a key: that is, it takes as input a CryptoKey object and gives you the key in an external, portable format. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/exportKey) + */ + exportKey(format: string, key: CryptoKey): Promise; + /** + * The **`wrapKey()`** method of the SubtleCrypto interface 'wraps' a key. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/wrapKey) + */ + wrapKey( + format: string, + key: CryptoKey, + wrappingKey: CryptoKey, + wrapAlgorithm: string | SubtleCryptoEncryptAlgorithm, + ): Promise; + /** + * The **`unwrapKey()`** method of the SubtleCrypto interface 'unwraps' a key. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/SubtleCrypto/unwrapKey) + */ + unwrapKey( + format: string, + wrappedKey: ArrayBuffer | ArrayBufferView, + unwrappingKey: CryptoKey, + unwrapAlgorithm: string | SubtleCryptoEncryptAlgorithm, + unwrappedKeyAlgorithm: string | SubtleCryptoImportKeyAlgorithm, + extractable: boolean, + keyUsages: string[], + ): Promise; + timingSafeEqual(a: ArrayBuffer | ArrayBufferView, b: ArrayBuffer | ArrayBufferView): boolean; +} +/** + * The **`CryptoKey`** interface of the Web Crypto API represents a cryptographic key obtained from one of the SubtleCrypto methods SubtleCrypto.generateKey, SubtleCrypto.deriveKey, SubtleCrypto.importKey, or SubtleCrypto.unwrapKey. + * Available only in secure contexts. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CryptoKey) + */ +declare abstract class CryptoKey { + /** + * The read-only **`type`** property of the CryptoKey interface indicates which kind of key is represented by the object. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CryptoKey/type) + */ + readonly type: string; + /** + * The read-only **`extractable`** property of the CryptoKey interface indicates whether or not the key may be extracted using `SubtleCrypto.exportKey()` or `SubtleCrypto.wrapKey()`. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CryptoKey/extractable) + */ + readonly extractable: boolean; + /** + * The read-only **`algorithm`** property of the CryptoKey interface returns an object describing the algorithm for which this key can be used, and any associated extra parameters. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CryptoKey/algorithm) + */ + readonly algorithm: + | CryptoKeyKeyAlgorithm + | CryptoKeyAesKeyAlgorithm + | CryptoKeyHmacKeyAlgorithm + | CryptoKeyRsaKeyAlgorithm + | CryptoKeyEllipticKeyAlgorithm + | CryptoKeyArbitraryKeyAlgorithm; + /** + * The read-only **`usages`** property of the CryptoKey interface indicates what can be done with the key. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CryptoKey/usages) + */ + readonly usages: string[]; +} +interface CryptoKeyPair { + publicKey: CryptoKey; + privateKey: CryptoKey; +} +interface JsonWebKey { + kty: string; + use?: string; + key_ops?: string[]; + alg?: string; + ext?: boolean; + crv?: string; + x?: string; + y?: string; + d?: string; + n?: string; + e?: string; + p?: string; + q?: string; + dp?: string; + dq?: string; + qi?: string; + oth?: RsaOtherPrimesInfo[]; + k?: string; +} +interface RsaOtherPrimesInfo { + r?: string; + d?: string; + t?: string; +} +interface SubtleCryptoDeriveKeyAlgorithm { + name: string; + salt?: ArrayBuffer | ArrayBufferView; + iterations?: number; + hash?: string | SubtleCryptoHashAlgorithm; + $public?: CryptoKey; + info?: ArrayBuffer | ArrayBufferView; +} +interface SubtleCryptoEncryptAlgorithm { + name: string; + iv?: ArrayBuffer | ArrayBufferView; + additionalData?: ArrayBuffer | ArrayBufferView; + tagLength?: number; + counter?: ArrayBuffer | ArrayBufferView; + length?: number; + label?: ArrayBuffer | ArrayBufferView; +} +interface SubtleCryptoGenerateKeyAlgorithm { + name: string; + hash?: string | SubtleCryptoHashAlgorithm; + modulusLength?: number; + publicExponent?: ArrayBuffer | ArrayBufferView; + length?: number; + namedCurve?: string; +} +interface SubtleCryptoHashAlgorithm { + name: string; +} +interface SubtleCryptoImportKeyAlgorithm { + name: string; + hash?: string | SubtleCryptoHashAlgorithm; + length?: number; + namedCurve?: string; + compressed?: boolean; +} +interface SubtleCryptoSignAlgorithm { + name: string; + hash?: string | SubtleCryptoHashAlgorithm; + dataLength?: number; + saltLength?: number; +} +interface CryptoKeyKeyAlgorithm { + name: string; +} +interface CryptoKeyAesKeyAlgorithm { + name: string; + length: number; +} +interface CryptoKeyHmacKeyAlgorithm { + name: string; + hash: CryptoKeyKeyAlgorithm; + length: number; +} +interface CryptoKeyRsaKeyAlgorithm { + name: 
string; + modulusLength: number; + publicExponent: ArrayBuffer | ArrayBufferView; + hash?: CryptoKeyKeyAlgorithm; +} +interface CryptoKeyEllipticKeyAlgorithm { + name: string; + namedCurve: string; +} +interface CryptoKeyArbitraryKeyAlgorithm { + name: string; + hash?: CryptoKeyKeyAlgorithm; + namedCurve?: string; + length?: number; +} +declare class DigestStream extends WritableStream { + constructor(algorithm: string | SubtleCryptoHashAlgorithm); + readonly digest: Promise; + get bytesWritten(): number | bigint; +} +/** + * The **`TextDecoder`** interface represents a decoder for a specific text encoding, such as `UTF-8`, `ISO-8859-2`, `KOI8-R`, `GBK`, etc. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TextDecoder) + */ +declare class TextDecoder { + constructor(label?: string, options?: TextDecoderConstructorOptions); + /** + * The **`TextDecoder.decode()`** method returns a string containing text decoded from the buffer passed as a parameter. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TextDecoder/decode) + */ + decode(input?: ArrayBuffer | ArrayBufferView, options?: TextDecoderDecodeOptions): string; + get encoding(): string; + get fatal(): boolean; + get ignoreBOM(): boolean; +} +/** + * The **`TextEncoder`** interface takes a stream of code points as input and emits a stream of UTF-8 bytes. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TextEncoder) + */ +declare class TextEncoder { + constructor(); + /** + * The **`TextEncoder.encode()`** method takes a string as input, and returns a Global_Objects/Uint8Array containing the text given in parameters encoded with the specific method for that TextEncoder object. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TextEncoder/encode) + */ + encode(input?: string): Uint8Array; + /** + * The **`TextEncoder.encodeInto()`** method takes a string to encode and a destination Uint8Array to put resulting UTF-8 encoded text into, and returns a dictionary object indicating the progress of the encoding. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TextEncoder/encodeInto) + */ + encodeInto(input: string, buffer: Uint8Array): TextEncoderEncodeIntoResult; + get encoding(): string; +} +interface TextDecoderConstructorOptions { + fatal: boolean; + ignoreBOM: boolean; +} +interface TextDecoderDecodeOptions { + stream: boolean; +} +interface TextEncoderEncodeIntoResult { + read: number; + written: number; +} +/** + * The **`ErrorEvent`** interface represents events providing information related to errors in scripts or in files. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ErrorEvent) + */ +declare class ErrorEvent extends Event { + constructor(type: string, init?: ErrorEventErrorEventInit); + /** + * The **`filename`** read-only property of the ErrorEvent interface returns a string containing the name of the script file in which the error occurred. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ErrorEvent/filename) + */ + get filename(): string; + /** + * The **`message`** read-only property of the ErrorEvent interface returns a string containing a human-readable error message describing the problem. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ErrorEvent/message) + */ + get message(): string; + /** + * The **`lineno`** read-only property of the ErrorEvent interface returns an integer containing the line number of the script file on which the error occurred. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ErrorEvent/lineno) + */ + get lineno(): number; + /** + * The **`colno`** read-only property of the ErrorEvent interface returns an integer containing the column number of the script file on which the error occurred. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ErrorEvent/colno) + */ + get colno(): number; + /** + * The **`error`** read-only property of the ErrorEvent interface returns a JavaScript value, such as an Error or DOMException, representing the error associated with this event. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ErrorEvent/error) + */ + get error(): any; +} +interface ErrorEventErrorEventInit { + message?: string; + filename?: string; + lineno?: number; + colno?: number; + error?: any; +} +/** + * The **`MessageEvent`** interface represents a message received by a target object. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessageEvent) + */ +declare class MessageEvent extends Event { + constructor(type: string, initializer: MessageEventInit); + /** + * The **`data`** read-only property of the The data sent by the message emitter; this can be any data type, depending on what originated this event. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessageEvent/data) + */ + readonly data: any; + /** + * The **`origin`** read-only property of the origin of the message emitter. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessageEvent/origin) + */ + readonly origin: string | null; + /** + * The **`lastEventId`** read-only property of the unique ID for the event. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessageEvent/lastEventId) + */ + readonly lastEventId: string; + /** + * The **`source`** read-only property of the a WindowProxy, MessagePort, or a `MessageEventSource` (which can be a WindowProxy, message emitter. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessageEvent/source) + */ + readonly source: MessagePort | null; + /** + * The **`ports`** read-only property of the containing all MessagePort objects sent with the message, in order. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessageEvent/ports) + */ + readonly ports: MessagePort[]; +} +interface MessageEventInit { + data: ArrayBuffer | string; +} +/** + * The **`PromiseRejectionEvent`** interface represents events which are sent to the global script context when JavaScript Promises are rejected. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/PromiseRejectionEvent) + */ +declare abstract class PromiseRejectionEvent extends Event { + /** + * The PromiseRejectionEvent interface's **`promise`** read-only property indicates the JavaScript rejected. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/PromiseRejectionEvent/promise) + */ + readonly promise: Promise; + /** + * The PromiseRejectionEvent **`reason`** read-only property is any JavaScript value or Object which provides the reason passed into Promise.reject(). + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/PromiseRejectionEvent/reason) + */ + readonly reason: any; +} +/** + * The **`FormData`** interface provides a way to construct a set of key/value pairs representing form fields and their values, which can be sent using the Window/fetch, XMLHttpRequest.send() or navigator.sendBeacon() methods. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData) + */ +declare class FormData { + constructor(); + /** + * The **`append()`** method of the FormData interface appends a new value onto an existing key inside a `FormData` object, or adds the key if it does not already exist. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/append) + */ + append(name: string, value: string | Blob): void; + /** + * The **`append()`** method of the FormData interface appends a new value onto an existing key inside a `FormData` object, or adds the key if it does not already exist. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/append) + */ + append(name: string, value: string): void; + /** + * The **`append()`** method of the FormData interface appends a new value onto an existing key inside a `FormData` object, or adds the key if it does not already exist. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/append) + */ + append(name: string, value: Blob, filename?: string): void; + /** + * The **`delete()`** method of the FormData interface deletes a key and its value(s) from a `FormData` object. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/delete) + */ + delete(name: string): void; + /** + * The **`get()`** method of the FormData interface returns the first value associated with a given key from within a `FormData` object. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/get) + */ + get(name: string): (File | string) | null; + /** + * The **`getAll()`** method of the FormData interface returns all the values associated with a given key from within a `FormData` object. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/getAll) + */ + getAll(name: string): (File | string)[]; + /** + * The **`has()`** method of the FormData interface returns whether a `FormData` object contains a certain key. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/has) + */ + has(name: string): boolean; + /** + * The **`set()`** method of the FormData interface sets a new value for an existing key inside a `FormData` object, or adds the key/value if it does not already exist. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/set) + */ + set(name: string, value: string | Blob): void; + /** + * The **`set()`** method of the FormData interface sets a new value for an existing key inside a `FormData` object, or adds the key/value if it does not already exist. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/set) + */ + set(name: string, value: string): void; + /** + * The **`set()`** method of the FormData interface sets a new value for an existing key inside a `FormData` object, or adds the key/value if it does not already exist. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FormData/set) + */ + set(name: string, value: Blob, filename?: string): void; + /* Returns an array of key, value pairs for every entry in the list. */ + entries(): IterableIterator<[key: string, value: File | string]>; + /* Returns a list of keys in the list. */ + keys(): IterableIterator; + /* Returns a list of values in the list. 
*/ + values(): IterableIterator; + forEach( + callback: (this: This, value: File | string, key: string, parent: FormData) => void, + thisArg?: This, + ): void; + [Symbol.iterator](): IterableIterator<[key: string, value: File | string]>; +} +interface ContentOptions { + html?: boolean; +} +declare class HTMLRewriter { + constructor(); + on(selector: string, handlers: HTMLRewriterElementContentHandlers): HTMLRewriter; + onDocument(handlers: HTMLRewriterDocumentContentHandlers): HTMLRewriter; + transform(response: Response): Response; +} +interface HTMLRewriterElementContentHandlers { + element?(element: Element): void | Promise; + comments?(comment: Comment): void | Promise; + text?(element: Text): void | Promise; +} +interface HTMLRewriterDocumentContentHandlers { + doctype?(doctype: Doctype): void | Promise; + comments?(comment: Comment): void | Promise; + text?(text: Text): void | Promise; + end?(end: DocumentEnd): void | Promise; +} +interface Doctype { + readonly name: string | null; + readonly publicId: string | null; + readonly systemId: string | null; +} +interface Element { + tagName: string; + readonly attributes: IterableIterator; + readonly removed: boolean; + readonly namespaceURI: string; + getAttribute(name: string): string | null; + hasAttribute(name: string): boolean; + setAttribute(name: string, value: string): Element; + removeAttribute(name: string): Element; + before(content: string | ReadableStream | Response, options?: ContentOptions): Element; + after(content: string | ReadableStream | Response, options?: ContentOptions): Element; + prepend(content: string | ReadableStream | Response, options?: ContentOptions): Element; + append(content: string | ReadableStream | Response, options?: ContentOptions): Element; + replace(content: string | ReadableStream | Response, options?: ContentOptions): Element; + remove(): Element; + removeAndKeepContent(): Element; + setInnerContent(content: string | ReadableStream | Response, options?: ContentOptions): 
Element; + onEndTag(handler: (tag: EndTag) => void | Promise): void; +} +interface EndTag { + name: string; + before(content: string | ReadableStream | Response, options?: ContentOptions): EndTag; + after(content: string | ReadableStream | Response, options?: ContentOptions): EndTag; + remove(): EndTag; +} +interface Comment { + text: string; + readonly removed: boolean; + before(content: string, options?: ContentOptions): Comment; + after(content: string, options?: ContentOptions): Comment; + replace(content: string, options?: ContentOptions): Comment; + remove(): Comment; +} +interface Text { + readonly text: string; + readonly lastInTextNode: boolean; + readonly removed: boolean; + before(content: string | ReadableStream | Response, options?: ContentOptions): Text; + after(content: string | ReadableStream | Response, options?: ContentOptions): Text; + replace(content: string | ReadableStream | Response, options?: ContentOptions): Text; + remove(): Text; +} +interface DocumentEnd { + append(content: string, options?: ContentOptions): DocumentEnd; +} +/** + * This is the event type for `fetch` events dispatched on the ServiceWorkerGlobalScope. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FetchEvent) + */ +declare abstract class FetchEvent extends ExtendableEvent { + /** + * The **`request`** read-only property of the the event handler. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FetchEvent/request) + */ + readonly request: Request; + /** + * The **`respondWith()`** method of allows you to provide a promise for a Response yourself. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/FetchEvent/respondWith) + */ + respondWith(promise: Response | Promise): void; + passThroughOnException(): void; +} +type HeadersInit = Headers | Iterable> | Record; +/** + * The **`Headers`** interface of the Fetch API allows you to perform various actions on HTTP request and response headers. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Headers) + */ +declare class Headers { + constructor(init?: HeadersInit); + /** + * The **`get()`** method of the Headers interface returns a byte string of all the values of a header within a `Headers` object with a given name. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Headers/get) + */ + get(name: string): string | null; + getAll(name: string): string[]; + /** + * The **`getSetCookie()`** method of the Headers interface returns an array containing the values of all Set-Cookie headers associated with a response. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Headers/getSetCookie) + */ + getSetCookie(): string[]; + /** + * The **`has()`** method of the Headers interface returns a boolean stating whether a `Headers` object contains a certain header. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Headers/has) + */ + has(name: string): boolean; + /** + * The **`set()`** method of the Headers interface sets a new value for an existing header inside a `Headers` object, or adds the header if it does not already exist. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Headers/set) + */ + set(name: string, value: string): void; + /** + * The **`append()`** method of the Headers interface appends a new value onto an existing header inside a `Headers` object, or adds the header if it does not already exist. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Headers/append) + */ + append(name: string, value: string): void; + /** + * The **`delete()`** method of the Headers interface deletes a header from the current `Headers` object. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Headers/delete) + */ + delete(name: string): void; + forEach( + callback: (this: This, value: string, key: string, parent: Headers) => void, + thisArg?: This, + ): void; + /* Returns an iterator allowing to go through all key/value pairs contained in this object. */ + entries(): IterableIterator<[key: string, value: string]>; + /* Returns an iterator allowing to go through all keys of the key/value pairs contained in this object. */ + keys(): IterableIterator; + /* Returns an iterator allowing to go through all values of the key/value pairs contained in this object. */ + values(): IterableIterator; + [Symbol.iterator](): IterableIterator<[key: string, value: string]>; +} +type BodyInit = ReadableStream | string | ArrayBuffer | ArrayBufferView | Blob | URLSearchParams | FormData; +declare abstract class Body { + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/body) */ + get body(): ReadableStream | null; + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/bodyUsed) */ + get bodyUsed(): boolean; + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/arrayBuffer) */ + arrayBuffer(): Promise; + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/bytes) */ + bytes(): Promise; + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/text) */ + text(): Promise; + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/json) */ + json(): Promise; + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/formData) */ + formData(): Promise; + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/blob) */ + blob(): Promise; +} +/** + * The **`Response`** interface of the Fetch API represents the response to a request. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Response) + */ +declare var Response: { + prototype: Response; + new (body?: BodyInit | null, init?: ResponseInit): Response; + error(): Response; + redirect(url: string, status?: number): Response; + json(any: any, maybeInit?: ResponseInit | Response): Response; +}; +/** + * The **`Response`** interface of the Fetch API represents the response to a request. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Response) + */ +interface Response extends Body { + /** + * The **`clone()`** method of the Response interface creates a clone of a response object, identical in every way, but stored in a different variable. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Response/clone) + */ + clone(): Response; + /** + * The **`status`** read-only property of the Response interface contains the HTTP status codes of the response. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Response/status) + */ + status: number; + /** + * The **`statusText`** read-only property of the Response interface contains the status message corresponding to the HTTP status code in Response.status. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Response/statusText) + */ + statusText: string; + /** + * The **`headers`** read-only property of the with the response. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Response/headers) + */ + headers: Headers; + /** + * The **`ok`** read-only property of the Response interface contains a Boolean stating whether the response was successful (status in the range 200-299) or not. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Response/ok) + */ + ok: boolean; + /** + * The **`redirected`** read-only property of the Response interface indicates whether or not the response is the result of a request you made which was redirected. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Response/redirected) + */ + redirected: boolean; + /** + * The **`url`** read-only property of the Response interface contains the URL of the response. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Response/url) + */ + url: string; + webSocket: WebSocket | null; + cf: any | undefined; + /** + * The **`type`** read-only property of the Response interface contains the type of the response. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Response/type) + */ + type: 'default' | 'error'; +} +interface ResponseInit { + status?: number; + statusText?: string; + headers?: HeadersInit; + cf?: any; + webSocket?: WebSocket | null; + encodeBody?: 'automatic' | 'manual'; +} +type RequestInfo> = Request | string; +/** + * The **`Request`** interface of the Fetch API represents a resource request. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request) + */ +declare var Request: { + prototype: Request; + new >( + input: RequestInfo | URL, + init?: RequestInit, + ): Request; +}; +/** + * The **`Request`** interface of the Fetch API represents a resource request. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request) + */ +interface Request> extends Body { + /** + * The **`clone()`** method of the Request interface creates a copy of the current `Request` object. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/clone) + */ + clone(): Request; + /** + * The **`method`** read-only property of the `POST`, etc.) A String indicating the method of the request. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/method) + */ + method: string; + /** + * The **`url`** read-only property of the Request interface contains the URL of the request. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/url) + */ + url: string; + /** + * The **`headers`** read-only property of the with the request. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/headers) + */ + headers: Headers; + /** + * The **`redirect`** read-only property of the Request interface contains the mode for how redirects are handled. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/redirect) + */ + redirect: string; + fetcher: Fetcher | null; + /** + * The read-only **`signal`** property of the Request interface returns the AbortSignal associated with the request. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/signal) + */ + signal: AbortSignal; + cf?: Cf; + /** + * The **`integrity`** read-only property of the Request interface contains the subresource integrity value of the request. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/integrity) + */ + integrity: string; + /** + * The **`keepalive`** read-only property of the Request interface contains the request's `keepalive` setting (`true` or `false`), which indicates whether the browser will keep the associated request alive if the page that initiated it is unloaded before the request is complete. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/keepalive) + */ + keepalive: boolean; + /** + * The **`cache`** read-only property of the Request interface contains the cache mode of the request. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Request/cache) + */ + cache?: 'no-store' | 'no-cache'; +} +interface RequestInit { + /* A string to set request's method. */ + method?: string; + /* A Headers object, an object literal, or an array of two-item arrays to set request's headers. */ + headers?: HeadersInit; + /* A BodyInit object or null to set request's body. 
*/ + body?: BodyInit | null; + /* A string indicating whether request follows redirects, results in an error upon encountering a redirect, or returns the redirect (in an opaque fashion). Sets request's redirect. */ + redirect?: string; + fetcher?: Fetcher | null; + cf?: Cf; + /* A string indicating how the request will interact with the browser's cache to set request's cache. */ + cache?: 'no-store' | 'no-cache'; + /* A cryptographic hash of the resource to be fetched by request. Sets request's integrity. */ + integrity?: string; + /* An AbortSignal to set request's signal. */ + signal?: AbortSignal | null; + encodeResponseBody?: 'automatic' | 'manual'; +} +type Service< + T extends + | (new (...args: any[]) => Rpc.WorkerEntrypointBranded) + | Rpc.WorkerEntrypointBranded + | ExportedHandler + | undefined = undefined, +> = T extends new (...args: any[]) => Rpc.WorkerEntrypointBranded + ? Fetcher> + : T extends Rpc.WorkerEntrypointBranded + ? Fetcher + : T extends Exclude + ? never + : Fetcher; +type Fetcher< + T extends Rpc.EntrypointBranded | undefined = undefined, + Reserved extends string = never, +> = (T extends Rpc.EntrypointBranded ? 
Rpc.Provider : unknown) & { + fetch(input: RequestInfo | URL, init?: RequestInit): Promise; + connect(address: SocketAddress | string, options?: SocketOptions): Socket; +}; +interface KVNamespaceListKey { + name: Key; + expiration?: number; + metadata?: Metadata; +} +type KVNamespaceListResult = + | { + list_complete: false; + keys: KVNamespaceListKey[]; + cursor: string; + cacheStatus: string | null; + } + | { + list_complete: true; + keys: KVNamespaceListKey[]; + cacheStatus: string | null; + }; +interface KVNamespace { + get(key: Key, options?: Partial>): Promise; + get(key: Key, type: 'text'): Promise; + get(key: Key, type: 'json'): Promise; + get(key: Key, type: 'arrayBuffer'): Promise; + get(key: Key, type: 'stream'): Promise; + get(key: Key, options?: KVNamespaceGetOptions<'text'>): Promise; + get(key: Key, options?: KVNamespaceGetOptions<'json'>): Promise; + get(key: Key, options?: KVNamespaceGetOptions<'arrayBuffer'>): Promise; + get(key: Key, options?: KVNamespaceGetOptions<'stream'>): Promise; + get(key: Array, type: 'text'): Promise>; + get(key: Array, type: 'json'): Promise>; + get(key: Array, options?: Partial>): Promise>; + get(key: Array, options?: KVNamespaceGetOptions<'text'>): Promise>; + get( + key: Array, + options?: KVNamespaceGetOptions<'json'>, + ): Promise>; + list(options?: KVNamespaceListOptions): Promise>; + put( + key: Key, + value: string | ArrayBuffer | ArrayBufferView | ReadableStream, + options?: KVNamespacePutOptions, + ): Promise; + getWithMetadata( + key: Key, + options?: Partial>, + ): Promise>; + getWithMetadata( + key: Key, + type: 'text', + ): Promise>; + getWithMetadata( + key: Key, + type: 'json', + ): Promise>; + getWithMetadata( + key: Key, + type: 'arrayBuffer', + ): Promise>; + getWithMetadata( + key: Key, + type: 'stream', + ): Promise>; + getWithMetadata( + key: Key, + options: KVNamespaceGetOptions<'text'>, + ): Promise>; + getWithMetadata( + key: Key, + options: KVNamespaceGetOptions<'json'>, + ): Promise>; + 
getWithMetadata( + key: Key, + options: KVNamespaceGetOptions<'arrayBuffer'>, + ): Promise>; + getWithMetadata( + key: Key, + options: KVNamespaceGetOptions<'stream'>, + ): Promise>; + getWithMetadata( + key: Array, + type: 'text', + ): Promise>>; + getWithMetadata( + key: Array, + type: 'json', + ): Promise>>; + getWithMetadata( + key: Array, + options?: Partial>, + ): Promise>>; + getWithMetadata( + key: Array, + options?: KVNamespaceGetOptions<'text'>, + ): Promise>>; + getWithMetadata( + key: Array, + options?: KVNamespaceGetOptions<'json'>, + ): Promise>>; + delete(key: Key): Promise; +} +interface KVNamespaceListOptions { + limit?: number; + prefix?: string | null; + cursor?: string | null; +} +interface KVNamespaceGetOptions { + type: Type; + cacheTtl?: number; +} +interface KVNamespacePutOptions { + expiration?: number; + expirationTtl?: number; + metadata?: any | null; +} +interface KVNamespaceGetWithMetadataResult { + value: Value | null; + metadata: Metadata | null; + cacheStatus: string | null; +} +type QueueContentType = 'text' | 'bytes' | 'json' | 'v8'; +interface Queue { + send(message: Body, options?: QueueSendOptions): Promise; + sendBatch(messages: Iterable>, options?: QueueSendBatchOptions): Promise; +} +interface QueueSendOptions { + contentType?: QueueContentType; + delaySeconds?: number; +} +interface QueueSendBatchOptions { + delaySeconds?: number; +} +interface MessageSendRequest { + body: Body; + contentType?: QueueContentType; + delaySeconds?: number; +} +interface QueueRetryOptions { + delaySeconds?: number; +} +interface Message { + readonly id: string; + readonly timestamp: Date; + readonly body: Body; + readonly attempts: number; + retry(options?: QueueRetryOptions): void; + ack(): void; +} +interface QueueEvent extends ExtendableEvent { + readonly messages: readonly Message[]; + readonly queue: string; + retryAll(options?: QueueRetryOptions): void; + ackAll(): void; +} +interface MessageBatch { + readonly messages: readonly Message[]; 
+ readonly queue: string; + retryAll(options?: QueueRetryOptions): void; + ackAll(): void; +} +interface R2Error extends Error { + readonly name: string; + readonly code: number; + readonly message: string; + readonly action: string; + readonly stack: any; +} +interface R2ListOptions { + limit?: number; + prefix?: string; + cursor?: string; + delimiter?: string; + startAfter?: string; + include?: ('httpMetadata' | 'customMetadata')[]; +} +declare abstract class R2Bucket { + head(key: string): Promise; + get( + key: string, + options: R2GetOptions & { + onlyIf: R2Conditional | Headers; + }, + ): Promise; + get(key: string, options?: R2GetOptions): Promise; + put( + key: string, + value: ReadableStream | ArrayBuffer | ArrayBufferView | string | null | Blob, + options?: R2PutOptions & { + onlyIf: R2Conditional | Headers; + }, + ): Promise; + put( + key: string, + value: ReadableStream | ArrayBuffer | ArrayBufferView | string | null | Blob, + options?: R2PutOptions, + ): Promise; + createMultipartUpload(key: string, options?: R2MultipartOptions): Promise; + resumeMultipartUpload(key: string, uploadId: string): R2MultipartUpload; + delete(keys: string | string[]): Promise; + list(options?: R2ListOptions): Promise; +} +interface R2MultipartUpload { + readonly key: string; + readonly uploadId: string; + uploadPart( + partNumber: number, + value: ReadableStream | (ArrayBuffer | ArrayBufferView) | string | Blob, + options?: R2UploadPartOptions, + ): Promise; + abort(): Promise; + complete(uploadedParts: R2UploadedPart[]): Promise; +} +interface R2UploadedPart { + partNumber: number; + etag: string; +} +declare abstract class R2Object { + readonly key: string; + readonly version: string; + readonly size: number; + readonly etag: string; + readonly httpEtag: string; + readonly checksums: R2Checksums; + readonly uploaded: Date; + readonly httpMetadata?: R2HTTPMetadata; + readonly customMetadata?: Record; + readonly range?: R2Range; + readonly storageClass: string; + readonly 
ssecKeyMd5?: string; + writeHttpMetadata(headers: Headers): void; +} +interface R2ObjectBody extends R2Object { + get body(): ReadableStream; + get bodyUsed(): boolean; + arrayBuffer(): Promise; + bytes(): Promise; + text(): Promise; + json(): Promise; + blob(): Promise; +} +type R2Range = + | { + offset: number; + length?: number; + } + | { + offset?: number; + length: number; + } + | { + suffix: number; + }; +interface R2Conditional { + etagMatches?: string; + etagDoesNotMatch?: string; + uploadedBefore?: Date; + uploadedAfter?: Date; + secondsGranularity?: boolean; +} +interface R2GetOptions { + onlyIf?: R2Conditional | Headers; + range?: R2Range | Headers; + ssecKey?: ArrayBuffer | string; +} +interface R2PutOptions { + onlyIf?: R2Conditional | Headers; + httpMetadata?: R2HTTPMetadata | Headers; + customMetadata?: Record; + md5?: (ArrayBuffer | ArrayBufferView) | string; + sha1?: (ArrayBuffer | ArrayBufferView) | string; + sha256?: (ArrayBuffer | ArrayBufferView) | string; + sha384?: (ArrayBuffer | ArrayBufferView) | string; + sha512?: (ArrayBuffer | ArrayBufferView) | string; + storageClass?: string; + ssecKey?: ArrayBuffer | string; +} +interface R2MultipartOptions { + httpMetadata?: R2HTTPMetadata | Headers; + customMetadata?: Record; + storageClass?: string; + ssecKey?: ArrayBuffer | string; +} +interface R2Checksums { + readonly md5?: ArrayBuffer; + readonly sha1?: ArrayBuffer; + readonly sha256?: ArrayBuffer; + readonly sha384?: ArrayBuffer; + readonly sha512?: ArrayBuffer; + toJSON(): R2StringChecksums; +} +interface R2StringChecksums { + md5?: string; + sha1?: string; + sha256?: string; + sha384?: string; + sha512?: string; +} +interface R2HTTPMetadata { + contentType?: string; + contentLanguage?: string; + contentDisposition?: string; + contentEncoding?: string; + cacheControl?: string; + cacheExpiry?: Date; +} +type R2Objects = { + objects: R2Object[]; + delimitedPrefixes: string[]; +} & ( + | { + truncated: true; + cursor: string; + } + | { + 
truncated: false; + } +); +interface R2UploadPartOptions { + ssecKey?: ArrayBuffer | string; +} +declare abstract class ScheduledEvent extends ExtendableEvent { + readonly scheduledTime: number; + readonly cron: string; + noRetry(): void; +} +interface ScheduledController { + readonly scheduledTime: number; + readonly cron: string; + noRetry(): void; +} +interface QueuingStrategy { + highWaterMark?: number | bigint; + size?: (chunk: T) => number | bigint; +} +interface UnderlyingSink { + type?: string; + start?: (controller: WritableStreamDefaultController) => void | Promise; + write?: (chunk: W, controller: WritableStreamDefaultController) => void | Promise; + abort?: (reason: any) => void | Promise; + close?: () => void | Promise; +} +interface UnderlyingByteSource { + type: 'bytes'; + autoAllocateChunkSize?: number; + start?: (controller: ReadableByteStreamController) => void | Promise; + pull?: (controller: ReadableByteStreamController) => void | Promise; + cancel?: (reason: any) => void | Promise; +} +interface UnderlyingSource { + type?: '' | undefined; + start?: (controller: ReadableStreamDefaultController) => void | Promise; + pull?: (controller: ReadableStreamDefaultController) => void | Promise; + cancel?: (reason: any) => void | Promise; + expectedLength?: number | bigint; +} +interface Transformer { + readableType?: string; + writableType?: string; + start?: (controller: TransformStreamDefaultController) => void | Promise; + transform?: (chunk: I, controller: TransformStreamDefaultController) => void | Promise; + flush?: (controller: TransformStreamDefaultController) => void | Promise; + cancel?: (reason: any) => void | Promise; + expectedLength?: number; +} +interface StreamPipeOptions { + preventAbort?: boolean; + preventCancel?: boolean; + /** + * Pipes this readable stream to a given writable stream destination. The way in which the piping process behaves under various error conditions can be customized with a number of passed options. 
It returns a promise that fulfills when the piping process completes successfully, or rejects if any errors were encountered. + * + * Piping a stream will lock it for the duration of the pipe, preventing any other consumer from acquiring a reader. + * + * Errors and closures of the source and destination streams propagate as follows: + * + * An error in this source readable stream will abort destination, unless preventAbort is truthy. The returned promise will be rejected with the source's error, or with any error that occurs during aborting the destination. + * + * An error in destination will cancel this source readable stream, unless preventCancel is truthy. The returned promise will be rejected with the destination's error, or with any error that occurs during canceling the source. + * + * When this source readable stream closes, destination will be closed, unless preventClose is truthy. The returned promise will be fulfilled once this process completes, unless an error is encountered while closing the destination, in which case it will be rejected with that error. + * + * If destination starts out closed or closing, this source readable stream will be canceled, unless preventCancel is true. The returned promise will be rejected with an error indicating piping to a closed stream failed, or with any error that occurs during canceling the source. + * + * The signal option can be set to an AbortSignal to allow aborting an ongoing pipe operation via the corresponding AbortController. In this case, this source readable stream will be canceled, and destination aborted, unless the respective options preventCancel or preventAbort are set. + */ + preventClose?: boolean; + signal?: AbortSignal; +} +type ReadableStreamReadResult = + | { + done: false; + value: R; + } + | { + done: true; + value?: undefined; + }; +/** + * The `ReadableStream` interface of the Streams API represents a readable stream of byte data. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStream) + */ +interface ReadableStream { + /** + * The **`locked`** read-only property of the ReadableStream interface returns whether or not the readable stream is locked to a reader. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStream/locked) + */ + get locked(): boolean; + /** + * The **`cancel()`** method of the ReadableStream interface returns a Promise that resolves when the stream is canceled. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStream/cancel) + */ + cancel(reason?: any): Promise; + /** + * The **`getReader()`** method of the ReadableStream interface creates a reader and locks the stream to it. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStream/getReader) + */ + getReader(): ReadableStreamDefaultReader; + /** + * The **`getReader()`** method of the ReadableStream interface creates a reader and locks the stream to it. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStream/getReader) + */ + getReader(options: ReadableStreamGetReaderOptions): ReadableStreamBYOBReader; + /** + * The **`pipeThrough()`** method of the ReadableStream interface provides a chainable way of piping the current stream through a transform stream or any other writable/readable pair. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStream/pipeThrough) + */ + pipeThrough(transform: ReadableWritablePair, options?: StreamPipeOptions): ReadableStream; + /** + * The **`pipeTo()`** method of the ReadableStream interface pipes the current `ReadableStream` to a given WritableStream and returns a Promise that fulfills when the piping process completes successfully, or rejects if any errors were encountered. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStream/pipeTo) + */ + pipeTo(destination: WritableStream, options?: StreamPipeOptions): Promise; + /** + * The **`tee()`** method of the two-element array containing the two resulting branches as new ReadableStream instances. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStream/tee) + */ + tee(): [ReadableStream, ReadableStream]; + values(options?: ReadableStreamValuesOptions): AsyncIterableIterator; + [Symbol.asyncIterator](options?: ReadableStreamValuesOptions): AsyncIterableIterator; +} +/** + * The `ReadableStream` interface of the Streams API represents a readable stream of byte data. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStream) + */ +declare const ReadableStream: { + prototype: ReadableStream; + new (underlyingSource: UnderlyingByteSource, strategy?: QueuingStrategy): ReadableStream; + new (underlyingSource?: UnderlyingSource, strategy?: QueuingStrategy): ReadableStream; +}; +/** + * The **`ReadableStreamDefaultReader`** interface of the Streams API represents a default reader that can be used to read stream data supplied from a network (such as a fetch request). + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamDefaultReader) + */ +declare class ReadableStreamDefaultReader { + constructor(stream: ReadableStream); + get closed(): Promise; + cancel(reason?: any): Promise; + /** + * The **`read()`** method of the ReadableStreamDefaultReader interface returns a Promise providing access to the next chunk in the stream's internal queue. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamDefaultReader/read) + */ + read(): Promise>; + /** + * The **`releaseLock()`** method of the ReadableStreamDefaultReader interface releases the reader's lock on the stream. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamDefaultReader/releaseLock) + */ + releaseLock(): void; +} +/** + * The `ReadableStreamBYOBReader` interface of the Streams API defines a reader for a ReadableStream that supports zero-copy reading from an underlying byte source. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamBYOBReader) + */ +declare class ReadableStreamBYOBReader { + constructor(stream: ReadableStream); + get closed(): Promise; + cancel(reason?: any): Promise; + /** + * The **`read()`** method of the ReadableStreamBYOBReader interface is used to read data into a view on a user-supplied buffer from an associated readable byte stream. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamBYOBReader/read) + */ + read(view: T): Promise>; + /** + * The **`releaseLock()`** method of the ReadableStreamBYOBReader interface releases the reader's lock on the stream. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamBYOBReader/releaseLock) + */ + releaseLock(): void; + readAtLeast(minElements: number, view: T): Promise>; +} +interface ReadableStreamBYOBReaderReadableStreamBYOBReaderReadOptions { + min?: number; +} +interface ReadableStreamGetReaderOptions { + /** + * Creates a ReadableStreamBYOBReader and locks the stream to the new reader. + * + * This call behaves the same way as the no-argument variant, except that it only works on readable byte streams, i.e. streams which were constructed specifically with the ability to handle "bring your own buffer" reading. The returned BYOB reader provides the ability to directly read individual chunks from the stream via its read() method, into developer-supplied buffers, allowing more precise control over allocation. 
+ */ + mode: 'byob'; +} +/** + * The **`ReadableStreamBYOBRequest`** interface of the Streams API represents a 'pull request' for data from an underlying source that will made as a zero-copy transfer to a consumer (bypassing the stream's internal queues). + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamBYOBRequest) + */ +declare abstract class ReadableStreamBYOBRequest { + /** + * The **`view`** getter property of the ReadableStreamBYOBRequest interface returns the current view. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamBYOBRequest/view) + */ + get view(): Uint8Array | null; + /** + * The **`respond()`** method of the ReadableStreamBYOBRequest interface is used to signal to the associated readable byte stream that the specified number of bytes were written into the ReadableStreamBYOBRequest.view. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamBYOBRequest/respond) + */ + respond(bytesWritten: number): void; + /** + * The **`respondWithNewView()`** method of the ReadableStreamBYOBRequest interface specifies a new view that the consumer of the associated readable byte stream should write to instead of ReadableStreamBYOBRequest.view. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamBYOBRequest/respondWithNewView) + */ + respondWithNewView(view: ArrayBuffer | ArrayBufferView): void; + get atLeast(): number | null; +} +/** + * The **`ReadableStreamDefaultController`** interface of the Streams API represents a controller allowing control of a ReadableStream's state and internal queue. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamDefaultController) + */ +declare abstract class ReadableStreamDefaultController { + /** + * The **`desiredSize`** read-only property of the required to fill the stream's internal queue. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamDefaultController/desiredSize) + */ + get desiredSize(): number | null; + /** + * The **`close()`** method of the ReadableStreamDefaultController interface closes the associated stream. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamDefaultController/close) + */ + close(): void; + /** + * The **`enqueue()`** method of the ```js-nolint enqueue(chunk) ``` - `chunk` - : The chunk to enqueue. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamDefaultController/enqueue) + */ + enqueue(chunk?: R): void; + /** + * The **`error()`** method of the with the associated stream to error. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableStreamDefaultController/error) + */ + error(reason: any): void; +} +/** + * The **`ReadableByteStreamController`** interface of the Streams API represents a controller for a readable byte stream. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableByteStreamController) + */ +declare abstract class ReadableByteStreamController { + /** + * The **`byobRequest`** read-only property of the ReadableByteStreamController interface returns the current BYOB request, or `null` if there are no pending requests. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableByteStreamController/byobRequest) + */ + get byobRequest(): ReadableStreamBYOBRequest | null; + /** + * The **`desiredSize`** read-only property of the ReadableByteStreamController interface returns the number of bytes required to fill the stream's internal queue to its 'desired size'. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableByteStreamController/desiredSize) + */ + get desiredSize(): number | null; + /** + * The **`close()`** method of the ReadableByteStreamController interface closes the associated stream. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableByteStreamController/close) + */ + close(): void; + /** + * The **`enqueue()`** method of the ReadableByteStreamController interface enqueues a given chunk on the associated readable byte stream (the chunk is copied into the stream's internal queues). + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableByteStreamController/enqueue) + */ + enqueue(chunk: ArrayBuffer | ArrayBufferView): void; + /** + * The **`error()`** method of the ReadableByteStreamController interface causes any future interactions with the associated stream to error with the specified reason. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ReadableByteStreamController/error) + */ + error(reason: any): void; +} +/** + * The **`WritableStreamDefaultController`** interface of the Streams API represents a controller allowing control of a WritableStream's state. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultController) + */ +declare abstract class WritableStreamDefaultController { + /** + * The read-only **`signal`** property of the WritableStreamDefaultController interface returns the AbortSignal associated with the controller. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultController/signal) + */ + get signal(): AbortSignal; + /** + * The **`error()`** method of the with the associated stream to error. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultController/error) + */ + error(reason?: any): void; +} +/** + * The **`TransformStreamDefaultController`** interface of the Streams API provides methods to manipulate the associated ReadableStream and WritableStream. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TransformStreamDefaultController) + */ +declare abstract class TransformStreamDefaultController { + /** + * The **`desiredSize`** read-only property of the TransformStreamDefaultController interface returns the desired size to fill the queue of the associated ReadableStream. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TransformStreamDefaultController/desiredSize) + */ + get desiredSize(): number | null; + /** + * The **`enqueue()`** method of the TransformStreamDefaultController interface enqueues the given chunk in the readable side of the stream. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TransformStreamDefaultController/enqueue) + */ + enqueue(chunk?: O): void; + /** + * The **`error()`** method of the TransformStreamDefaultController interface errors both sides of the stream. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TransformStreamDefaultController/error) + */ + error(reason: any): void; + /** + * The **`terminate()`** method of the TransformStreamDefaultController interface closes the readable side and errors the writable side of the stream. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TransformStreamDefaultController/terminate) + */ + terminate(): void; +} +interface ReadableWritablePair { + readable: ReadableStream; + /** + * Provides a convenient, chainable way of piping this readable stream through a transform stream (or any other { writable, readable } pair). It simply pipes the stream into the writable side of the supplied pair, and returns the readable side for further use. + * + * Piping a stream will lock it for the duration of the pipe, preventing any other consumer from acquiring a reader. + */ + writable: WritableStream; +} +/** + * The **`WritableStream`** interface of the Streams API provides a standard abstraction for writing streaming data to a destination, known as a sink. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStream) + */ +declare class WritableStream { + constructor(underlyingSink?: UnderlyingSink, queuingStrategy?: QueuingStrategy); + /** + * The **`locked`** read-only property of the WritableStream interface returns a boolean indicating whether the `WritableStream` is locked to a writer. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStream/locked) + */ + get locked(): boolean; + /** + * The **`abort()`** method of the WritableStream interface aborts the stream, signaling that the producer can no longer successfully write to the stream and it is to be immediately moved to an error state, with any queued writes discarded. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStream/abort) + */ + abort(reason?: any): Promise; + /** + * The **`close()`** method of the WritableStream interface closes the associated stream. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStream/close) + */ + close(): Promise; + /** + * The **`getWriter()`** method of the WritableStream interface returns a new instance of WritableStreamDefaultWriter and locks the stream to that instance. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStream/getWriter) + */ + getWriter(): WritableStreamDefaultWriter; +} +/** + * The **`WritableStreamDefaultWriter`** interface of the Streams API is the object returned by WritableStream.getWriter() and once created locks the writer to the `WritableStream` ensuring that no other streams can write to the underlying sink. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultWriter) + */ +declare class WritableStreamDefaultWriter { + constructor(stream: WritableStream); + /** + * The **`closed`** read-only property of the the stream errors or the writer's lock is released. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultWriter/closed) + */ + get closed(): Promise; + /** + * The **`ready`** read-only property of the that resolves when the desired size of the stream's internal queue transitions from non-positive to positive, signaling that it is no longer applying backpressure. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultWriter/ready) + */ + get ready(): Promise; + /** + * The **`desiredSize`** read-only property of the to fill the stream's internal queue. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultWriter/desiredSize) + */ + get desiredSize(): number | null; + /** + * The **`abort()`** method of the the producer can no longer successfully write to the stream and it is to be immediately moved to an error state, with any queued writes discarded. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultWriter/abort) + */ + abort(reason?: any): Promise; + /** + * The **`close()`** method of the stream. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultWriter/close) + */ + close(): Promise; + /** + * The **`write()`** method of the operation. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultWriter/write) + */ + write(chunk?: W): Promise; + /** + * The **`releaseLock()`** method of the corresponding stream. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WritableStreamDefaultWriter/releaseLock) + */ + releaseLock(): void; +} +/** + * The **`TransformStream`** interface of the Streams API represents a concrete implementation of the pipe chain _transform stream_ concept. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TransformStream) + */ +declare class TransformStream { + constructor( + transformer?: Transformer, + writableStrategy?: QueuingStrategy, + readableStrategy?: QueuingStrategy, + ); + /** + * The **`readable`** read-only property of the TransformStream interface returns the ReadableStream instance controlled by this `TransformStream`. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TransformStream/readable) + */ + get readable(): ReadableStream; + /** + * The **`writable`** read-only property of the TransformStream interface returns the WritableStream instance controlled by this `TransformStream`. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TransformStream/writable) + */ + get writable(): WritableStream; +} +declare class FixedLengthStream extends IdentityTransformStream { + constructor(expectedLength: number | bigint, queuingStrategy?: IdentityTransformStreamQueuingStrategy); +} +declare class IdentityTransformStream extends TransformStream { + constructor(queuingStrategy?: IdentityTransformStreamQueuingStrategy); +} +interface IdentityTransformStreamQueuingStrategy { + highWaterMark?: number | bigint; +} +interface ReadableStreamValuesOptions { + preventCancel?: boolean; +} +/** + * The **`CompressionStream`** interface of the Compression Streams API is an API for compressing a stream of data. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CompressionStream) + */ +declare class CompressionStream extends TransformStream { + constructor(format: 'gzip' | 'deflate' | 'deflate-raw'); +} +/** + * The **`DecompressionStream`** interface of the Compression Streams API is an API for decompressing a stream of data. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/DecompressionStream) + */ +declare class DecompressionStream extends TransformStream { + constructor(format: 'gzip' | 'deflate' | 'deflate-raw'); +} +/** + * The **`TextEncoderStream`** interface of the Encoding API converts a stream of strings into bytes in the UTF-8 encoding. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TextEncoderStream) + */ +declare class TextEncoderStream extends TransformStream { + constructor(); + get encoding(): string; +} +/** + * The **`TextDecoderStream`** interface of the Encoding API converts a stream of text in a binary encoding, such as UTF-8 etc., to a stream of strings. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/TextDecoderStream) + */ +declare class TextDecoderStream extends TransformStream { + constructor(label?: string, options?: TextDecoderStreamTextDecoderStreamInit); + get encoding(): string; + get fatal(): boolean; + get ignoreBOM(): boolean; +} +interface TextDecoderStreamTextDecoderStreamInit { + fatal?: boolean; + ignoreBOM?: boolean; +} +/** + * The **`ByteLengthQueuingStrategy`** interface of the Streams API provides a built-in byte length queuing strategy that can be used when constructing streams. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ByteLengthQueuingStrategy) + */ +declare class ByteLengthQueuingStrategy implements QueuingStrategy { + constructor(init: QueuingStrategyInit); + /** + * The read-only **`ByteLengthQueuingStrategy.highWaterMark`** property returns the total number of bytes that can be contained in the internal queue before backpressure is applied. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/ByteLengthQueuingStrategy/highWaterMark) + */ + get highWaterMark(): number; + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/ByteLengthQueuingStrategy/size) */ + get size(): (chunk?: any) => number; +} +/** + * The **`CountQueuingStrategy`** interface of the Streams API provides a built-in chunk counting queuing strategy that can be used when constructing streams. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CountQueuingStrategy) + */ +declare class CountQueuingStrategy implements QueuingStrategy { + constructor(init: QueuingStrategyInit); + /** + * The read-only **`CountQueuingStrategy.highWaterMark`** property returns the total number of chunks that can be contained in the internal queue before backpressure is applied. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CountQueuingStrategy/highWaterMark) + */ + get highWaterMark(): number; + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/CountQueuingStrategy/size) */ + get size(): (chunk?: any) => number; +} +interface QueuingStrategyInit { + /** + * Creates a new ByteLengthQueuingStrategy with the provided high water mark. + * + * Note that the provided high water mark will not be validated ahead of time. Instead, if it is negative, NaN, or not a number, the resulting ByteLengthQueuingStrategy will cause the corresponding stream constructor to throw. 
+ */ + highWaterMark: number; +} +interface ScriptVersion { + id?: string; + tag?: string; + message?: string; +} +declare abstract class TailEvent extends ExtendableEvent { + readonly events: TraceItem[]; + readonly traces: TraceItem[]; +} +interface TraceItem { + readonly event: + | ( + | TraceItemFetchEventInfo + | TraceItemJsRpcEventInfo + | TraceItemScheduledEventInfo + | TraceItemAlarmEventInfo + | TraceItemQueueEventInfo + | TraceItemEmailEventInfo + | TraceItemTailEventInfo + | TraceItemCustomEventInfo + | TraceItemHibernatableWebSocketEventInfo + ) + | null; + readonly eventTimestamp: number | null; + readonly logs: TraceLog[]; + readonly exceptions: TraceException[]; + readonly diagnosticsChannelEvents: TraceDiagnosticChannelEvent[]; + readonly scriptName: string | null; + readonly entrypoint?: string; + readonly scriptVersion?: ScriptVersion; + readonly dispatchNamespace?: string; + readonly scriptTags?: string[]; + readonly durableObjectId?: string; + readonly outcome: string; + readonly executionModel: string; + readonly truncated: boolean; + readonly cpuTime: number; + readonly wallTime: number; +} +interface TraceItemAlarmEventInfo { + readonly scheduledTime: Date; +} +interface TraceItemCustomEventInfo {} +interface TraceItemScheduledEventInfo { + readonly scheduledTime: number; + readonly cron: string; +} +interface TraceItemQueueEventInfo { + readonly queue: string; + readonly batchSize: number; +} +interface TraceItemEmailEventInfo { + readonly mailFrom: string; + readonly rcptTo: string; + readonly rawSize: number; +} +interface TraceItemTailEventInfo { + readonly consumedEvents: TraceItemTailEventInfoTailItem[]; +} +interface TraceItemTailEventInfoTailItem { + readonly scriptName: string | null; +} +interface TraceItemFetchEventInfo { + readonly response?: TraceItemFetchEventInfoResponse; + readonly request: TraceItemFetchEventInfoRequest; +} +interface TraceItemFetchEventInfoRequest { + readonly cf?: any; + readonly headers: Record; + readonly 
method: string; + readonly url: string; + getUnredacted(): TraceItemFetchEventInfoRequest; +} +interface TraceItemFetchEventInfoResponse { + readonly status: number; +} +interface TraceItemJsRpcEventInfo { + readonly rpcMethod: string; +} +interface TraceItemHibernatableWebSocketEventInfo { + readonly getWebSocketEvent: + | TraceItemHibernatableWebSocketEventInfoMessage + | TraceItemHibernatableWebSocketEventInfoClose + | TraceItemHibernatableWebSocketEventInfoError; +} +interface TraceItemHibernatableWebSocketEventInfoMessage { + readonly webSocketEventType: string; +} +interface TraceItemHibernatableWebSocketEventInfoClose { + readonly webSocketEventType: string; + readonly code: number; + readonly wasClean: boolean; +} +interface TraceItemHibernatableWebSocketEventInfoError { + readonly webSocketEventType: string; +} +interface TraceLog { + readonly timestamp: number; + readonly level: string; + readonly message: any; +} +interface TraceException { + readonly timestamp: number; + readonly message: string; + readonly name: string; + readonly stack?: string; +} +interface TraceDiagnosticChannelEvent { + readonly timestamp: number; + readonly channel: string; + readonly message: any; +} +interface TraceMetrics { + readonly cpuTime: number; + readonly wallTime: number; +} +interface UnsafeTraceMetrics { + fromTrace(item: TraceItem): TraceMetrics; +} +/** + * The **`URL`** interface is used to parse, construct, normalize, and encode URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL) + */ +declare class URL { + constructor(url: string | URL, base?: string | URL); + /** + * The **`origin`** read-only property of the URL interface returns a string containing the Unicode serialization of the origin of the represented URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/origin) + */ + get origin(): string; + /** + * The **`href`** property of the URL interface is a string containing the whole URL. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/href) + */ + get href(): string; + /** + * The **`href`** property of the URL interface is a string containing the whole URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/href) + */ + set href(value: string); + /** + * The **`protocol`** property of the URL interface is a string containing the protocol or scheme of the URL, including the final `':'`. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/protocol) + */ + get protocol(): string; + /** + * The **`protocol`** property of the URL interface is a string containing the protocol or scheme of the URL, including the final `':'`. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/protocol) + */ + set protocol(value: string); + /** + * The **`username`** property of the URL interface is a string containing the username component of the URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/username) + */ + get username(): string; + /** + * The **`username`** property of the URL interface is a string containing the username component of the URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/username) + */ + set username(value: string); + /** + * The **`password`** property of the URL interface is a string containing the password component of the URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/password) + */ + get password(): string; + /** + * The **`password`** property of the URL interface is a string containing the password component of the URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/password) + */ + set password(value: string); + /** + * The **`host`** property of the URL interface is a string containing the host, which is the URL.hostname, and then, if the port of the URL is nonempty, a `':'`, followed by the URL.port of the URL. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/host) + */ + get host(): string; + /** + * The **`host`** property of the URL interface is a string containing the host, which is the URL.hostname, and then, if the port of the URL is nonempty, a `':'`, followed by the URL.port of the URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/host) + */ + set host(value: string); + /** + * The **`hostname`** property of the URL interface is a string containing either the domain name or IP address of the URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/hostname) + */ + get hostname(): string; + /** + * The **`hostname`** property of the URL interface is a string containing either the domain name or IP address of the URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/hostname) + */ + set hostname(value: string); + /** + * The **`port`** property of the URL interface is a string containing the port number of the URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/port) + */ + get port(): string; + /** + * The **`port`** property of the URL interface is a string containing the port number of the URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/port) + */ + set port(value: string); + /** + * The **`pathname`** property of the URL interface represents a location in a hierarchical structure. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/pathname) + */ + get pathname(): string; + /** + * The **`pathname`** property of the URL interface represents a location in a hierarchical structure. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/pathname) + */ + set pathname(value: string); + /** + * The **`search`** property of the URL interface is a search string, also called a _query string_, that is a string containing a `'?'` followed by the parameters of the URL. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/search) + */ + get search(): string; + /** + * The **`search`** property of the URL interface is a search string, also called a _query string_, that is a string containing a `'?'` followed by the parameters of the URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/search) + */ + set search(value: string); + /** + * The **`hash`** property of the URL interface is a string containing a `'#'` followed by the fragment identifier of the URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/hash) + */ + get hash(): string; + /** + * The **`hash`** property of the URL interface is a string containing a `'#'` followed by the fragment identifier of the URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/hash) + */ + set hash(value: string); + /** + * The **`searchParams`** read-only property of the access to the [MISSING: httpmethod('GET')] decoded query arguments contained in the URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/searchParams) + */ + get searchParams(): URLSearchParams; + /** + * The **`toJSON()`** method of the URL interface returns a string containing a serialized version of the URL, although in practice it seems to have the same effect as ```js-nolint toJSON() ``` None. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/toJSON) + */ + toJSON(): string; + /*function toString() { [native code] }*/ + toString(): string; + /** + * The **`URL.canParse()`** static method of the URL interface returns a boolean indicating whether or not an absolute URL, or a relative URL combined with a base URL, are parsable and valid. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/canParse_static) + */ + static canParse(url: string, base?: string): boolean; + /** + * The **`URL.parse()`** static method of the URL interface returns a newly created URL object representing the URL defined by the parameters. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/parse_static) + */ + static parse(url: string, base?: string): URL | null; + /** + * The **`createObjectURL()`** static method of the URL interface creates a string containing a URL representing the object given in the parameter. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/createObjectURL_static) + */ + static createObjectURL(object: File | Blob): string; + /** + * The **`revokeObjectURL()`** static method of the URL interface releases an existing object URL which was previously created by calling Call this method when you've finished using an object URL to let the browser know not to keep the reference to the file any longer. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URL/revokeObjectURL_static) + */ + static revokeObjectURL(object_url: string): void; +} +/** + * The **`URLSearchParams`** interface defines utility methods to work with the query string of a URL. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URLSearchParams) + */ +declare class URLSearchParams { + constructor(init?: Iterable> | Record | string); + /** + * The **`size`** read-only property of the URLSearchParams interface indicates the total number of search parameter entries. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URLSearchParams/size) + */ + get size(): number; + /** + * The **`append()`** method of the URLSearchParams interface appends a specified key/value pair as a new search parameter. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URLSearchParams/append) + */ + append(name: string, value: string): void; + /** + * The **`delete()`** method of the URLSearchParams interface deletes specified parameters and their associated value(s) from the list of all search parameters. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URLSearchParams/delete) + */ + delete(name: string, value?: string): void; + /** + * The **`get()`** method of the URLSearchParams interface returns the first value associated to the given search parameter. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URLSearchParams/get) + */ + get(name: string): string | null; + /** + * The **`getAll()`** method of the URLSearchParams interface returns all the values associated with a given search parameter as an array. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URLSearchParams/getAll) + */ + getAll(name: string): string[]; + /** + * The **`has()`** method of the URLSearchParams interface returns a boolean value that indicates whether the specified parameter is in the search parameters. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URLSearchParams/has) + */ + has(name: string, value?: string): boolean; + /** + * The **`set()`** method of the URLSearchParams interface sets the value associated with a given search parameter to the given value. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URLSearchParams/set) + */ + set(name: string, value: string): void; + /** + * The **`URLSearchParams.sort()`** method sorts all key/value pairs contained in this object in place and returns `undefined`. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/URLSearchParams/sort) + */ + sort(): void; + /* Returns an array of key, value pairs for every entry in the search params. 
*/ + entries(): IterableIterator<[key: string, value: string]>; + /* Returns a list of keys in the search params. */ + keys(): IterableIterator; + /* Returns a list of values in the search params. */ + values(): IterableIterator; + forEach( + callback: (this: This, value: string, key: string, parent: URLSearchParams) => void, + thisArg?: This, + ): void; + /*function toString() { [native code] }*/ + toString(): string; + [Symbol.iterator](): IterableIterator<[key: string, value: string]>; +} +declare class URLPattern { + constructor( + input?: string | URLPatternInit, + baseURL?: string | URLPatternOptions, + patternOptions?: URLPatternOptions, + ); + get protocol(): string; + get username(): string; + get password(): string; + get hostname(): string; + get port(): string; + get pathname(): string; + get search(): string; + get hash(): string; + get hasRegExpGroups(): boolean; + test(input?: string | URLPatternInit, baseURL?: string): boolean; + exec(input?: string | URLPatternInit, baseURL?: string): URLPatternResult | null; +} +interface URLPatternInit { + protocol?: string; + username?: string; + password?: string; + hostname?: string; + port?: string; + pathname?: string; + search?: string; + hash?: string; + baseURL?: string; +} +interface URLPatternComponentResult { + input: string; + groups: Record; +} +interface URLPatternResult { + inputs: (string | URLPatternInit)[]; + protocol: URLPatternComponentResult; + username: URLPatternComponentResult; + password: URLPatternComponentResult; + hostname: URLPatternComponentResult; + port: URLPatternComponentResult; + pathname: URLPatternComponentResult; + search: URLPatternComponentResult; + hash: URLPatternComponentResult; +} +interface URLPatternOptions { + ignoreCase?: boolean; +} +/** + * A `CloseEvent` is sent to clients using WebSockets when the connection is closed. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CloseEvent) + */ +declare class CloseEvent extends Event { + constructor(type: string, initializer?: CloseEventInit); + /** + * The **`code`** read-only property of the CloseEvent interface returns a WebSocket connection close code indicating the reason the connection was closed. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CloseEvent/code) + */ + readonly code: number; + /** + * The **`reason`** read-only property of the CloseEvent interface returns the WebSocket connection close reason the server gave for closing the connection; that is, a concise human-readable prose explanation for the closure. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CloseEvent/reason) + */ + readonly reason: string; + /** + * The **`wasClean`** read-only property of the CloseEvent interface returns `true` if the connection closed cleanly. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/CloseEvent/wasClean) + */ + readonly wasClean: boolean; +} +interface CloseEventInit { + code?: number; + reason?: string; + wasClean?: boolean; +} +type WebSocketEventMap = { + close: CloseEvent; + message: MessageEvent; + open: Event; + error: ErrorEvent; +}; +/** + * The `WebSocket` object provides the API for creating and managing a WebSocket connection to a server, as well as for sending and receiving data on the connection. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WebSocket) + */ +declare var WebSocket: { + prototype: WebSocket; + new (url: string, protocols?: string[] | string): WebSocket; + readonly READY_STATE_CONNECTING: number; + readonly CONNECTING: number; + readonly READY_STATE_OPEN: number; + readonly OPEN: number; + readonly READY_STATE_CLOSING: number; + readonly CLOSING: number; + readonly READY_STATE_CLOSED: number; + readonly CLOSED: number; +}; +/** + * The `WebSocket` object provides the API for creating and managing a WebSocket connection to a server, as well as for sending and receiving data on the connection. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WebSocket) + */ +interface WebSocket extends EventTarget { + accept(): void; + /** + * The **`WebSocket.send()`** method enqueues the specified data to be transmitted to the server over the WebSocket connection, increasing the value of `bufferedAmount` by the number of bytes needed to contain the data. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WebSocket/send) + */ + send(message: (ArrayBuffer | ArrayBufferView) | string): void; + /** + * The **`WebSocket.close()`** method closes the already `CLOSED`, this method does nothing. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WebSocket/close) + */ + close(code?: number, reason?: string): void; + serializeAttachment(attachment: any): void; + deserializeAttachment(): any | null; + /** + * The **`WebSocket.readyState`** read-only property returns the current state of the WebSocket connection. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WebSocket/readyState) + */ + readyState: number; + /** + * The **`WebSocket.url`** read-only property returns the absolute URL of the WebSocket as resolved by the constructor. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WebSocket/url) + */ + url: string | null; + /** + * The **`WebSocket.protocol`** read-only property returns the name of the sub-protocol the server selected; this will be one of the strings specified in the `protocols` parameter when creating the WebSocket object, or the empty string if no connection is established. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WebSocket/protocol) + */ + protocol: string | null; + /** + * The **`WebSocket.extensions`** read-only property returns the extensions selected by the server. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WebSocket/extensions) + */ + extensions: string | null; + /** + * The **`WebSocket.binaryType`** property controls the type of binary data being received over the WebSocket connection. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/WebSocket/binaryType) + */ + binaryType: 'blob' | 'arraybuffer'; +} +declare const WebSocketPair: { + new (): { + 0: WebSocket; + 1: WebSocket; + }; +}; +interface SqlStorage { + exec>(query: string, ...bindings: any[]): SqlStorageCursor; + get databaseSize(): number; + Cursor: typeof SqlStorageCursor; + Statement: typeof SqlStorageStatement; +} +declare abstract class SqlStorageStatement {} +type SqlStorageValue = ArrayBuffer | string | number | null; +declare abstract class SqlStorageCursor> { + next(): + | { + done?: false; + value: T; + } + | { + done: true; + value?: never; + }; + toArray(): T[]; + one(): T; + raw(): IterableIterator; + columnNames: string[]; + get rowsRead(): number; + get rowsWritten(): number; + [Symbol.iterator](): IterableIterator; +} +interface Socket { + get readable(): ReadableStream; + get writable(): WritableStream; + get closed(): Promise; + get opened(): Promise; + get upgraded(): boolean; + get secureTransport(): 'on' | 'off' | 'starttls'; + close(): Promise; + startTls(options?: TlsOptions): Socket; +} +interface 
SocketOptions { + secureTransport?: string; + allowHalfOpen: boolean; + highWaterMark?: number | bigint; +} +interface SocketAddress { + hostname: string; + port: number; +} +interface TlsOptions { + expectedServerHostname?: string; +} +interface SocketInfo { + remoteAddress?: string; + localAddress?: string; +} +/** + * The **`EventSource`** interface is web content's interface to server-sent events. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventSource) + */ +declare class EventSource extends EventTarget { + constructor(url: string, init?: EventSourceEventSourceInit); + /** + * The **`close()`** method of the EventSource interface closes the connection, if one is made, and sets the ```js-nolint close() ``` None. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventSource/close) + */ + close(): void; + /** + * The **`url`** read-only property of the URL of the source. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventSource/url) + */ + get url(): string; + /** + * The **`withCredentials`** read-only property of the the `EventSource` object was instantiated with CORS credentials set. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventSource/withCredentials) + */ + get withCredentials(): boolean; + /** + * The **`readyState`** read-only property of the connection. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventSource/readyState) + */ + get readyState(): number; + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventSource/open_event) */ + get onopen(): any | null; + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventSource/open_event) */ + set onopen(value: any | null); + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventSource/message_event) */ + get onmessage(): any | null; + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventSource/message_event) */ + set onmessage(value: any | null); + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventSource/error_event) */ + get onerror(): any | null; + /* [MDN Reference](https://developer.mozilla.org/docs/Web/API/EventSource/error_event) */ + set onerror(value: any | null); + static readonly CONNECTING: number; + static readonly OPEN: number; + static readonly CLOSED: number; + static from(stream: ReadableStream): EventSource; +} +interface EventSourceEventSourceInit { + withCredentials?: boolean; + fetcher?: Fetcher; +} +interface Container { + get running(): boolean; + start(options?: ContainerStartupOptions): void; + monitor(): Promise; + destroy(error?: any): Promise; + signal(signo: number): void; + getTcpPort(port: number): Fetcher; + setInactivityTimeout(durationMs: number | bigint): Promise; + interceptOutboundHttp(addr: string, binding: Fetcher): Promise; + interceptAllOutboundHttp(binding: Fetcher): Promise; +} +interface ContainerStartupOptions { + entrypoint?: string[]; + enableInternet: boolean; + env?: Record; + hardTimeout?: number | bigint; +} +/** + * The **`MessagePort`** interface of the Channel Messaging API represents one of the two ports of a MessageChannel, allowing messages to be sent from one port and listening out for them arriving at the other. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessagePort) + */ +declare abstract class MessagePort extends EventTarget { + /** + * The **`postMessage()`** method of the transfers ownership of objects to other browsing contexts. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessagePort/postMessage) + */ + postMessage(data?: any, options?: any[] | MessagePortPostMessageOptions): void; + /** + * The **`close()`** method of the MessagePort interface disconnects the port, so it is no longer active. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessagePort/close) + */ + close(): void; + /** + * The **`start()`** method of the MessagePort interface starts the sending of messages queued on the port. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessagePort/start) + */ + start(): void; + get onmessage(): any | null; + set onmessage(value: any | null); +} +/** + * The **`MessageChannel`** interface of the Channel Messaging API allows us to create a new message channel and send data through it via its two MessagePort properties. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessageChannel) + */ +declare class MessageChannel { + constructor(); + /** + * The **`port1`** read-only property of the the port attached to the context that originated the channel. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessageChannel/port1) + */ + readonly port1: MessagePort; + /** + * The **`port2`** read-only property of the the port attached to the context at the other end of the channel, which the message is initially sent to. 
+ * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/MessageChannel/port2) + */ + readonly port2: MessagePort; +} +interface MessagePortPostMessageOptions { + transfer?: any[]; +} +type LoopbackForExport< + T extends (new (...args: any[]) => Rpc.EntrypointBranded) | ExportedHandler | undefined = undefined, +> = T extends new (...args: any[]) => Rpc.WorkerEntrypointBranded + ? LoopbackServiceStub> + : T extends new (...args: any[]) => Rpc.DurableObjectBranded + ? LoopbackDurableObjectClass> + : T extends ExportedHandler + ? LoopbackServiceStub + : undefined; +type LoopbackServiceStub = Fetcher & + (T extends CloudflareWorkersModule.WorkerEntrypoint + ? (opts: { props?: Props }) => Fetcher + : (opts: { props?: any }) => Fetcher); +type LoopbackDurableObjectClass = DurableObjectClass & + (T extends CloudflareWorkersModule.DurableObject + ? (opts: { props?: Props }) => DurableObjectClass + : (opts: { props?: any }) => DurableObjectClass); +interface SyncKvStorage { + get(key: string): T | undefined; + list(options?: SyncKvListOptions): Iterable<[string, T]>; + put(key: string, value: T): void; + delete(key: string): boolean; +} +interface SyncKvListOptions { + start?: string; + startAfter?: string; + end?: string; + prefix?: string; + reverse?: boolean; + limit?: number; +} +interface WorkerStub { + getEntrypoint( + name?: string, + options?: WorkerStubEntrypointOptions, + ): Fetcher; +} +interface WorkerStubEntrypointOptions { + props?: any; +} +interface WorkerLoader { + get(name: string | null, getCode: () => WorkerLoaderWorkerCode | Promise): WorkerStub; +} +interface WorkerLoaderModule { + js?: string; + cjs?: string; + text?: string; + data?: ArrayBuffer; + json?: any; + py?: string; + wasm?: ArrayBuffer; +} +interface WorkerLoaderWorkerCode { + compatibilityDate: string; + compatibilityFlags?: string[]; + allowExperimental?: boolean; + mainModule: string; + modules: Record; + env?: any; + globalOutbound?: Fetcher | null; + tails?: Fetcher[]; + 
streamingTails?: Fetcher[]; +} +/** + * The Workers runtime supports a subset of the Performance API, used to measure timing and performance, + * as well as timing of subrequests and other operations. + * + * [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/performance/) + */ +declare abstract class Performance { + /* [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/performance/#performancetimeorigin) */ + get timeOrigin(): number; + /* [Cloudflare Docs Reference](https://developers.cloudflare.com/workers/runtime-apis/performance/#performancenow) */ + now(): number; + /** + * The **`toJSON()`** method of the Performance interface is a Serialization; it returns a JSON representation of the Performance object. + * + * [MDN Reference](https://developer.mozilla.org/docs/Web/API/Performance/toJSON) + */ + toJSON(): object; +} +// AI Search V2 API Error Interfaces +interface AiSearchInternalError extends Error {} +interface AiSearchNotFoundError extends Error {} +interface AiSearchNameNotSetError extends Error {} +// AI Search V2 Request Types +type AiSearchSearchRequest = { + messages: Array<{ + role: 'system' | 'developer' | 'user' | 'assistant' | 'tool'; + content: string | null; + }>; + ai_search_options?: { + retrieval?: { + retrieval_type?: 'vector' | 'keyword' | 'hybrid'; + /** Match threshold (0-1, default 0.4) */ + match_threshold?: number; + /** Maximum number of results (1-50, default 10) */ + max_num_results?: number; + filters?: VectorizeVectorMetadataFilter; + /** Context expansion (0-3, default 0) */ + context_expansion?: number; + [key: string]: unknown; + }; + query_rewrite?: { + enabled?: boolean; + model?: string; + rewrite_prompt?: string; + [key: string]: unknown; + }; + reranking?: { + /** Enable reranking (default false) */ + enabled?: boolean; + model?: '@cf/baai/bge-reranker-base' | ''; + /** Match threshold (0-1, default 0.4) */ + match_threshold?: number; + [key: string]: 
unknown; + }; + [key: string]: unknown; + }; +}; +type AiSearchChatCompletionsRequest = { + messages: Array<{ + role: 'system' | 'developer' | 'user' | 'assistant' | 'tool'; + content: string | null; + }>; + model?: string; + stream?: boolean; + ai_search_options?: { + retrieval?: { + retrieval_type?: 'vector' | 'keyword' | 'hybrid'; + match_threshold?: number; + max_num_results?: number; + filters?: VectorizeVectorMetadataFilter; + context_expansion?: number; + [key: string]: unknown; + }; + query_rewrite?: { + enabled?: boolean; + model?: string; + rewrite_prompt?: string; + [key: string]: unknown; + }; + reranking?: { + enabled?: boolean; + model?: '@cf/baai/bge-reranker-base' | ''; + match_threshold?: number; + [key: string]: unknown; + }; + [key: string]: unknown; + }; + [key: string]: unknown; +}; +// AI Search V2 Response Types +type AiSearchSearchResponse = { + search_query: string; + chunks: Array<{ + id: string; + type: string; + /** Match score (0-1) */ + score: number; + text: string; + item: { + timestamp?: number; + key: string; + metadata?: Record; + }; + scoring_details?: { + /** Keyword match score (0-1) */ + keyword_score?: number; + /** Vector similarity score (0-1) */ + vector_score?: number; + }; + }>; +}; +type AiSearchListResponse = Array<{ + id: string; + internal_id?: string; + account_id?: string; + account_tag?: string; + /** Whether the instance is enabled (default true) */ + enable?: boolean; + type?: 'r2' | 'web-crawler'; + source?: string; + [key: string]: unknown; +}>; +type AiSearchConfig = { + /** Instance ID (1-32 chars, pattern: ^[a-z0-9_]+(?:-[a-z0-9_]+)*$) */ + id: string; + type: 'r2' | 'web-crawler'; + source: string; + source_params?: object; + /** Token ID (UUID format) */ + token_id?: string; + ai_gateway_id?: string; + /** Enable query rewriting (default false) */ + rewrite_query?: boolean; + /** Enable reranking (default false) */ + reranking?: boolean; + embedding_model?: string; + ai_search_model?: string; +}; +type 
AiSearchInstance = { + id: string; + enable?: boolean; + type?: 'r2' | 'web-crawler'; + source?: string; + [key: string]: unknown; +}; +// AI Search Instance Service - Instance-level operations +declare abstract class AiSearchInstanceService { + /** + * Search the AI Search instance for relevant chunks. + * @param params Search request with messages and AI search options + * @returns Search response with matching chunks + */ + search(params: AiSearchSearchRequest): Promise; + /** + * Generate chat completions with AI Search context. + * @param params Chat completions request with optional streaming + * @returns Response object (if streaming) or chat completion result + */ + chatCompletions(params: AiSearchChatCompletionsRequest): Promise; + /** + * Delete this AI Search instance. + */ + delete(): Promise; +} +// AI Search Account Service - Account-level operations +declare abstract class AiSearchAccountService { + /** + * List all AI Search instances in the account. + * @returns Array of AI Search instances + */ + list(): Promise; + /** + * Get an AI Search instance by ID. + * @param name Instance ID + * @returns Instance service for performing operations + */ + get(name: string): AiSearchInstanceService; + /** + * Create a new AI Search instance. 
+ * @param config Instance configuration + * @returns Instance service for performing operations + */ + create(config: AiSearchConfig): Promise; +} +type AiImageClassificationInput = { + image: number[]; +}; +type AiImageClassificationOutput = { + score?: number; + label?: string; +}[]; +declare abstract class BaseAiImageClassification { + inputs: AiImageClassificationInput; + postProcessedOutputs: AiImageClassificationOutput; +} +type AiImageToTextInput = { + image: number[]; + prompt?: string; + max_tokens?: number; + temperature?: number; + top_p?: number; + top_k?: number; + seed?: number; + repetition_penalty?: number; + frequency_penalty?: number; + presence_penalty?: number; + raw?: boolean; + messages?: RoleScopedChatInput[]; +}; +type AiImageToTextOutput = { + description: string; +}; +declare abstract class BaseAiImageToText { + inputs: AiImageToTextInput; + postProcessedOutputs: AiImageToTextOutput; +} +type AiImageTextToTextInput = { + image: string; + prompt?: string; + max_tokens?: number; + temperature?: number; + ignore_eos?: boolean; + top_p?: number; + top_k?: number; + seed?: number; + repetition_penalty?: number; + frequency_penalty?: number; + presence_penalty?: number; + raw?: boolean; + messages?: RoleScopedChatInput[]; +}; +type AiImageTextToTextOutput = { + description: string; +}; +declare abstract class BaseAiImageTextToText { + inputs: AiImageTextToTextInput; + postProcessedOutputs: AiImageTextToTextOutput; +} +type AiMultimodalEmbeddingsInput = { + image: string; + text: string[]; +}; +type AiIMultimodalEmbeddingsOutput = { + data: number[][]; + shape: number[]; +}; +declare abstract class BaseAiMultimodalEmbeddings { + inputs: AiImageTextToTextInput; + postProcessedOutputs: AiImageTextToTextOutput; +} +type AiObjectDetectionInput = { + image: number[]; +}; +type AiObjectDetectionOutput = { + score?: number; + label?: string; +}[]; +declare abstract class BaseAiObjectDetection { + inputs: AiObjectDetectionInput; + postProcessedOutputs: 
AiObjectDetectionOutput; +} +type AiSentenceSimilarityInput = { + source: string; + sentences: string[]; +}; +type AiSentenceSimilarityOutput = number[]; +declare abstract class BaseAiSentenceSimilarity { + inputs: AiSentenceSimilarityInput; + postProcessedOutputs: AiSentenceSimilarityOutput; +} +type AiAutomaticSpeechRecognitionInput = { + audio: number[]; +}; +type AiAutomaticSpeechRecognitionOutput = { + text?: string; + words?: { + word: string; + start: number; + end: number; + }[]; + vtt?: string; +}; +declare abstract class BaseAiAutomaticSpeechRecognition { + inputs: AiAutomaticSpeechRecognitionInput; + postProcessedOutputs: AiAutomaticSpeechRecognitionOutput; +} +type AiSummarizationInput = { + input_text: string; + max_length?: number; +}; +type AiSummarizationOutput = { + summary: string; +}; +declare abstract class BaseAiSummarization { + inputs: AiSummarizationInput; + postProcessedOutputs: AiSummarizationOutput; +} +type AiTextClassificationInput = { + text: string; +}; +type AiTextClassificationOutput = { + score?: number; + label?: string; +}[]; +declare abstract class BaseAiTextClassification { + inputs: AiTextClassificationInput; + postProcessedOutputs: AiTextClassificationOutput; +} +type AiTextEmbeddingsInput = { + text: string | string[]; +}; +type AiTextEmbeddingsOutput = { + shape: number[]; + data: number[][]; +}; +declare abstract class BaseAiTextEmbeddings { + inputs: AiTextEmbeddingsInput; + postProcessedOutputs: AiTextEmbeddingsOutput; +} +type RoleScopedChatInput = { + role: 'user' | 'assistant' | 'system' | 'tool' | (string & NonNullable); + content: string; + name?: string; +}; +type AiTextGenerationToolLegacyInput = { + name: string; + description: string; + parameters?: { + type: 'object' | (string & NonNullable); + properties: { + [key: string]: { + type: string; + description?: string; + }; + }; + required: string[]; + }; +}; +type AiTextGenerationToolInput = { + type: 'function' | (string & NonNullable); + function: { + name: 
string; + description: string; + parameters?: { + type: 'object' | (string & NonNullable); + properties: { + [key: string]: { + type: string; + description?: string; + }; + }; + required: string[]; + }; + }; +}; +type AiTextGenerationFunctionsInput = { + name: string; + code: string; +}; +type AiTextGenerationResponseFormat = { + type: string; + json_schema?: any; +}; +type AiTextGenerationInput = { + prompt?: string; + raw?: boolean; + stream?: boolean; + max_tokens?: number; + temperature?: number; + top_p?: number; + top_k?: number; + seed?: number; + repetition_penalty?: number; + frequency_penalty?: number; + presence_penalty?: number; + messages?: RoleScopedChatInput[]; + response_format?: AiTextGenerationResponseFormat; + tools?: AiTextGenerationToolInput[] | AiTextGenerationToolLegacyInput[] | (object & NonNullable); + functions?: AiTextGenerationFunctionsInput[]; +}; +type AiTextGenerationToolLegacyOutput = { + name: string; + arguments: unknown; +}; +type AiTextGenerationToolOutput = { + id: string; + type: 'function'; + function: { + name: string; + arguments: string; + }; +}; +type UsageTags = { + prompt_tokens: number; + completion_tokens: number; + total_tokens: number; +}; +type AiTextGenerationOutput = { + response?: string; + tool_calls?: AiTextGenerationToolLegacyOutput[] & AiTextGenerationToolOutput[]; + usage?: UsageTags; +}; +declare abstract class BaseAiTextGeneration { + inputs: AiTextGenerationInput; + postProcessedOutputs: AiTextGenerationOutput; +} +type AiTextToSpeechInput = { + prompt: string; + lang?: string; +}; +type AiTextToSpeechOutput = + | Uint8Array + | { + audio: string; + }; +declare abstract class BaseAiTextToSpeech { + inputs: AiTextToSpeechInput; + postProcessedOutputs: AiTextToSpeechOutput; +} +type AiTextToImageInput = { + prompt: string; + negative_prompt?: string; + height?: number; + width?: number; + image?: number[]; + image_b64?: string; + mask?: number[]; + num_steps?: number; + strength?: number; + guidance?: 
number; + seed?: number; +}; +type AiTextToImageOutput = ReadableStream; +declare abstract class BaseAiTextToImage { + inputs: AiTextToImageInput; + postProcessedOutputs: AiTextToImageOutput; +} +type AiTranslationInput = { + text: string; + target_lang: string; + source_lang?: string; +}; +type AiTranslationOutput = { + translated_text?: string; +}; +declare abstract class BaseAiTranslation { + inputs: AiTranslationInput; + postProcessedOutputs: AiTranslationOutput; +} +/** + * Workers AI support for OpenAI's Responses API + * Reference: https://github.com/openai/openai-node/blob/master/src/resources/responses/responses.ts + * + * It's a stripped down version from its source. + * It currently supports basic function calling, json mode and accepts images as input. + * + * It does not include types for WebSearch, CodeInterpreter, FileInputs, MCP, CustomTools. + * We plan to add those incrementally as model + platform capabilities evolve. + */ +type ResponsesInput = { + background?: boolean | null; + conversation?: string | ResponseConversationParam | null; + include?: Array | null; + input?: string | ResponseInput; + instructions?: string | null; + max_output_tokens?: number | null; + parallel_tool_calls?: boolean | null; + previous_response_id?: string | null; + prompt_cache_key?: string; + reasoning?: Reasoning | null; + safety_identifier?: string; + service_tier?: 'auto' | 'default' | 'flex' | 'scale' | 'priority' | null; + stream?: boolean | null; + stream_options?: StreamOptions | null; + temperature?: number | null; + text?: ResponseTextConfig; + tool_choice?: ToolChoiceOptions | ToolChoiceFunction; + tools?: Array; + top_p?: number | null; + truncation?: 'auto' | 'disabled' | null; +}; +type ResponsesOutput = { + id?: string; + created_at?: number; + output_text?: string; + error?: ResponseError | null; + incomplete_details?: ResponseIncompleteDetails | null; + instructions?: string | Array | null; + object?: 'response'; + output?: Array; + 
parallel_tool_calls?: boolean; + temperature?: number | null; + tool_choice?: ToolChoiceOptions | ToolChoiceFunction; + tools?: Array; + top_p?: number | null; + max_output_tokens?: number | null; + previous_response_id?: string | null; + prompt?: ResponsePrompt | null; + reasoning?: Reasoning | null; + safety_identifier?: string; + service_tier?: 'auto' | 'default' | 'flex' | 'scale' | 'priority' | null; + status?: ResponseStatus; + text?: ResponseTextConfig; + truncation?: 'auto' | 'disabled' | null; + usage?: ResponseUsage; +}; +type EasyInputMessage = { + content: string | ResponseInputMessageContentList; + role: 'user' | 'assistant' | 'system' | 'developer'; + type?: 'message'; +}; +type ResponsesFunctionTool = { + name: string; + parameters: { + [key: string]: unknown; + } | null; + strict: boolean | null; + type: 'function'; + description?: string | null; +}; +type ResponseIncompleteDetails = { + reason?: 'max_output_tokens' | 'content_filter'; +}; +type ResponsePrompt = { + id: string; + variables?: { + [key: string]: string | ResponseInputText | ResponseInputImage; + } | null; + version?: string | null; +}; +type Reasoning = { + effort?: ReasoningEffort | null; + generate_summary?: 'auto' | 'concise' | 'detailed' | null; + summary?: 'auto' | 'concise' | 'detailed' | null; +}; +type ResponseContent = + | ResponseInputText + | ResponseInputImage + | ResponseOutputText + | ResponseOutputRefusal + | ResponseContentReasoningText; +type ResponseContentReasoningText = { + text: string; + type: 'reasoning_text'; +}; +type ResponseConversationParam = { + id: string; +}; +type ResponseCreatedEvent = { + response: Response; + sequence_number: number; + type: 'response.created'; +}; +type ResponseCustomToolCallOutput = { + call_id: string; + output: string | Array; + type: 'custom_tool_call_output'; + id?: string; +}; +type ResponseError = { + code: + | 'server_error' + | 'rate_limit_exceeded' + | 'invalid_prompt' + | 'vector_store_timeout' + | 'invalid_image' + | 
'invalid_image_format' + | 'invalid_base64_image' + | 'invalid_image_url' + | 'image_too_large' + | 'image_too_small' + | 'image_parse_error' + | 'image_content_policy_violation' + | 'invalid_image_mode' + | 'image_file_too_large' + | 'unsupported_image_media_type' + | 'empty_image_file' + | 'failed_to_download_image' + | 'image_file_not_found'; + message: string; +}; +type ResponseErrorEvent = { + code: string | null; + message: string; + param: string | null; + sequence_number: number; + type: 'error'; +}; +type ResponseFailedEvent = { + response: Response; + sequence_number: number; + type: 'response.failed'; +}; +type ResponseFormatText = { + type: 'text'; +}; +type ResponseFormatJSONObject = { + type: 'json_object'; +}; +type ResponseFormatTextConfig = ResponseFormatText | ResponseFormatTextJSONSchemaConfig | ResponseFormatJSONObject; +type ResponseFormatTextJSONSchemaConfig = { + name: string; + schema: { + [key: string]: unknown; + }; + type: 'json_schema'; + description?: string; + strict?: boolean | null; +}; +type ResponseFunctionCallArgumentsDeltaEvent = { + delta: string; + item_id: string; + output_index: number; + sequence_number: number; + type: 'response.function_call_arguments.delta'; +}; +type ResponseFunctionCallArgumentsDoneEvent = { + arguments: string; + item_id: string; + name: string; + output_index: number; + sequence_number: number; + type: 'response.function_call_arguments.done'; +}; +type ResponseFunctionCallOutputItem = ResponseInputTextContent | ResponseInputImageContent; +type ResponseFunctionCallOutputItemList = Array; +type ResponseFunctionToolCall = { + arguments: string; + call_id: string; + name: string; + type: 'function_call'; + id?: string; + status?: 'in_progress' | 'completed' | 'incomplete'; +}; +interface ResponseFunctionToolCallItem extends ResponseFunctionToolCall { + id: string; +} +type ResponseFunctionToolCallOutputItem = { + id: string; + call_id: string; + output: string | Array; + type: 'function_call_output'; + 
status?: 'in_progress' | 'completed' | 'incomplete'; +}; +type ResponseIncludable = 'message.input_image.image_url' | 'message.output_text.logprobs'; +type ResponseIncompleteEvent = { + response: Response; + sequence_number: number; + type: 'response.incomplete'; +}; +type ResponseInput = Array; +type ResponseInputContent = ResponseInputText | ResponseInputImage; +type ResponseInputImage = { + detail: 'low' | 'high' | 'auto'; + type: 'input_image'; + /** + * Base64 encoded image + */ + image_url?: string | null; +}; +type ResponseInputImageContent = { + type: 'input_image'; + detail?: 'low' | 'high' | 'auto' | null; + /** + * Base64 encoded image + */ + image_url?: string | null; +}; +type ResponseInputItem = + | EasyInputMessage + | ResponseInputItemMessage + | ResponseOutputMessage + | ResponseFunctionToolCall + | ResponseInputItemFunctionCallOutput + | ResponseReasoningItem; +type ResponseInputItemFunctionCallOutput = { + call_id: string; + output: string | ResponseFunctionCallOutputItemList; + type: 'function_call_output'; + id?: string | null; + status?: 'in_progress' | 'completed' | 'incomplete' | null; +}; +type ResponseInputItemMessage = { + content: ResponseInputMessageContentList; + role: 'user' | 'system' | 'developer'; + status?: 'in_progress' | 'completed' | 'incomplete'; + type?: 'message'; +}; +type ResponseInputMessageContentList = Array; +type ResponseInputMessageItem = { + id: string; + content: ResponseInputMessageContentList; + role: 'user' | 'system' | 'developer'; + status?: 'in_progress' | 'completed' | 'incomplete'; + type?: 'message'; +}; +type ResponseInputText = { + text: string; + type: 'input_text'; +}; +type ResponseInputTextContent = { + text: string; + type: 'input_text'; +}; +type ResponseItem = + | ResponseInputMessageItem + | ResponseOutputMessage + | ResponseFunctionToolCallItem + | ResponseFunctionToolCallOutputItem; +type ResponseOutputItem = ResponseOutputMessage | ResponseFunctionToolCall | ResponseReasoningItem; +type 
ResponseOutputItemAddedEvent = { + item: ResponseOutputItem; + output_index: number; + sequence_number: number; + type: 'response.output_item.added'; +}; +type ResponseOutputItemDoneEvent = { + item: ResponseOutputItem; + output_index: number; + sequence_number: number; + type: 'response.output_item.done'; +}; +type ResponseOutputMessage = { + id: string; + content: Array; + role: 'assistant'; + status: 'in_progress' | 'completed' | 'incomplete'; + type: 'message'; +}; +type ResponseOutputRefusal = { + refusal: string; + type: 'refusal'; +}; +type ResponseOutputText = { + text: string; + type: 'output_text'; + logprobs?: Array; +}; +type ResponseReasoningItem = { + id: string; + summary: Array; + type: 'reasoning'; + content?: Array; + encrypted_content?: string | null; + status?: 'in_progress' | 'completed' | 'incomplete'; +}; +type ResponseReasoningSummaryItem = { + text: string; + type: 'summary_text'; +}; +type ResponseReasoningContentItem = { + text: string; + type: 'reasoning_text'; +}; +type ResponseReasoningTextDeltaEvent = { + content_index: number; + delta: string; + item_id: string; + output_index: number; + sequence_number: number; + type: 'response.reasoning_text.delta'; +}; +type ResponseReasoningTextDoneEvent = { + content_index: number; + item_id: string; + output_index: number; + sequence_number: number; + text: string; + type: 'response.reasoning_text.done'; +}; +type ResponseRefusalDeltaEvent = { + content_index: number; + delta: string; + item_id: string; + output_index: number; + sequence_number: number; + type: 'response.refusal.delta'; +}; +type ResponseRefusalDoneEvent = { + content_index: number; + item_id: string; + output_index: number; + refusal: string; + sequence_number: number; + type: 'response.refusal.done'; +}; +type ResponseStatus = 'completed' | 'failed' | 'in_progress' | 'cancelled' | 'queued' | 'incomplete'; +type ResponseStreamEvent = + | ResponseCompletedEvent + | ResponseCreatedEvent + | ResponseErrorEvent + | 
ResponseFunctionCallArgumentsDeltaEvent + | ResponseFunctionCallArgumentsDoneEvent + | ResponseFailedEvent + | ResponseIncompleteEvent + | ResponseOutputItemAddedEvent + | ResponseOutputItemDoneEvent + | ResponseReasoningTextDeltaEvent + | ResponseReasoningTextDoneEvent + | ResponseRefusalDeltaEvent + | ResponseRefusalDoneEvent + | ResponseTextDeltaEvent + | ResponseTextDoneEvent; +type ResponseCompletedEvent = { + response: Response; + sequence_number: number; + type: 'response.completed'; +}; +type ResponseTextConfig = { + format?: ResponseFormatTextConfig; + verbosity?: 'low' | 'medium' | 'high' | null; +}; +type ResponseTextDeltaEvent = { + content_index: number; + delta: string; + item_id: string; + logprobs: Array; + output_index: number; + sequence_number: number; + type: 'response.output_text.delta'; +}; +type ResponseTextDoneEvent = { + content_index: number; + item_id: string; + logprobs: Array; + output_index: number; + sequence_number: number; + text: string; + type: 'response.output_text.done'; +}; +type Logprob = { + token: string; + logprob: number; + top_logprobs?: Array; +}; +type TopLogprob = { + token?: string; + logprob?: number; +}; +type ResponseUsage = { + input_tokens: number; + output_tokens: number; + total_tokens: number; +}; +type Tool = ResponsesFunctionTool; +type ToolChoiceFunction = { + name: string; + type: 'function'; +}; +type ToolChoiceOptions = 'none'; +type ReasoningEffort = 'minimal' | 'low' | 'medium' | 'high' | null; +type StreamOptions = { + include_obfuscation?: boolean; +}; +type Ai_Cf_Baai_Bge_Base_En_V1_5_Input = + | { + text: string | string[]; + /** + * The pooling method used in the embedding process. `cls` pooling will generate more accurate embeddings on larger inputs - however, embeddings created with cls pooling are not compatible with embeddings generated with mean pooling. 
The default pooling method is `mean` in order for this to not be a breaking change, but we highly suggest using the new `cls` pooling for better accuracy. + */ + pooling?: 'mean' | 'cls'; + } + | { + /** + * Batch of the embeddings requests to run using async-queue + */ + requests: { + text: string | string[]; + /** + * The pooling method used in the embedding process. `cls` pooling will generate more accurate embeddings on larger inputs - however, embeddings created with cls pooling are not compatible with embeddings generated with mean pooling. The default pooling method is `mean` in order for this to not be a breaking change, but we highly suggest using the new `cls` pooling for better accuracy. + */ + pooling?: 'mean' | 'cls'; + }[]; + }; +type Ai_Cf_Baai_Bge_Base_En_V1_5_Output = + | { + shape?: number[]; + /** + * Embeddings of the requested text values + */ + data?: number[][]; + /** + * The pooling method used in the embedding process. + */ + pooling?: 'mean' | 'cls'; + } + | Ai_Cf_Baai_Bge_Base_En_V1_5_AsyncResponse; +interface Ai_Cf_Baai_Bge_Base_En_V1_5_AsyncResponse { + /** + * The async request id that can be used to obtain the results. 
+ */ + request_id?: string; +} +declare abstract class Base_Ai_Cf_Baai_Bge_Base_En_V1_5 { + inputs: Ai_Cf_Baai_Bge_Base_En_V1_5_Input; + postProcessedOutputs: Ai_Cf_Baai_Bge_Base_En_V1_5_Output; +} +type Ai_Cf_Openai_Whisper_Input = + | string + | { + /** + * An array of integers that represent the audio data constrained to 8-bit unsigned integer values + */ + audio: number[]; + }; +interface Ai_Cf_Openai_Whisper_Output { + /** + * The transcription + */ + text: string; + word_count?: number; + words?: { + word?: string; + /** + * The second this word begins in the recording + */ + start?: number; + /** + * The ending second when the word completes + */ + end?: number; + }[]; + vtt?: string; +} +declare abstract class Base_Ai_Cf_Openai_Whisper { + inputs: Ai_Cf_Openai_Whisper_Input; + postProcessedOutputs: Ai_Cf_Openai_Whisper_Output; +} +type Ai_Cf_Meta_M2M100_1_2B_Input = + | { + /** + * The text to be translated + */ + text: string; + /** + * The language code of the source text (e.g., 'en' for English). Defaults to 'en' if not specified + */ + source_lang?: string; + /** + * The language code to translate the text into (e.g., 'es' for Spanish) + */ + target_lang: string; + } + | { + /** + * Batch of the embeddings requests to run using async-queue + */ + requests: { + /** + * The text to be translated + */ + text: string; + /** + * The language code of the source text (e.g., 'en' for English). Defaults to 'en' if not specified + */ + source_lang?: string; + /** + * The language code to translate the text into (e.g., 'es' for Spanish) + */ + target_lang: string; + }[]; + }; +type Ai_Cf_Meta_M2M100_1_2B_Output = + | { + /** + * The translated text in the target language + */ + translated_text?: string; + } + | Ai_Cf_Meta_M2M100_1_2B_AsyncResponse; +interface Ai_Cf_Meta_M2M100_1_2B_AsyncResponse { + /** + * The async request id that can be used to obtain the results. 
+ */ + request_id?: string; +} +declare abstract class Base_Ai_Cf_Meta_M2M100_1_2B { + inputs: Ai_Cf_Meta_M2M100_1_2B_Input; + postProcessedOutputs: Ai_Cf_Meta_M2M100_1_2B_Output; +} +type Ai_Cf_Baai_Bge_Small_En_V1_5_Input = + | { + text: string | string[]; + /** + * The pooling method used in the embedding process. `cls` pooling will generate more accurate embeddings on larger inputs - however, embeddings created with cls pooling are not compatible with embeddings generated with mean pooling. The default pooling method is `mean` in order for this to not be a breaking change, but we highly suggest using the new `cls` pooling for better accuracy. + */ + pooling?: 'mean' | 'cls'; + } + | { + /** + * Batch of the embeddings requests to run using async-queue + */ + requests: { + text: string | string[]; + /** + * The pooling method used in the embedding process. `cls` pooling will generate more accurate embeddings on larger inputs - however, embeddings created with cls pooling are not compatible with embeddings generated with mean pooling. The default pooling method is `mean` in order for this to not be a breaking change, but we highly suggest using the new `cls` pooling for better accuracy. + */ + pooling?: 'mean' | 'cls'; + }[]; + }; +type Ai_Cf_Baai_Bge_Small_En_V1_5_Output = + | { + shape?: number[]; + /** + * Embeddings of the requested text values + */ + data?: number[][]; + /** + * The pooling method used in the embedding process. + */ + pooling?: 'mean' | 'cls'; + } + | Ai_Cf_Baai_Bge_Small_En_V1_5_AsyncResponse; +interface Ai_Cf_Baai_Bge_Small_En_V1_5_AsyncResponse { + /** + * The async request id that can be used to obtain the results. 
+ */ + request_id?: string; +} +declare abstract class Base_Ai_Cf_Baai_Bge_Small_En_V1_5 { + inputs: Ai_Cf_Baai_Bge_Small_En_V1_5_Input; + postProcessedOutputs: Ai_Cf_Baai_Bge_Small_En_V1_5_Output; +} +type Ai_Cf_Baai_Bge_Large_En_V1_5_Input = + | { + text: string | string[]; + /** + * The pooling method used in the embedding process. `cls` pooling will generate more accurate embeddings on larger inputs - however, embeddings created with cls pooling are not compatible with embeddings generated with mean pooling. The default pooling method is `mean` in order for this to not be a breaking change, but we highly suggest using the new `cls` pooling for better accuracy. + */ + pooling?: 'mean' | 'cls'; + } + | { + /** + * Batch of the embeddings requests to run using async-queue + */ + requests: { + text: string | string[]; + /** + * The pooling method used in the embedding process. `cls` pooling will generate more accurate embeddings on larger inputs - however, embeddings created with cls pooling are not compatible with embeddings generated with mean pooling. The default pooling method is `mean` in order for this to not be a breaking change, but we highly suggest using the new `cls` pooling for better accuracy. + */ + pooling?: 'mean' | 'cls'; + }[]; + }; +type Ai_Cf_Baai_Bge_Large_En_V1_5_Output = + | { + shape?: number[]; + /** + * Embeddings of the requested text values + */ + data?: number[][]; + /** + * The pooling method used in the embedding process. + */ + pooling?: 'mean' | 'cls'; + } + | Ai_Cf_Baai_Bge_Large_En_V1_5_AsyncResponse; +interface Ai_Cf_Baai_Bge_Large_En_V1_5_AsyncResponse { + /** + * The async request id that can be used to obtain the results. 
+ */ + request_id?: string; +} +declare abstract class Base_Ai_Cf_Baai_Bge_Large_En_V1_5 { + inputs: Ai_Cf_Baai_Bge_Large_En_V1_5_Input; + postProcessedOutputs: Ai_Cf_Baai_Bge_Large_En_V1_5_Output; +} +type Ai_Cf_Unum_Uform_Gen2_Qwen_500M_Input = + | string + | { + /** + * The input text prompt for the model to generate a response. + */ + prompt?: string; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * Controls the creativity of the AI's responses by adjusting how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; + image: number[] | (string & NonNullable); + /** + * The maximum number of tokens to generate in the response. 
+ */ + max_tokens?: number; + }; +interface Ai_Cf_Unum_Uform_Gen2_Qwen_500M_Output { + description?: string; +} +declare abstract class Base_Ai_Cf_Unum_Uform_Gen2_Qwen_500M { + inputs: Ai_Cf_Unum_Uform_Gen2_Qwen_500M_Input; + postProcessedOutputs: Ai_Cf_Unum_Uform_Gen2_Qwen_500M_Output; +} +type Ai_Cf_Openai_Whisper_Tiny_En_Input = + | string + | { + /** + * An array of integers that represent the audio data constrained to 8-bit unsigned integer values + */ + audio: number[]; + }; +interface Ai_Cf_Openai_Whisper_Tiny_En_Output { + /** + * The transcription + */ + text: string; + word_count?: number; + words?: { + word?: string; + /** + * The second this word begins in the recording + */ + start?: number; + /** + * The ending second when the word completes + */ + end?: number; + }[]; + vtt?: string; +} +declare abstract class Base_Ai_Cf_Openai_Whisper_Tiny_En { + inputs: Ai_Cf_Openai_Whisper_Tiny_En_Input; + postProcessedOutputs: Ai_Cf_Openai_Whisper_Tiny_En_Output; +} +interface Ai_Cf_Openai_Whisper_Large_V3_Turbo_Input { + /** + * Base64 encoded value of the audio data. + */ + audio: string; + /** + * Supported tasks are 'translate' or 'transcribe'. + */ + task?: string; + /** + * The language of the audio being transcribed or translated. + */ + language?: string; + /** + * Preprocess the audio with a voice activity detection model. + */ + vad_filter?: boolean; + /** + * A text prompt to help provide context to the model on the contents of the audio. + */ + initial_prompt?: string; + /** + * The prefix it appended the the beginning of the output of the transcription and can guide the transcription result. + */ + prefix?: string; +} +interface Ai_Cf_Openai_Whisper_Large_V3_Turbo_Output { + transcription_info?: { + /** + * The language of the audio being transcribed or translated. + */ + language?: string; + /** + * The confidence level or probability of the detected language being accurate, represented as a decimal between 0 and 1. 
+ */ + language_probability?: number; + /** + * The total duration of the original audio file, in seconds. + */ + duration?: number; + /** + * The duration of the audio after applying Voice Activity Detection (VAD) to remove silent or irrelevant sections, in seconds. + */ + duration_after_vad?: number; + }; + /** + * The complete transcription of the audio. + */ + text: string; + /** + * The total number of words in the transcription. + */ + word_count?: number; + segments?: { + /** + * The starting time of the segment within the audio, in seconds. + */ + start?: number; + /** + * The ending time of the segment within the audio, in seconds. + */ + end?: number; + /** + * The transcription of the segment. + */ + text?: string; + /** + * The temperature used in the decoding process, controlling randomness in predictions. Lower values result in more deterministic outputs. + */ + temperature?: number; + /** + * The average log probability of the predictions for the words in this segment, indicating overall confidence. + */ + avg_logprob?: number; + /** + * The compression ratio of the input to the output, measuring how much the text was compressed during the transcription process. + */ + compression_ratio?: number; + /** + * The probability that the segment contains no speech, represented as a decimal between 0 and 1. + */ + no_speech_prob?: number; + words?: { + /** + * The individual word transcribed from the audio. + */ + word?: string; + /** + * The starting time of the word within the audio, in seconds. + */ + start?: number; + /** + * The ending time of the word within the audio, in seconds. + */ + end?: number; + }[]; + }[]; + /** + * The transcription in WebVTT format, which includes timing and text information for use in subtitles. 
+ */ + vtt?: string; +} +declare abstract class Base_Ai_Cf_Openai_Whisper_Large_V3_Turbo { + inputs: Ai_Cf_Openai_Whisper_Large_V3_Turbo_Input; + postProcessedOutputs: Ai_Cf_Openai_Whisper_Large_V3_Turbo_Output; +} +type Ai_Cf_Baai_Bge_M3_Input = + | Ai_Cf_Baai_Bge_M3_Input_QueryAnd_Contexts + | Ai_Cf_Baai_Bge_M3_Input_Embedding + | { + /** + * Batch of the embeddings requests to run using async-queue + */ + requests: (Ai_Cf_Baai_Bge_M3_Input_QueryAnd_Contexts_1 | Ai_Cf_Baai_Bge_M3_Input_Embedding_1)[]; + }; +interface Ai_Cf_Baai_Bge_M3_Input_QueryAnd_Contexts { + /** + * A query you wish to perform against the provided contexts. If no query is provided the model with respond with embeddings for contexts + */ + query?: string; + /** + * List of provided contexts. Note that the index in this array is important, as the response will refer to it. + */ + contexts: { + /** + * One of the provided context content + */ + text?: string; + }[]; + /** + * When provided with too long context should the model error out or truncate the context to fit? + */ + truncate_inputs?: boolean; +} +interface Ai_Cf_Baai_Bge_M3_Input_Embedding { + text: string | string[]; + /** + * When provided with too long context should the model error out or truncate the context to fit? + */ + truncate_inputs?: boolean; +} +interface Ai_Cf_Baai_Bge_M3_Input_QueryAnd_Contexts_1 { + /** + * A query you wish to perform against the provided contexts. If no query is provided the model with respond with embeddings for contexts + */ + query?: string; + /** + * List of provided contexts. Note that the index in this array is important, as the response will refer to it. + */ + contexts: { + /** + * One of the provided context content + */ + text?: string; + }[]; + /** + * When provided with too long context should the model error out or truncate the context to fit? 
+ */ + truncate_inputs?: boolean; +} +interface Ai_Cf_Baai_Bge_M3_Input_Embedding_1 { + text: string | string[]; + /** + * When provided with too long context should the model error out or truncate the context to fit? + */ + truncate_inputs?: boolean; +} +type Ai_Cf_Baai_Bge_M3_Output = + | Ai_Cf_Baai_Bge_M3_Ouput_Query + | Ai_Cf_Baai_Bge_M3_Output_EmbeddingFor_Contexts + | Ai_Cf_Baai_Bge_M3_Ouput_Embedding + | Ai_Cf_Baai_Bge_M3_AsyncResponse; +interface Ai_Cf_Baai_Bge_M3_Ouput_Query { + response?: { + /** + * Index of the context in the request + */ + id?: number; + /** + * Score of the context under the index. + */ + score?: number; + }[]; +} +interface Ai_Cf_Baai_Bge_M3_Output_EmbeddingFor_Contexts { + response?: number[][]; + shape?: number[]; + /** + * The pooling method used in the embedding process. + */ + pooling?: 'mean' | 'cls'; +} +interface Ai_Cf_Baai_Bge_M3_Ouput_Embedding { + shape?: number[]; + /** + * Embeddings of the requested text values + */ + data?: number[][]; + /** + * The pooling method used in the embedding process. + */ + pooling?: 'mean' | 'cls'; +} +interface Ai_Cf_Baai_Bge_M3_AsyncResponse { + /** + * The async request id that can be used to obtain the results. + */ + request_id?: string; +} +declare abstract class Base_Ai_Cf_Baai_Bge_M3 { + inputs: Ai_Cf_Baai_Bge_M3_Input; + postProcessedOutputs: Ai_Cf_Baai_Bge_M3_Output; +} +interface Ai_Cf_Black_Forest_Labs_Flux_1_Schnell_Input { + /** + * A text description of the image you want to generate. + */ + prompt: string; + /** + * The number of diffusion steps; higher values can improve quality but take longer. + */ + steps?: number; +} +interface Ai_Cf_Black_Forest_Labs_Flux_1_Schnell_Output { + /** + * The generated image in Base64 format. 
+ */ + image?: string; +} +declare abstract class Base_Ai_Cf_Black_Forest_Labs_Flux_1_Schnell { + inputs: Ai_Cf_Black_Forest_Labs_Flux_1_Schnell_Input; + postProcessedOutputs: Ai_Cf_Black_Forest_Labs_Flux_1_Schnell_Output; +} +type Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct_Input = + | Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct_Prompt + | Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct_Messages; +interface Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct_Prompt { + /** + * The input text prompt for the model to generate a response. + */ + prompt: string; + image?: number[] | (string & NonNullable); + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. 
+ */ + presence_penalty?: number; + /** + * Name of the LoRA (Low-Rank Adaptation) model to fine-tune the base model. + */ + lora?: string; +} +interface Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct_Messages { + /** + * An array of message objects representing the conversation history. + */ + messages: { + /** + * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool'). + */ + role?: string; + /** + * The tool call id. Must be supplied for tool calls for Mistral-3. If you don't know what to put here you can fall back to 000000001 + */ + tool_call_id?: string; + content?: + | string + | { + /** + * Type of the content provided + */ + type?: string; + text?: string; + image_url?: { + /** + * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted + */ + url?: string; + }; + }[] + | { + /** + * Type of the content provided + */ + type?: string; + text?: string; + image_url?: { + /** + * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted + */ + url?: string; + }; + }; + }[]; + image?: number[] | (string & NonNullable); + functions?: { + name: string; + code: string; + }[]; + /** + * A list of tools available for the assistant to use. + */ + tools?: ( + | { + /** + * The name of the tool. More descriptive the better. + */ + name: string; + /** + * A brief description of what the tool does. + */ + description: string; + /** + * Schema defining the parameters accepted by the tool. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + } + | { + /** + * Specifies the type of tool (e.g., 'function'). 
+ */ + type: string; + /** + * Details of the function tool. + */ + function: { + /** + * The name of the function. + */ + name: string; + /** + * A brief description of what the function does. + */ + description: string; + /** + * Schema defining the parameters accepted by the function. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + }; + } + )[]; + /** + * If true, the response will be streamed back incrementally. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Controls the creativity of the AI's responses by adjusting how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. 
+ */ + presence_penalty?: number; +} +type Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct_Output = { + /** + * The generated text response from the model + */ + response?: string; + /** + * An array of tool calls requests made during the response generation + */ + tool_calls?: { + /** + * The arguments passed to be passed to the tool call request + */ + arguments?: object; + /** + * The name of the tool to be called + */ + name?: string; + }[]; +}; +declare abstract class Base_Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct { + inputs: Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct_Input; + postProcessedOutputs: Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct_Output; +} +type Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Input = + | Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Prompt + | Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Messages + | Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Async_Batch; +interface Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Prompt { + /** + * The input text prompt for the model to generate a response. + */ + prompt: string; + /** + * Name of the LoRA (Low-Rank Adaptation) model to fine-tune the base model. + */ + lora?: string; + response_format?: Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_JSON_Mode; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. 
+ */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +interface Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_JSON_Mode { + type?: 'json_object' | 'json_schema'; + json_schema?: unknown; +} +interface Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Messages { + /** + * An array of message objects representing the conversation history. + */ + messages: { + /** + * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool'). + */ + role: string; + /** + * The content of the message as a string. + */ + content: string; + }[]; + functions?: { + name: string; + code: string; + }[]; + /** + * A list of tools available for the assistant to use. + */ + tools?: ( + | { + /** + * The name of the tool. More descriptive the better. + */ + name: string; + /** + * A brief description of what the tool does. + */ + description: string; + /** + * Schema defining the parameters accepted by the tool. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + } + | { + /** + * Specifies the type of tool (e.g., 'function'). 
+ */ + type: string; + /** + * Details of the function tool. + */ + function: { + /** + * The name of the function. + */ + name: string; + /** + * A brief description of what the function does. + */ + description: string; + /** + * Schema defining the parameters accepted by the function. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + }; + } + )[]; + response_format?: Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_JSON_Mode_1; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. 
+ */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +interface Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_JSON_Mode_1 { + type?: 'json_object' | 'json_schema'; + json_schema?: unknown; +} +interface Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Async_Batch { + requests?: { + /** + * User-supplied reference. This field will be present in the response as well it can be used to reference the request and response. It's NOT validated to be unique. + */ + external_reference?: string; + /** + * Prompt for the text generation model + */ + prompt?: string; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. 
+ */ + presence_penalty?: number; + response_format?: Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_JSON_Mode_2; + }[]; +} +interface Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_JSON_Mode_2 { + type?: 'json_object' | 'json_schema'; + json_schema?: unknown; +} +type Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Output = + | { + /** + * The generated text response from the model + */ + response: string; + /** + * Usage statistics for the inference request + */ + usage?: { + /** + * Total number of tokens in input + */ + prompt_tokens?: number; + /** + * Total number of tokens in output + */ + completion_tokens?: number; + /** + * Total number of input and output tokens + */ + total_tokens?: number; + }; + /** + * An array of tool calls requests made during the response generation + */ + tool_calls?: { + /** + * The arguments passed to be passed to the tool call request + */ + arguments?: object; + /** + * The name of the tool to be called + */ + name?: string; + }[]; + } + | string + | Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_AsyncResponse; +interface Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_AsyncResponse { + /** + * The async request id that can be used to obtain the results. + */ + request_id?: string; +} +declare abstract class Base_Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast { + inputs: Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Input; + postProcessedOutputs: Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast_Output; +} +interface Ai_Cf_Meta_Llama_Guard_3_8B_Input { + /** + * An array of message objects representing the conversation history. + */ + messages: { + /** + * The role of the message sender must alternate between 'user' and 'assistant'. + */ + role: 'user' | 'assistant'; + /** + * The content of the message as a string. + */ + content: string; + }[]; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. 
+ */ + temperature?: number; + /** + * Dictate the output format of the generated response. + */ + response_format?: { + /** + * Set to json_object to process and output generated text as JSON. + */ + type?: string; + }; +} +interface Ai_Cf_Meta_Llama_Guard_3_8B_Output { + response?: + | string + | { + /** + * Whether the conversation is safe or not. + */ + safe?: boolean; + /** + * A list of what hazard categories predicted for the conversation, if the conversation is deemed unsafe. + */ + categories?: string[]; + }; + /** + * Usage statistics for the inference request + */ + usage?: { + /** + * Total number of tokens in input + */ + prompt_tokens?: number; + /** + * Total number of tokens in output + */ + completion_tokens?: number; + /** + * Total number of input and output tokens + */ + total_tokens?: number; + }; +} +declare abstract class Base_Ai_Cf_Meta_Llama_Guard_3_8B { + inputs: Ai_Cf_Meta_Llama_Guard_3_8B_Input; + postProcessedOutputs: Ai_Cf_Meta_Llama_Guard_3_8B_Output; +} +interface Ai_Cf_Baai_Bge_Reranker_Base_Input { + /** + * A query you wish to perform against the provided contexts. + */ + /** + * Number of returned results starting with the best score. + */ + top_k?: number; + /** + * List of provided contexts. Note that the index in this array is important, as the response will refer to it. + */ + contexts: { + /** + * One of the provided context content + */ + text?: string; + }[]; +} +interface Ai_Cf_Baai_Bge_Reranker_Base_Output { + response?: { + /** + * Index of the context in the request + */ + id?: number; + /** + * Score of the context under the index. 
+ */ + score?: number; + }[]; +} +declare abstract class Base_Ai_Cf_Baai_Bge_Reranker_Base { + inputs: Ai_Cf_Baai_Bge_Reranker_Base_Input; + postProcessedOutputs: Ai_Cf_Baai_Bge_Reranker_Base_Output; +} +type Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_Input = + | Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_Prompt + | Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_Messages; +interface Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_Prompt { + /** + * The input text prompt for the model to generate a response. + */ + prompt: string; + /** + * Name of the LoRA (Low-Rank Adaptation) model to fine-tune the base model. + */ + lora?: string; + response_format?: Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_JSON_Mode; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. 
+ */ + presence_penalty?: number; +} +interface Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_JSON_Mode { + type?: 'json_object' | 'json_schema'; + json_schema?: unknown; +} +interface Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_Messages { + /** + * An array of message objects representing the conversation history. + */ + messages: { + /** + * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool'). + */ + role: string; + /** + * The content of the message as a string. + */ + content: string; + }[]; + functions?: { + name: string; + code: string; + }[]; + /** + * A list of tools available for the assistant to use. + */ + tools?: ( + | { + /** + * The name of the tool. More descriptive the better. + */ + name: string; + /** + * A brief description of what the tool does. + */ + description: string; + /** + * Schema defining the parameters accepted by the tool. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + } + | { + /** + * Specifies the type of tool (e.g., 'function'). + */ + type: string; + /** + * Details of the function tool. + */ + function: { + /** + * The name of the function. + */ + name: string; + /** + * A brief description of what the function does. + */ + description: string; + /** + * Schema defining the parameters accepted by the function. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. 
+ */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + }; + } + )[]; + response_format?: Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_JSON_Mode_1; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. 
+ */ + presence_penalty?: number; +} +interface Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_JSON_Mode_1 { + type?: 'json_object' | 'json_schema'; + json_schema?: unknown; +} +type Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_Output = { + /** + * The generated text response from the model + */ + response: string; + /** + * Usage statistics for the inference request + */ + usage?: { + /** + * Total number of tokens in input + */ + prompt_tokens?: number; + /** + * Total number of tokens in output + */ + completion_tokens?: number; + /** + * Total number of input and output tokens + */ + total_tokens?: number; + }; + /** + * An array of tool calls requests made during the response generation + */ + tool_calls?: { + /** + * The arguments passed to be passed to the tool call request + */ + arguments?: object; + /** + * The name of the tool to be called + */ + name?: string; + }[]; +}; +declare abstract class Base_Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct { + inputs: Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_Input; + postProcessedOutputs: Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct_Output; +} +type Ai_Cf_Qwen_Qwq_32B_Input = Ai_Cf_Qwen_Qwq_32B_Prompt | Ai_Cf_Qwen_Qwq_32B_Messages; +interface Ai_Cf_Qwen_Qwq_32B_Prompt { + /** + * The input text prompt for the model to generate a response. + */ + prompt: string; + /** + * JSON schema that should be fulfilled for the response. + */ + guided_json?: object; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. 
Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +interface Ai_Cf_Qwen_Qwq_32B_Messages { + /** + * An array of message objects representing the conversation history. + */ + messages: { + /** + * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool'). + */ + role?: string; + /** + * The tool call id. Must be supplied for tool calls for Mistral-3. If you don't know what to put here you can fall back to 000000001 + */ + tool_call_id?: string; + content?: + | string + | { + /** + * Type of the content provided + */ + type?: string; + text?: string; + image_url?: { + /** + * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted + */ + url?: string; + }; + }[] + | { + /** + * Type of the content provided + */ + type?: string; + text?: string; + image_url?: { + /** + * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted + */ + url?: string; + }; + }; + }[]; + functions?: { + name: string; + code: string; + }[]; + /** + * A list of tools available for the assistant to use. + */ + tools?: ( + | { + /** + * The name of the tool. More descriptive the better. + */ + name: string; + /** + * A brief description of what the tool does. 
+ */ + description: string; + /** + * Schema defining the parameters accepted by the tool. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + } + | { + /** + * Specifies the type of tool (e.g., 'function'). + */ + type: string; + /** + * Details of the function tool. + */ + function: { + /** + * The name of the function. + */ + name: string; + /** + * A brief description of what the function does. + */ + description: string; + /** + * Schema defining the parameters accepted by the function. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + }; + } + )[]; + /** + * JSON schema that should be fulfilled for the response. + */ + guided_json?: object; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. 
Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +type Ai_Cf_Qwen_Qwq_32B_Output = { + /** + * The generated text response from the model + */ + response: string; + /** + * Usage statistics for the inference request + */ + usage?: { + /** + * Total number of tokens in input + */ + prompt_tokens?: number; + /** + * Total number of tokens in output + */ + completion_tokens?: number; + /** + * Total number of input and output tokens + */ + total_tokens?: number; + }; + /** + * An array of tool calls requests made during the response generation + */ + tool_calls?: { + /** + * The arguments passed to be passed to the tool call request + */ + arguments?: object; + /** + * The name of the tool to be called + */ + name?: string; + }[]; +}; +declare abstract class Base_Ai_Cf_Qwen_Qwq_32B { + inputs: Ai_Cf_Qwen_Qwq_32B_Input; + postProcessedOutputs: Ai_Cf_Qwen_Qwq_32B_Output; +} +type Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct_Input = + | Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct_Prompt + | Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct_Messages; +interface Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct_Prompt { + /** + * The input text prompt for the model to generate a response. + */ + prompt: string; + /** + * JSON schema that should be fulfilled for the response. 
+ */ + guided_json?: object; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +interface Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct_Messages { + /** + * An array of message objects representing the conversation history. + */ + messages: { + /** + * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool'). + */ + role?: string; + /** + * The tool call id. Must be supplied for tool calls for Mistral-3. If you don't know what to put here you can fall back to 000000001 + */ + tool_call_id?: string; + content?: + | string + | { + /** + * Type of the content provided + */ + type?: string; + text?: string; + image_url?: { + /** + * image uri with data (e.g. 
data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted + */ + url?: string; + }; + }[] + | { + /** + * Type of the content provided + */ + type?: string; + text?: string; + image_url?: { + /** + * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted + */ + url?: string; + }; + }; + }[]; + functions?: { + name: string; + code: string; + }[]; + /** + * A list of tools available for the assistant to use. + */ + tools?: ( + | { + /** + * The name of the tool. More descriptive the better. + */ + name: string; + /** + * A brief description of what the tool does. + */ + description: string; + /** + * Schema defining the parameters accepted by the tool. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + } + | { + /** + * Specifies the type of tool (e.g., 'function'). + */ + type: string; + /** + * Details of the function tool. + */ + function: { + /** + * The name of the function. + */ + name: string; + /** + * A brief description of what the function does. + */ + description: string; + /** + * Schema defining the parameters accepted by the function. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + }; + } + )[]; + /** + * JSON schema that should be fulfilled for the response. 
+ */ + guided_json?: object; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. 
+ */ + presence_penalty?: number; +} +type Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct_Output = { + /** + * The generated text response from the model + */ + response: string; + /** + * Usage statistics for the inference request + */ + usage?: { + /** + * Total number of tokens in input + */ + prompt_tokens?: number; + /** + * Total number of tokens in output + */ + completion_tokens?: number; + /** + * Total number of input and output tokens + */ + total_tokens?: number; + }; + /** + * An array of tool calls requests made during the response generation + */ + tool_calls?: { + /** + * The arguments passed to be passed to the tool call request + */ + arguments?: object; + /** + * The name of the tool to be called + */ + name?: string; + }[]; +}; +declare abstract class Base_Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct { + inputs: Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct_Input; + postProcessedOutputs: Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct_Output; +} +type Ai_Cf_Google_Gemma_3_12B_It_Input = Ai_Cf_Google_Gemma_3_12B_It_Prompt | Ai_Cf_Google_Gemma_3_12B_It_Messages; +interface Ai_Cf_Google_Gemma_3_12B_It_Prompt { + /** + * The input text prompt for the model to generate a response. + */ + prompt: string; + /** + * JSON schema that should be fulfilled for the response. + */ + guided_json?: object; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. 
Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +interface Ai_Cf_Google_Gemma_3_12B_It_Messages { + /** + * An array of message objects representing the conversation history. + */ + messages: { + /** + * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool'). + */ + role?: string; + content?: + | string + | { + /** + * Type of the content provided + */ + type?: string; + text?: string; + image_url?: { + /** + * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted + */ + url?: string; + }; + }[]; + }[]; + functions?: { + name: string; + code: string; + }[]; + /** + * A list of tools available for the assistant to use. + */ + tools?: ( + | { + /** + * The name of the tool. More descriptive the better. + */ + name: string; + /** + * A brief description of what the tool does. + */ + description: string; + /** + * Schema defining the parameters accepted by the tool. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. 
+ */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + } + | { + /** + * Specifies the type of tool (e.g., 'function'). + */ + type: string; + /** + * Details of the function tool. + */ + function: { + /** + * The name of the function. + */ + name: string; + /** + * A brief description of what the function does. + */ + description: string; + /** + * Schema defining the parameters accepted by the function. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + }; + } + )[]; + /** + * JSON schema that should be fulfilled for the response. + */ + guided_json?: object; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. 
+ */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +type Ai_Cf_Google_Gemma_3_12B_It_Output = { + /** + * The generated text response from the model + */ + response: string; + /** + * Usage statistics for the inference request + */ + usage?: { + /** + * Total number of tokens in input + */ + prompt_tokens?: number; + /** + * Total number of tokens in output + */ + completion_tokens?: number; + /** + * Total number of input and output tokens + */ + total_tokens?: number; + }; + /** + * An array of tool calls requests made during the response generation + */ + tool_calls?: { + /** + * The arguments passed to be passed to the tool call request + */ + arguments?: object; + /** + * The name of the tool to be called + */ + name?: string; + }[]; +}; +declare abstract class Base_Ai_Cf_Google_Gemma_3_12B_It { + inputs: Ai_Cf_Google_Gemma_3_12B_It_Input; + postProcessedOutputs: Ai_Cf_Google_Gemma_3_12B_It_Output; +} +type Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Input = + | Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Prompt + | Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Messages + | Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Async_Batch; +interface Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Prompt { + /** + * The input text prompt for the model to generate a response. + */ + prompt: string; + /** + * JSON schema that should be fulfilled for the response. + */ + guided_json?: object; + response_format?: Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_JSON_Mode; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. 
+ */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +interface Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_JSON_Mode { + type?: 'json_object' | 'json_schema'; + json_schema?: unknown; +} +interface Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Messages { + /** + * An array of message objects representing the conversation history. + */ + messages: { + /** + * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool'). + */ + role?: string; + /** + * The tool call id. If you don't know what to put here you can fall back to 000000001 + */ + tool_call_id?: string; + content?: + | string + | { + /** + * Type of the content provided + */ + type?: string; + text?: string; + image_url?: { + /** + * image uri with data (e.g. data:image/jpeg;base64,/9j/...). 
HTTP URL will not be accepted + */ + url?: string; + }; + }[] + | { + /** + * Type of the content provided + */ + type?: string; + text?: string; + image_url?: { + /** + * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted + */ + url?: string; + }; + }; + }[]; + functions?: { + name: string; + code: string; + }[]; + /** + * A list of tools available for the assistant to use. + */ + tools?: ( + | { + /** + * The name of the tool. More descriptive the better. + */ + name: string; + /** + * A brief description of what the tool does. + */ + description: string; + /** + * Schema defining the parameters accepted by the tool. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + } + | { + /** + * Specifies the type of tool (e.g., 'function'). + */ + type: string; + /** + * Details of the function tool. + */ + function: { + /** + * The name of the function. + */ + name: string; + /** + * A brief description of what the function does. + */ + description: string; + /** + * Schema defining the parameters accepted by the function. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. 
+ */ + description: string; + }; + }; + }; + }; + } + )[]; + response_format?: Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_JSON_Mode; + /** + * JSON schema that should be fulfilled for the response. + */ + guided_json?: object; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +interface Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Async_Batch { + requests: ( + | Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Prompt_Inner + | Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Messages_Inner + )[]; +} +interface Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Prompt_Inner { + /** + * The input text prompt for the model to generate a response. + */ + prompt: string; + /** + * JSON schema that should be fulfilled for the response. 
+ */ + guided_json?: object; + response_format?: Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_JSON_Mode; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +interface Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Messages_Inner { + /** + * An array of message objects representing the conversation history. + */ + messages: { + /** + * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool'). + */ + role?: string; + /** + * The tool call id. If you don't know what to put here you can fall back to 000000001 + */ + tool_call_id?: string; + content?: + | string + | { + /** + * Type of the content provided + */ + type?: string; + text?: string; + image_url?: { + /** + * image uri with data (e.g. 
data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted + */ + url?: string; + }; + }[] + | { + /** + * Type of the content provided + */ + type?: string; + text?: string; + image_url?: { + /** + * image uri with data (e.g. data:image/jpeg;base64,/9j/...). HTTP URL will not be accepted + */ + url?: string; + }; + }; + }[]; + functions?: { + name: string; + code: string; + }[]; + /** + * A list of tools available for the assistant to use. + */ + tools?: ( + | { + /** + * The name of the tool. More descriptive the better. + */ + name: string; + /** + * A brief description of what the tool does. + */ + description: string; + /** + * Schema defining the parameters accepted by the tool. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + } + | { + /** + * Specifies the type of tool (e.g., 'function'). + */ + type: string; + /** + * Details of the function tool. + */ + function: { + /** + * The name of the function. + */ + name: string; + /** + * A brief description of what the function does. + */ + description: string; + /** + * Schema defining the parameters accepted by the function. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. 
+ */ + description: string; + }; + }; + }; + }; + } + )[]; + response_format?: Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_JSON_Mode; + /** + * JSON schema that should be fulfilled for the response. + */ + guided_json?: object; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. 
+ */ + presence_penalty?: number; +} +type Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Output = { + /** + * The generated text response from the model + */ + response: string; + /** + * Usage statistics for the inference request + */ + usage?: { + /** + * Total number of tokens in input + */ + prompt_tokens?: number; + /** + * Total number of tokens in output + */ + completion_tokens?: number; + /** + * Total number of input and output tokens + */ + total_tokens?: number; + }; + /** + * An array of tool calls requests made during the response generation + */ + tool_calls?: { + /** + * The tool call id. + */ + id?: string; + /** + * Specifies the type of tool (e.g., 'function'). + */ + type?: string; + /** + * Details of the function tool. + */ + function?: { + /** + * The name of the tool to be called + */ + name?: string; + /** + * The arguments passed to be passed to the tool call request + */ + arguments?: object; + }; + }[]; +}; +declare abstract class Base_Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct { + inputs: Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Input; + postProcessedOutputs: Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct_Output; +} +type Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Input = + | Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Prompt + | Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Messages + | Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Async_Batch; +interface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Prompt { + /** + * The input text prompt for the model to generate a response. + */ + prompt: string; + /** + * Name of the LoRA (Low-Rank Adaptation) model to fine-tune the base model. + */ + lora?: string; + response_format?: Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_JSON_Mode; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. 
+ */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +interface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_JSON_Mode { + type?: 'json_object' | 'json_schema'; + json_schema?: unknown; +} +interface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Messages { + /** + * An array of message objects representing the conversation history. + */ + messages: { + /** + * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool'). + */ + role: string; + /** + * The content of the message as a string. + */ + content: string; + }[]; + functions?: { + name: string; + code: string; + }[]; + /** + * A list of tools available for the assistant to use. + */ + tools?: ( + | { + /** + * The name of the tool. More descriptive the better. + */ + name: string; + /** + * A brief description of what the tool does. + */ + description: string; + /** + * Schema defining the parameters accepted by the tool. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. 
+ */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + } + | { + /** + * Specifies the type of tool (e.g., 'function'). + */ + type: string; + /** + * Details of the function tool. + */ + function: { + /** + * The name of the function. + */ + name: string; + /** + * A brief description of what the function does. + */ + description: string; + /** + * Schema defining the parameters accepted by the function. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + }; + } + )[]; + response_format?: Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_JSON_Mode_1; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. 
Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +interface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_JSON_Mode_1 { + type?: 'json_object' | 'json_schema'; + json_schema?: unknown; +} +interface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Async_Batch { + requests: (Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Prompt_1 | Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Messages_1)[]; +} +interface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Prompt_1 { + /** + * The input text prompt for the model to generate a response. + */ + prompt: string; + /** + * Name of the LoRA (Low-Rank Adaptation) model to fine-tune the base model. + */ + lora?: string; + response_format?: Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_JSON_Mode_2; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. 
Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +interface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_JSON_Mode_2 { + type?: 'json_object' | 'json_schema'; + json_schema?: unknown; +} +interface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Messages_1 { + /** + * An array of message objects representing the conversation history. + */ + messages: { + /** + * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool'). + */ + role: string; + /** + * The content of the message as a string. + */ + content: string; + }[]; + functions?: { + name: string; + code: string; + }[]; + /** + * A list of tools available for the assistant to use. + */ + tools?: ( + | { + /** + * The name of the tool. More descriptive the better. + */ + name: string; + /** + * A brief description of what the tool does. + */ + description: string; + /** + * Schema defining the parameters accepted by the tool. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + } + | { + /** + * Specifies the type of tool (e.g., 'function'). + */ + type: string; + /** + * Details of the function tool. + */ + function: { + /** + * The name of the function. 
+ */ + name: string; + /** + * A brief description of what the function does. + */ + description: string; + /** + * Schema defining the parameters accepted by the function. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + }; + } + )[]; + response_format?: Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_JSON_Mode_3; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. 
+ */ + presence_penalty?: number; +} +interface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_JSON_Mode_3 { + type?: 'json_object' | 'json_schema'; + json_schema?: unknown; +} +type Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Output = + | Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Chat_Completion_Response + | Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Text_Completion_Response + | string + | Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_AsyncResponse; +interface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Chat_Completion_Response { + /** + * Unique identifier for the completion + */ + id?: string; + /** + * Object type identifier + */ + object?: 'chat.completion'; + /** + * Unix timestamp of when the completion was created + */ + created?: number; + /** + * Model used for the completion + */ + model?: string; + /** + * List of completion choices + */ + choices?: { + /** + * Index of the choice in the list + */ + index?: number; + /** + * The message generated by the model + */ + message?: { + /** + * Role of the message author + */ + role: string; + /** + * The content of the message + */ + content: string; + /** + * Internal reasoning content (if available) + */ + reasoning_content?: string; + /** + * Tool calls made by the assistant + */ + tool_calls?: { + /** + * Unique identifier for the tool call + */ + id: string; + /** + * Type of tool call + */ + type: 'function'; + function: { + /** + * Name of the function to call + */ + name: string; + /** + * JSON string of arguments for the function + */ + arguments: string; + }; + }[]; + }; + /** + * Reason why the model stopped generating + */ + finish_reason?: string; + /** + * Stop reason (may be null) + */ + stop_reason?: string | null; + /** + * Log probabilities (if requested) + */ + logprobs?: {} | null; + }[]; + /** + * Usage statistics for the inference request + */ + usage?: { + /** + * Total number of tokens in input + */ + prompt_tokens?: number; + /** + * Total number of tokens in output + */ + completion_tokens?: number; + /** + * Total number of input and output tokens + */ + total_tokens?: 
number; + }; + /** + * Log probabilities for the prompt (if requested) + */ + prompt_logprobs?: {} | null; +} +interface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Text_Completion_Response { + /** + * Unique identifier for the completion + */ + id?: string; + /** + * Object type identifier + */ + object?: 'text_completion'; + /** + * Unix timestamp of when the completion was created + */ + created?: number; + /** + * Model used for the completion + */ + model?: string; + /** + * List of completion choices + */ + choices?: { + /** + * Index of the choice in the list + */ + index: number; + /** + * The generated text completion + */ + text: string; + /** + * Reason why the model stopped generating + */ + finish_reason: string; + /** + * Stop reason (may be null) + */ + stop_reason?: string | null; + /** + * Log probabilities (if requested) + */ + logprobs?: {} | null; + /** + * Log probabilities for the prompt (if requested) + */ + prompt_logprobs?: {} | null; + }[]; + /** + * Usage statistics for the inference request + */ + usage?: { + /** + * Total number of tokens in input + */ + prompt_tokens?: number; + /** + * Total number of tokens in output + */ + completion_tokens?: number; + /** + * Total number of input and output tokens + */ + total_tokens?: number; + }; +} +interface Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_AsyncResponse { + /** + * The async request id that can be used to obtain the results. + */ + request_id?: string; +} +declare abstract class Base_Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8 { + inputs: Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Input; + postProcessedOutputs: Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8_Output; +} +interface Ai_Cf_Deepgram_Nova_3_Input { + audio: { + body: object; + contentType: string; + }; + /** + * Sets how the model will interpret strings submitted to the custom_topic param. When strict, the model will only return topics submitted using the custom_topic param. When extended, the model will return its own detected topics in addition to those submitted using the custom_topic param. 
+ */ + custom_topic_mode?: 'extended' | 'strict'; + /** + * Custom topics you want the model to detect within your input audio or text if present. Submit up to 100. + */ + custom_topic?: string; + /** + * Sets how the model will interpret intents submitted to the custom_intent param. When strict, the model will only return intents submitted using the custom_intent param. When extended, the model will return its own detected intents in addition to those submitted using the custom_intents param + */ + custom_intent_mode?: 'extended' | 'strict'; + /** + * Custom intents you want the model to detect within your input audio if present + */ + custom_intent?: string; + /** + * Identifies and extracts key entities from content in submitted audio + */ + detect_entities?: boolean; + /** + * Identifies the dominant language spoken in submitted audio + */ + detect_language?: boolean; + /** + * Recognize speaker changes. Each word in the transcript will be assigned a speaker number starting at 0 + */ + diarize?: boolean; + /** + * Identify and extract key entities from content in submitted audio + */ + dictation?: boolean; + /** + * Specify the expected encoding of your submitted audio + */ + encoding?: 'linear16' | 'flac' | 'mulaw' | 'amr-nb' | 'amr-wb' | 'opus' | 'speex' | 'g729'; + /** + * Arbitrary key-value pairs that are attached to the API response for usage in downstream processing + */ + extra?: string; + /** + * Filler Words can help transcribe interruptions in your audio, like 'uh' and 'um' + */ + filler_words?: boolean; + /** + * Key term prompting can boost or suppress specialized terminology and brands. + */ + keyterm?: string; + /** + * Keywords can boost or suppress specialized terminology and brands. + */ + keywords?: string; + /** + * The BCP-47 language tag that hints at the primary spoken language. Depending on the Model and API endpoint you choose only certain languages are available. 
+ */ + language?: string; + /** + * Spoken measurements will be converted to their corresponding abbreviations. + */ + measurements?: boolean; + /** + * Opts out requests from the Deepgram Model Improvement Program. Refer to our Docs for pricing impacts before setting this to true. https://dpgr.am/deepgram-mip. + */ + mip_opt_out?: boolean; + /** + * Mode of operation for the model representing broad area of topic that will be talked about in the supplied audio + */ + mode?: 'general' | 'medical' | 'finance'; + /** + * Transcribe each audio channel independently. + */ + multichannel?: boolean; + /** + * Numerals converts numbers from written format to numerical format. + */ + numerals?: boolean; + /** + * Splits audio into paragraphs to improve transcript readability. + */ + paragraphs?: boolean; + /** + * Profanity Filter looks for recognized profanity and converts it to the nearest recognized non-profane word or removes it from the transcript completely. + */ + profanity_filter?: boolean; + /** + * Add punctuation and capitalization to the transcript. + */ + punctuate?: boolean; + /** + * Redaction removes sensitive information from your transcripts. + */ + redact?: string; + /** + * Search for terms or phrases in submitted audio and replaces them. + */ + replace?: string; + /** + * Search for terms or phrases in submitted audio. + */ + search?: string; + /** + * Recognizes the sentiment throughout a transcript or text. + */ + sentiment?: boolean; + /** + * Apply formatting to transcript output. When set to true, additional formatting will be applied to transcripts to improve readability. + */ + smart_format?: boolean; + /** + * Detect topics throughout a transcript or text. + */ + topics?: boolean; + /** + * Segments speech into meaningful semantic units. + */ + utterances?: boolean; + /** + * Seconds to wait before detecting a pause between words in submitted audio. 
+ */ + utt_split?: number; + /** + * The number of channels in the submitted audio + */ + channels?: number; + /** + * Specifies whether the streaming endpoint should provide ongoing transcription updates as more audio is received. When set to true, the endpoint sends continuous updates, meaning transcription results may evolve over time. Note: Supported only for websockets. + */ + interim_results?: boolean; + /** + * Indicates how long the model will wait to detect whether a speaker has finished speaking or pauses for a significant period of time. When set to a value, the streaming endpoint immediately finalizes the transcription for the processed time range and returns the transcript with a speech_final parameter set to true. Can also be set to false to disable endpointing + */ + endpointing?: string; + /** + * Indicates that speech has started. You'll begin receiving Speech Started messages upon speech starting. Note: Supported only for websockets. + */ + vad_events?: boolean; + /** + * Indicates how long the model will wait to send an UtteranceEnd message after a word has been transcribed. Use with interim_results. Note: Supported only for websockets. 
+ */ + utterance_end_ms?: boolean; +} +interface Ai_Cf_Deepgram_Nova_3_Output { + results?: { + channels?: { + alternatives?: { + confidence?: number; + transcript?: string; + words?: { + confidence?: number; + end?: number; + start?: number; + word?: string; + }[]; + }[]; + }[]; + summary?: { + result?: string; + short?: string; + }; + sentiments?: { + segments?: { + text?: string; + start_word?: number; + end_word?: number; + sentiment?: string; + sentiment_score?: number; + }[]; + average?: { + sentiment?: string; + sentiment_score?: number; + }; + }; + }; +} +declare abstract class Base_Ai_Cf_Deepgram_Nova_3 { + inputs: Ai_Cf_Deepgram_Nova_3_Input; + postProcessedOutputs: Ai_Cf_Deepgram_Nova_3_Output; +} +interface Ai_Cf_Qwen_Qwen3_Embedding_0_6B_Input { + queries?: string | string[]; + /** + * Optional instruction for the task + */ + instruction?: string; + documents?: string | string[]; + text?: string | string[]; +} +interface Ai_Cf_Qwen_Qwen3_Embedding_0_6B_Output { + data?: number[][]; + shape?: number[]; +} +declare abstract class Base_Ai_Cf_Qwen_Qwen3_Embedding_0_6B { + inputs: Ai_Cf_Qwen_Qwen3_Embedding_0_6B_Input; + postProcessedOutputs: Ai_Cf_Qwen_Qwen3_Embedding_0_6B_Output; +} +type Ai_Cf_Pipecat_Ai_Smart_Turn_V2_Input = + | { + /** + * readable stream with audio data and content-type specified for that data + */ + audio: { + body: object; + contentType: string; + }; + /** + * type of data PCM data that's sent to the inference server as raw array + */ + dtype?: 'uint8' | 'float32' | 'float64'; + } + | { + /** + * base64 encoded audio data + */ + audio: string; + /** + * type of data PCM data that's sent to the inference server as raw array + */ + dtype?: 'uint8' | 'float32' | 'float64'; + }; +interface Ai_Cf_Pipecat_Ai_Smart_Turn_V2_Output { + /** + * if true, end-of-turn was detected + */ + is_complete?: boolean; + /** + * probability of the end-of-turn detection + */ + probability?: number; +} +declare abstract class 
Base_Ai_Cf_Pipecat_Ai_Smart_Turn_V2 { + inputs: Ai_Cf_Pipecat_Ai_Smart_Turn_V2_Input; + postProcessedOutputs: Ai_Cf_Pipecat_Ai_Smart_Turn_V2_Output; +} +declare abstract class Base_Ai_Cf_Openai_Gpt_Oss_120B { + inputs: ResponsesInput; + postProcessedOutputs: ResponsesOutput; +} +declare abstract class Base_Ai_Cf_Openai_Gpt_Oss_20B { + inputs: ResponsesInput; + postProcessedOutputs: ResponsesOutput; +} +interface Ai_Cf_Leonardo_Phoenix_1_0_Input { + /** + * A text description of the image you want to generate. + */ + prompt: string; + /** + * Controls how closely the generated image should adhere to the prompt; higher values make the image more aligned with the prompt + */ + guidance?: number; + /** + * Random seed for reproducibility of the image generation + */ + seed?: number; + /** + * The height of the generated image in pixels + */ + height?: number; + /** + * The width of the generated image in pixels + */ + width?: number; + /** + * The number of diffusion steps; higher values can improve quality but take longer + */ + num_steps?: number; + /** + * Specify what to exclude from the generated images + */ + negative_prompt?: string; +} +/** + * The generated image in JPEG format + */ +type Ai_Cf_Leonardo_Phoenix_1_0_Output = string; +declare abstract class Base_Ai_Cf_Leonardo_Phoenix_1_0 { + inputs: Ai_Cf_Leonardo_Phoenix_1_0_Input; + postProcessedOutputs: Ai_Cf_Leonardo_Phoenix_1_0_Output; +} +interface Ai_Cf_Leonardo_Lucid_Origin_Input { + /** + * A text description of the image you want to generate. 
+ */ + prompt: string; + /** + * Controls how closely the generated image should adhere to the prompt; higher values make the image more aligned with the prompt + */ + guidance?: number; + /** + * Random seed for reproducibility of the image generation + */ + seed?: number; + /** + * The height of the generated image in pixels + */ + height?: number; + /** + * The width of the generated image in pixels + */ + width?: number; + /** + * The number of diffusion steps; higher values can improve quality but take longer + */ + num_steps?: number; + /** + * The number of diffusion steps; higher values can improve quality but take longer + */ + steps?: number; +} +interface Ai_Cf_Leonardo_Lucid_Origin_Output { + /** + * The generated image in Base64 format. + */ + image?: string; +} +declare abstract class Base_Ai_Cf_Leonardo_Lucid_Origin { + inputs: Ai_Cf_Leonardo_Lucid_Origin_Input; + postProcessedOutputs: Ai_Cf_Leonardo_Lucid_Origin_Output; +} +interface Ai_Cf_Deepgram_Aura_1_Input { + /** + * Speaker used to produce the audio. + */ + speaker?: + | 'angus' + | 'asteria' + | 'arcas' + | 'orion' + | 'orpheus' + | 'athena' + | 'luna' + | 'zeus' + | 'perseus' + | 'helios' + | 'hera' + | 'stella'; + /** + * Encoding of the output audio. + */ + encoding?: 'linear16' | 'flac' | 'mulaw' | 'alaw' | 'mp3' | 'opus' | 'aac'; + /** + * Container specifies the file format wrapper for the output audio. The available options depend on the encoding type.. + */ + container?: 'none' | 'wav' | 'ogg'; + /** + * The text content to be converted to speech + */ + text: string; + /** + * Sample Rate specifies the sample rate for the output audio. Based on the encoding, different sample rates are supported. For some encodings, the sample rate is not configurable + */ + sample_rate?: number; + /** + * The bitrate of the audio in bits per second. Choose from predefined ranges or specific values based on the encoding type. 
+ */ + bit_rate?: number; +} +/** + * The generated audio in MP3 format + */ +type Ai_Cf_Deepgram_Aura_1_Output = string; +declare abstract class Base_Ai_Cf_Deepgram_Aura_1 { + inputs: Ai_Cf_Deepgram_Aura_1_Input; + postProcessedOutputs: Ai_Cf_Deepgram_Aura_1_Output; +} +interface Ai_Cf_Ai4Bharat_Indictrans2_En_Indic_1B_Input { + /** + * Input text to translate. Can be a single string or a list of strings. + */ + text: string | string[]; + /** + * Target language to translate to + */ + target_language: + | 'asm_Beng' + | 'awa_Deva' + | 'ben_Beng' + | 'bho_Deva' + | 'brx_Deva' + | 'doi_Deva' + | 'eng_Latn' + | 'gom_Deva' + | 'gon_Deva' + | 'guj_Gujr' + | 'hin_Deva' + | 'hne_Deva' + | 'kan_Knda' + | 'kas_Arab' + | 'kas_Deva' + | 'kha_Latn' + | 'lus_Latn' + | 'mag_Deva' + | 'mai_Deva' + | 'mal_Mlym' + | 'mar_Deva' + | 'mni_Beng' + | 'mni_Mtei' + | 'npi_Deva' + | 'ory_Orya' + | 'pan_Guru' + | 'san_Deva' + | 'sat_Olck' + | 'snd_Arab' + | 'snd_Deva' + | 'tam_Taml' + | 'tel_Telu' + | 'urd_Arab' + | 'unr_Deva'; +} +interface Ai_Cf_Ai4Bharat_Indictrans2_En_Indic_1B_Output { + /** + * Translated texts + */ + translations: string[]; +} +declare abstract class Base_Ai_Cf_Ai4Bharat_Indictrans2_En_Indic_1B { + inputs: Ai_Cf_Ai4Bharat_Indictrans2_En_Indic_1B_Input; + postProcessedOutputs: Ai_Cf_Ai4Bharat_Indictrans2_En_Indic_1B_Output; +} +type Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Input = + | Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Prompt + | Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Messages + | Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Async_Batch; +interface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Prompt { + /** + * The input text prompt for the model to generate a response. + */ + prompt: string; + /** + * Name of the LoRA (Low-Rank Adaptation) model to fine-tune the base model. 
+ */ + lora?: string; + response_format?: Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_JSON_Mode; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +interface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_JSON_Mode { + type?: 'json_object' | 'json_schema'; + json_schema?: unknown; +} +interface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Messages { + /** + * An array of message objects representing the conversation history. + */ + messages: { + /** + * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool'). + */ + role: string; + /** + * The content of the message as a string. 
+ */ + content: string; + }[]; + functions?: { + name: string; + code: string; + }[]; + /** + * A list of tools available for the assistant to use. + */ + tools?: ( + | { + /** + * The name of the tool. More descriptive the better. + */ + name: string; + /** + * A brief description of what the tool does. + */ + description: string; + /** + * Schema defining the parameters accepted by the tool. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + } + | { + /** + * Specifies the type of tool (e.g., 'function'). + */ + type: string; + /** + * Details of the function tool. + */ + function: { + /** + * The name of the function. + */ + name: string; + /** + * A brief description of what the function does. + */ + description: string; + /** + * Schema defining the parameters accepted by the function. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + }; + } + )[]; + response_format?: Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_JSON_Mode_1; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. 
+ */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +interface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_JSON_Mode_1 { + type?: 'json_object' | 'json_schema'; + json_schema?: unknown; +} +interface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Async_Batch { + requests: ( + | Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Prompt_1 + | Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Messages_1 + )[]; +} +interface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Prompt_1 { + /** + * The input text prompt for the model to generate a response. + */ + prompt: string; + /** + * Name of the LoRA (Low-Rank Adaptation) model to fine-tune the base model. + */ + lora?: string; + response_format?: Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_JSON_Mode_2; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. 
+ */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +interface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_JSON_Mode_2 { + type?: 'json_object' | 'json_schema'; + json_schema?: unknown; +} +interface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Messages_1 { + /** + * An array of message objects representing the conversation history. + */ + messages: { + /** + * The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool'). + */ + role: string; + /** + * The content of the message as a string. + */ + content: string; + }[]; + functions?: { + name: string; + code: string; + }[]; + /** + * A list of tools available for the assistant to use. + */ + tools?: ( + | { + /** + * The name of the tool. More descriptive the better. + */ + name: string; + /** + * A brief description of what the tool does. 
+ */ + description: string; + /** + * Schema defining the parameters accepted by the tool. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + } + | { + /** + * Specifies the type of tool (e.g., 'function'). + */ + type: string; + /** + * Details of the function tool. + */ + function: { + /** + * The name of the function. + */ + name: string; + /** + * A brief description of what the function does. + */ + description: string; + /** + * Schema defining the parameters accepted by the function. + */ + parameters: { + /** + * The type of the parameters object (usually 'object'). + */ + type: string; + /** + * List of required parameter names. + */ + required?: string[]; + /** + * Definitions of each parameter. + */ + properties: { + [k: string]: { + /** + * The data type of the parameter. + */ + type: string; + /** + * A description of the expected parameter. + */ + description: string; + }; + }; + }; + }; + } + )[]; + response_format?: Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_JSON_Mode_3; + /** + * If true, a chat template is not applied and you must adhere to the specific model's expected formatting. + */ + raw?: boolean; + /** + * If true, the response will be streamed back incrementally using SSE, Server Sent Events. + */ + stream?: boolean; + /** + * The maximum number of tokens to generate in the response. + */ + max_tokens?: number; + /** + * Controls the randomness of the output; higher values produce more random results. + */ + temperature?: number; + /** + * Adjusts the creativity of the AI's responses by controlling how many possible words it considers. 
Lower values make outputs more predictable; higher values allow for more varied and creative responses. + */ + top_p?: number; + /** + * Limits the AI to choose from the top 'k' most probable words. Lower values make responses more focused; higher values introduce more variety and potential surprises. + */ + top_k?: number; + /** + * Random seed for reproducibility of the generation. + */ + seed?: number; + /** + * Penalty for repeated tokens; higher values discourage repetition. + */ + repetition_penalty?: number; + /** + * Decreases the likelihood of the model repeating the same lines verbatim. + */ + frequency_penalty?: number; + /** + * Increases the likelihood of the model introducing new topics. + */ + presence_penalty?: number; +} +interface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_JSON_Mode_3 { + type?: 'json_object' | 'json_schema'; + json_schema?: unknown; +} +type Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Output = + | Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Chat_Completion_Response + | Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Text_Completion_Response + | string + | Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_AsyncResponse; +interface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Chat_Completion_Response { + /** + * Unique identifier for the completion + */ + id?: string; + /** + * Object type identifier + */ + object?: 'chat.completion'; + /** + * Unix timestamp of when the completion was created + */ + created?: number; + /** + * Model used for the completion + */ + model?: string; + /** + * List of completion choices + */ + choices?: { + /** + * Index of the choice in the list + */ + index?: number; + /** + * The message generated by the model + */ + message?: { + /** + * Role of the message author + */ + role: string; + /** + * The content of the message + */ + content: string; + /** + * Internal reasoning content (if available) + */ + reasoning_content?: string; + /** + * Tool calls made by the assistant + */ + tool_calls?: { + /** + * Unique 
identifier for the tool call + */ + id: string; + /** + * Type of tool call + */ + type: 'function'; + function: { + /** + * Name of the function to call + */ + name: string; + /** + * JSON string of arguments for the function + */ + arguments: string; + }; + }[]; + }; + /** + * Reason why the model stopped generating + */ + finish_reason?: string; + /** + * Stop reason (may be null) + */ + stop_reason?: string | null; + /** + * Log probabilities (if requested) + */ + logprobs?: {} | null; + }[]; + /** + * Usage statistics for the inference request + */ + usage?: { + /** + * Total number of tokens in input + */ + prompt_tokens?: number; + /** + * Total number of tokens in output + */ + completion_tokens?: number; + /** + * Total number of input and output tokens + */ + total_tokens?: number; + }; + /** + * Log probabilities for the prompt (if requested) + */ + prompt_logprobs?: {} | null; +} +interface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Text_Completion_Response { + /** + * Unique identifier for the completion + */ + id?: string; + /** + * Object type identifier + */ + object?: 'text_completion'; + /** + * Unix timestamp of when the completion was created + */ + created?: number; + /** + * Model used for the completion + */ + model?: string; + /** + * List of completion choices + */ + choices?: { + /** + * Index of the choice in the list + */ + index: number; + /** + * The generated text completion + */ + text: string; + /** + * Reason why the model stopped generating + */ + finish_reason: string; + /** + * Stop reason (may be null) + */ + stop_reason?: string | null; + /** + * Log probabilities (if requested) + */ + logprobs?: {} | null; + /** + * Log probabilities for the prompt (if requested) + */ + prompt_logprobs?: {} | null; + }[]; + /** + * Usage statistics for the inference request + */ + usage?: { + /** + * Total number of tokens in input + */ + prompt_tokens?: number; + /** + * Total number of tokens in output + */ + completion_tokens?: number; + 
/** + * Total number of input and output tokens + */ + total_tokens?: number; + }; +} +interface Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_AsyncResponse { + /** + * The async request id that can be used to obtain the results. + */ + request_id?: string; +} +declare abstract class Base_Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It { + inputs: Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Input; + postProcessedOutputs: Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It_Output; +} +interface Ai_Cf_Pfnet_Plamo_Embedding_1B_Input { + /** + * Input text to embed. Can be a single string or a list of strings. + */ + text: string | string[]; +} +interface Ai_Cf_Pfnet_Plamo_Embedding_1B_Output { + /** + * Embedding vectors, where each vector is a list of floats. + */ + data: number[][]; + /** + * Shape of the embedding data as [number_of_embeddings, embedding_dimension]. + * + * @minItems 2 + * @maxItems 2 + */ + shape: [number, number]; +} +declare abstract class Base_Ai_Cf_Pfnet_Plamo_Embedding_1B { + inputs: Ai_Cf_Pfnet_Plamo_Embedding_1B_Input; + postProcessedOutputs: Ai_Cf_Pfnet_Plamo_Embedding_1B_Output; +} +interface Ai_Cf_Deepgram_Flux_Input { + /** + * Encoding of the audio stream. Currently only supports raw signed little-endian 16-bit PCM. + */ + encoding: 'linear16'; + /** + * Sample rate of the audio stream in Hz. + */ + sample_rate: string; + /** + * End-of-turn confidence required to fire an eager end-of-turn event. When set, enables EagerEndOfTurn and TurnResumed events. Valid Values 0.3 - 0.9. + */ + eager_eot_threshold?: string; + /** + * End-of-turn confidence required to finish a turn. Valid Values 0.5 - 0.9. + */ + eot_threshold?: string; + /** + * A turn will be finished when this much time has passed after speech, regardless of EOT confidence. + */ + eot_timeout_ms?: string; + /** + * Keyterm prompting can improve recognition of specialized terminology. Pass multiple keyterm query parameters to boost multiple keyterms. 
+ */ + keyterm?: string; + /** + * Opts out requests from the Deepgram Model Improvement Program. Refer to Deepgram Docs for pricing impacts before setting this to true. https://dpgr.am/deepgram-mip + */ + mip_opt_out?: 'true' | 'false'; + /** + * Label your requests for the purpose of identification during usage reporting + */ + tag?: string; +} +/** + * Output will be returned as websocket messages. + */ +interface Ai_Cf_Deepgram_Flux_Output { + /** + * The unique identifier of the request (uuid) + */ + request_id?: string; + /** + * Starts at 0 and increments for each message the server sends to the client. + */ + sequence_id?: number; + /** + * The type of event being reported. + */ + event?: 'Update' | 'StartOfTurn' | 'EagerEndOfTurn' | 'TurnResumed' | 'EndOfTurn'; + /** + * The index of the current turn + */ + turn_index?: number; + /** + * Start time in seconds of the audio range that was transcribed + */ + audio_window_start?: number; + /** + * End time in seconds of the audio range that was transcribed + */ + audio_window_end?: number; + /** + * Text that was said over the course of the current turn + */ + transcript?: string; + /** + * The words in the transcript + */ + words?: { + /** + * The individual punctuated, properly-cased word from the transcript + */ + word: string; + /** + * Confidence that this word was transcribed correctly + */ + confidence: number; + }[]; + /** + * Confidence that no more speech is coming in this turn + */ + end_of_turn_confidence?: number; +} +declare abstract class Base_Ai_Cf_Deepgram_Flux { + inputs: Ai_Cf_Deepgram_Flux_Input; + postProcessedOutputs: Ai_Cf_Deepgram_Flux_Output; +} +interface Ai_Cf_Deepgram_Aura_2_En_Input { + /** + * Speaker used to produce the audio. 
+ */ + speaker?: + | 'amalthea' + | 'andromeda' + | 'apollo' + | 'arcas' + | 'aries' + | 'asteria' + | 'athena' + | 'atlas' + | 'aurora' + | 'callista' + | 'cora' + | 'cordelia' + | 'delia' + | 'draco' + | 'electra' + | 'harmonia' + | 'helena' + | 'hera' + | 'hermes' + | 'hyperion' + | 'iris' + | 'janus' + | 'juno' + | 'jupiter' + | 'luna' + | 'mars' + | 'minerva' + | 'neptune' + | 'odysseus' + | 'ophelia' + | 'orion' + | 'orpheus' + | 'pandora' + | 'phoebe' + | 'pluto' + | 'saturn' + | 'thalia' + | 'theia' + | 'vesta' + | 'zeus'; + /** + * Encoding of the output audio. + */ + encoding?: 'linear16' | 'flac' | 'mulaw' | 'alaw' | 'mp3' | 'opus' | 'aac'; + /** + * Container specifies the file format wrapper for the output audio. The available options depend on the encoding type.. + */ + container?: 'none' | 'wav' | 'ogg'; + /** + * The text content to be converted to speech + */ + text: string; + /** + * Sample Rate specifies the sample rate for the output audio. Based on the encoding, different sample rates are supported. For some encodings, the sample rate is not configurable + */ + sample_rate?: number; + /** + * The bitrate of the audio in bits per second. Choose from predefined ranges or specific values based on the encoding type. + */ + bit_rate?: number; +} +/** + * The generated audio in MP3 format + */ +type Ai_Cf_Deepgram_Aura_2_En_Output = string; +declare abstract class Base_Ai_Cf_Deepgram_Aura_2_En { + inputs: Ai_Cf_Deepgram_Aura_2_En_Input; + postProcessedOutputs: Ai_Cf_Deepgram_Aura_2_En_Output; +} +interface Ai_Cf_Deepgram_Aura_2_Es_Input { + /** + * Speaker used to produce the audio. + */ + speaker?: + | 'sirio' + | 'nestor' + | 'carina' + | 'celeste' + | 'alvaro' + | 'diana' + | 'aquila' + | 'selena' + | 'estrella' + | 'javier'; + /** + * Encoding of the output audio. + */ + encoding?: 'linear16' | 'flac' | 'mulaw' | 'alaw' | 'mp3' | 'opus' | 'aac'; + /** + * Container specifies the file format wrapper for the output audio. 
The available options depend on the encoding type.. + */ + container?: 'none' | 'wav' | 'ogg'; + /** + * The text content to be converted to speech + */ + text: string; + /** + * Sample Rate specifies the sample rate for the output audio. Based on the encoding, different sample rates are supported. For some encodings, the sample rate is not configurable + */ + sample_rate?: number; + /** + * The bitrate of the audio in bits per second. Choose from predefined ranges or specific values based on the encoding type. + */ + bit_rate?: number; +} +/** + * The generated audio in MP3 format + */ +type Ai_Cf_Deepgram_Aura_2_Es_Output = string; +declare abstract class Base_Ai_Cf_Deepgram_Aura_2_Es { + inputs: Ai_Cf_Deepgram_Aura_2_Es_Input; + postProcessedOutputs: Ai_Cf_Deepgram_Aura_2_Es_Output; +} +interface AiModels { + '@cf/huggingface/distilbert-sst-2-int8': BaseAiTextClassification; + '@cf/stabilityai/stable-diffusion-xl-base-1.0': BaseAiTextToImage; + '@cf/runwayml/stable-diffusion-v1-5-inpainting': BaseAiTextToImage; + '@cf/runwayml/stable-diffusion-v1-5-img2img': BaseAiTextToImage; + '@cf/lykon/dreamshaper-8-lcm': BaseAiTextToImage; + '@cf/bytedance/stable-diffusion-xl-lightning': BaseAiTextToImage; + '@cf/myshell-ai/melotts': BaseAiTextToSpeech; + '@cf/google/embeddinggemma-300m': BaseAiTextEmbeddings; + '@cf/microsoft/resnet-50': BaseAiImageClassification; + '@cf/meta/llama-2-7b-chat-int8': BaseAiTextGeneration; + '@cf/mistral/mistral-7b-instruct-v0.1': BaseAiTextGeneration; + '@cf/meta/llama-2-7b-chat-fp16': BaseAiTextGeneration; + '@hf/thebloke/llama-2-13b-chat-awq': BaseAiTextGeneration; + '@hf/thebloke/mistral-7b-instruct-v0.1-awq': BaseAiTextGeneration; + '@hf/thebloke/zephyr-7b-beta-awq': BaseAiTextGeneration; + '@hf/thebloke/openhermes-2.5-mistral-7b-awq': BaseAiTextGeneration; + '@hf/thebloke/neural-chat-7b-v3-1-awq': BaseAiTextGeneration; + '@hf/thebloke/llamaguard-7b-awq': BaseAiTextGeneration; + '@hf/thebloke/deepseek-coder-6.7b-base-awq': 
BaseAiTextGeneration; + '@hf/thebloke/deepseek-coder-6.7b-instruct-awq': BaseAiTextGeneration; + '@cf/deepseek-ai/deepseek-math-7b-instruct': BaseAiTextGeneration; + '@cf/defog/sqlcoder-7b-2': BaseAiTextGeneration; + '@cf/openchat/openchat-3.5-0106': BaseAiTextGeneration; + '@cf/tiiuae/falcon-7b-instruct': BaseAiTextGeneration; + '@cf/thebloke/discolm-german-7b-v1-awq': BaseAiTextGeneration; + '@cf/qwen/qwen1.5-0.5b-chat': BaseAiTextGeneration; + '@cf/qwen/qwen1.5-7b-chat-awq': BaseAiTextGeneration; + '@cf/qwen/qwen1.5-14b-chat-awq': BaseAiTextGeneration; + '@cf/tinyllama/tinyllama-1.1b-chat-v1.0': BaseAiTextGeneration; + '@cf/microsoft/phi-2': BaseAiTextGeneration; + '@cf/qwen/qwen1.5-1.8b-chat': BaseAiTextGeneration; + '@cf/mistral/mistral-7b-instruct-v0.2-lora': BaseAiTextGeneration; + '@hf/nousresearch/hermes-2-pro-mistral-7b': BaseAiTextGeneration; + '@hf/nexusflow/starling-lm-7b-beta': BaseAiTextGeneration; + '@hf/google/gemma-7b-it': BaseAiTextGeneration; + '@cf/meta-llama/llama-2-7b-chat-hf-lora': BaseAiTextGeneration; + '@cf/google/gemma-2b-it-lora': BaseAiTextGeneration; + '@cf/google/gemma-7b-it-lora': BaseAiTextGeneration; + '@hf/mistral/mistral-7b-instruct-v0.2': BaseAiTextGeneration; + '@cf/meta/llama-3-8b-instruct': BaseAiTextGeneration; + '@cf/fblgit/una-cybertron-7b-v2-bf16': BaseAiTextGeneration; + '@cf/meta/llama-3-8b-instruct-awq': BaseAiTextGeneration; + '@cf/meta/llama-3.1-8b-instruct-fp8': BaseAiTextGeneration; + '@cf/meta/llama-3.1-8b-instruct-awq': BaseAiTextGeneration; + '@cf/meta/llama-3.2-3b-instruct': BaseAiTextGeneration; + '@cf/meta/llama-3.2-1b-instruct': BaseAiTextGeneration; + '@cf/deepseek-ai/deepseek-r1-distill-qwen-32b': BaseAiTextGeneration; + '@cf/ibm-granite/granite-4.0-h-micro': BaseAiTextGeneration; + '@cf/facebook/bart-large-cnn': BaseAiSummarization; + '@cf/llava-hf/llava-1.5-7b-hf': BaseAiImageToText; + '@cf/baai/bge-base-en-v1.5': Base_Ai_Cf_Baai_Bge_Base_En_V1_5; + '@cf/openai/whisper': Base_Ai_Cf_Openai_Whisper; + 
'@cf/meta/m2m100-1.2b': Base_Ai_Cf_Meta_M2M100_1_2B; + '@cf/baai/bge-small-en-v1.5': Base_Ai_Cf_Baai_Bge_Small_En_V1_5; + '@cf/baai/bge-large-en-v1.5': Base_Ai_Cf_Baai_Bge_Large_En_V1_5; + '@cf/unum/uform-gen2-qwen-500m': Base_Ai_Cf_Unum_Uform_Gen2_Qwen_500M; + '@cf/openai/whisper-tiny-en': Base_Ai_Cf_Openai_Whisper_Tiny_En; + '@cf/openai/whisper-large-v3-turbo': Base_Ai_Cf_Openai_Whisper_Large_V3_Turbo; + '@cf/baai/bge-m3': Base_Ai_Cf_Baai_Bge_M3; + '@cf/black-forest-labs/flux-1-schnell': Base_Ai_Cf_Black_Forest_Labs_Flux_1_Schnell; + '@cf/meta/llama-3.2-11b-vision-instruct': Base_Ai_Cf_Meta_Llama_3_2_11B_Vision_Instruct; + '@cf/meta/llama-3.3-70b-instruct-fp8-fast': Base_Ai_Cf_Meta_Llama_3_3_70B_Instruct_Fp8_Fast; + '@cf/meta/llama-guard-3-8b': Base_Ai_Cf_Meta_Llama_Guard_3_8B; + '@cf/baai/bge-reranker-base': Base_Ai_Cf_Baai_Bge_Reranker_Base; + '@cf/qwen/qwen2.5-coder-32b-instruct': Base_Ai_Cf_Qwen_Qwen2_5_Coder_32B_Instruct; + '@cf/qwen/qwq-32b': Base_Ai_Cf_Qwen_Qwq_32B; + '@cf/mistralai/mistral-small-3.1-24b-instruct': Base_Ai_Cf_Mistralai_Mistral_Small_3_1_24B_Instruct; + '@cf/google/gemma-3-12b-it': Base_Ai_Cf_Google_Gemma_3_12B_It; + '@cf/meta/llama-4-scout-17b-16e-instruct': Base_Ai_Cf_Meta_Llama_4_Scout_17B_16E_Instruct; + '@cf/qwen/qwen3-30b-a3b-fp8': Base_Ai_Cf_Qwen_Qwen3_30B_A3B_Fp8; + '@cf/deepgram/nova-3': Base_Ai_Cf_Deepgram_Nova_3; + '@cf/qwen/qwen3-embedding-0.6b': Base_Ai_Cf_Qwen_Qwen3_Embedding_0_6B; + '@cf/pipecat-ai/smart-turn-v2': Base_Ai_Cf_Pipecat_Ai_Smart_Turn_V2; + '@cf/openai/gpt-oss-120b': Base_Ai_Cf_Openai_Gpt_Oss_120B; + '@cf/openai/gpt-oss-20b': Base_Ai_Cf_Openai_Gpt_Oss_20B; + '@cf/leonardo/phoenix-1.0': Base_Ai_Cf_Leonardo_Phoenix_1_0; + '@cf/leonardo/lucid-origin': Base_Ai_Cf_Leonardo_Lucid_Origin; + '@cf/deepgram/aura-1': Base_Ai_Cf_Deepgram_Aura_1; + '@cf/ai4bharat/indictrans2-en-indic-1B': Base_Ai_Cf_Ai4Bharat_Indictrans2_En_Indic_1B; + '@cf/aisingapore/gemma-sea-lion-v4-27b-it': Base_Ai_Cf_Aisingapore_Gemma_Sea_Lion_V4_27B_It; 
+ '@cf/pfnet/plamo-embedding-1b': Base_Ai_Cf_Pfnet_Plamo_Embedding_1B; + '@cf/deepgram/flux': Base_Ai_Cf_Deepgram_Flux; + '@cf/deepgram/aura-2-en': Base_Ai_Cf_Deepgram_Aura_2_En; + '@cf/deepgram/aura-2-es': Base_Ai_Cf_Deepgram_Aura_2_Es; +} +type AiOptions = { + /** + * Send requests as an asynchronous batch job, only works for supported models + * https://developers.cloudflare.com/workers-ai/features/batch-api + */ + queueRequest?: boolean; + /** + * Establish websocket connections, only works for supported models + */ + websocket?: boolean; + /** + * Tag your requests to group and view them in Cloudflare dashboard. + * + * Rules: + * Tags must only contain letters, numbers, and the symbols: : - . / @ + * Each tag can have maximum 50 characters. + * Maximum 5 tags are allowed each request. + * Duplicate tags will removed. + */ + tags?: string[]; + gateway?: GatewayOptions; + returnRawResponse?: boolean; + prefix?: string; + extraHeaders?: object; +}; +type AiModelsSearchParams = { + author?: string; + hide_experimental?: boolean; + page?: number; + per_page?: number; + search?: string; + source?: number; + task?: string; +}; +type AiModelsSearchObject = { + id: string; + source: number; + name: string; + description: string; + task: { + id: string; + name: string; + description: string; + }; + tags: string[]; + properties: { + property_id: string; + value: string; + }[]; +}; +interface InferenceUpstreamError extends Error {} +interface AiInternalError extends Error {} +type AiModelListType = Record; +declare abstract class Ai { + aiGatewayLogId: string | null; + gateway(gatewayId: string): AiGateway; + /** + * Access the AI Search API for managing AI-powered search instances. 
+ * + * This is the new API that replaces AutoRAG with better namespace separation: + * - Account-level operations: `list()`, `create()` + * - Instance-level operations: `get(id).search()`, `get(id).chatCompletions()`, `get(id).delete()` + * + * @example + * ```typescript + * // List all AI Search instances + * const instances = await env.AI.aiSearch.list(); + * + * // Search an instance + * const results = await env.AI.aiSearch.get('my-search').search({ + * messages: [{ role: 'user', content: 'What is the policy?' }], + * ai_search_options: { + * retrieval: { max_num_results: 10 } + * } + * }); + * + * // Generate chat completions with AI Search context + * const response = await env.AI.aiSearch.get('my-search').chatCompletions({ + * messages: [{ role: 'user', content: 'What is the policy?' }], + * model: '@cf/meta/llama-3.3-70b-instruct-fp8-fast' + * }); + * ``` + */ + aiSearch(): AiSearchAccountService; + /** + * @deprecated AutoRAG has been replaced by AI Search. + * Use `env.AI.aiSearch` instead for better API design and new features. + * + * Migration guide: + * - `env.AI.autorag().list()` → `env.AI.aiSearch.list()` + * - `env.AI.autorag('id').search({ query: '...' })` → `env.AI.aiSearch.get('id').search({ messages: [{ role: 'user', content: '...' }] })` + * - `env.AI.autorag('id').aiSearch(...)` → `env.AI.aiSearch.get('id').chatCompletions(...)` + * + * Note: The old API continues to work for backwards compatibility, but new projects should use AI Search. + * + * @see AiSearchAccountService + * @param autoragId Optional instance ID (omit for account-level operations) + */ + autorag(autoragId: string): AutoRAG; + run( + model: Name, + inputs: InputOptions, + options?: Options, + ): Promise< + Options extends + | { + returnRawResponse: true; + } + | { + websocket: true; + } + ? Response + : InputOptions extends { + stream: true; + } + ? 
ReadableStream + : AiModelList[Name]['postProcessedOutputs'] + >; + models(params?: AiModelsSearchParams): Promise; + toMarkdown(): ToMarkdownService; + toMarkdown(files: MarkdownDocument[], options?: ConversionRequestOptions): Promise; + toMarkdown(files: MarkdownDocument, options?: ConversionRequestOptions): Promise; +} +type GatewayRetries = { + maxAttempts?: 1 | 2 | 3 | 4 | 5; + retryDelayMs?: number; + backoff?: 'constant' | 'linear' | 'exponential'; +}; +type GatewayOptions = { + id: string; + cacheKey?: string; + cacheTtl?: number; + skipCache?: boolean; + metadata?: Record; + collectLog?: boolean; + eventId?: string; + requestTimeoutMs?: number; + retries?: GatewayRetries; +}; +type UniversalGatewayOptions = Exclude & { + /** + ** @deprecated + */ + id?: string; +}; +type AiGatewayPatchLog = { + score?: number | null; + feedback?: -1 | 1 | null; + metadata?: Record | null; +}; +type AiGatewayLog = { + id: string; + provider: string; + model: string; + model_type?: string; + path: string; + duration: number; + request_type?: string; + request_content_type?: string; + status_code: number; + response_content_type?: string; + success: boolean; + cached: boolean; + tokens_in?: number; + tokens_out?: number; + metadata?: Record; + step?: number; + cost?: number; + custom_cost?: boolean; + request_size: number; + request_head?: string; + request_head_complete: boolean; + response_size: number; + response_head?: string; + response_head_complete: boolean; + created_at: Date; +}; +type AIGatewayProviders = + | 'workers-ai' + | 'anthropic' + | 'aws-bedrock' + | 'azure-openai' + | 'google-vertex-ai' + | 'huggingface' + | 'openai' + | 'perplexity-ai' + | 'replicate' + | 'groq' + | 'cohere' + | 'google-ai-studio' + | 'mistral' + | 'grok' + | 'openrouter' + | 'deepseek' + | 'cerebras' + | 'cartesia' + | 'elevenlabs' + | 'adobe-firefly'; +type AIGatewayHeaders = { + 'cf-aig-metadata': Record | string; + 'cf-aig-custom-cost': + | { + per_token_in?: number; + per_token_out?: 
number; + } + | { + total_cost?: number; + } + | string; + 'cf-aig-cache-ttl': number | string; + 'cf-aig-skip-cache': boolean | string; + 'cf-aig-cache-key': string; + 'cf-aig-event-id': string; + 'cf-aig-request-timeout': number | string; + 'cf-aig-max-attempts': number | string; + 'cf-aig-retry-delay': number | string; + 'cf-aig-backoff': string; + 'cf-aig-collect-log': boolean | string; + Authorization: string; + 'Content-Type': string; + [key: string]: string | number | boolean | object; +}; +type AIGatewayUniversalRequest = { + provider: AIGatewayProviders | string; // eslint-disable-line + endpoint: string; + headers: Partial; + query: unknown; +}; +interface AiGatewayInternalError extends Error {} +interface AiGatewayLogNotFound extends Error {} +declare abstract class AiGateway { + patchLog(logId: string, data: AiGatewayPatchLog): Promise; + getLog(logId: string): Promise; + run( + data: AIGatewayUniversalRequest | AIGatewayUniversalRequest[], + options?: { + gateway?: UniversalGatewayOptions; + extraHeaders?: object; + }, + ): Promise; + getUrl(provider?: AIGatewayProviders | string): Promise; // eslint-disable-line +} +/** + * @deprecated AutoRAG has been replaced by AI Search. Use AiSearchInternalError instead. + * @see AiSearchInternalError + */ +interface AutoRAGInternalError extends Error {} +/** + * @deprecated AutoRAG has been replaced by AI Search. Use AiSearchNotFoundError instead. + * @see AiSearchNotFoundError + */ +interface AutoRAGNotFoundError extends Error {} +/** + * @deprecated This error type is no longer used in the AI Search API. + */ +interface AutoRAGUnauthorizedError extends Error {} +/** + * @deprecated AutoRAG has been replaced by AI Search. Use AiSearchNameNotSetError instead. 
+ * @see AiSearchNameNotSetError + */ +interface AutoRAGNameNotSetError extends Error {} +type ComparisonFilter = { + key: string; + type: 'eq' | 'ne' | 'gt' | 'gte' | 'lt' | 'lte'; + value: string | number | boolean; +}; +type CompoundFilter = { + type: 'and' | 'or'; + filters: ComparisonFilter[]; +}; +/** + * @deprecated AutoRAG has been replaced by AI Search. + * Use AiSearchSearchRequest with the new API instead. + * @see AiSearchSearchRequest + */ +type AutoRagSearchRequest = { + query: string; + filters?: CompoundFilter | ComparisonFilter; + max_num_results?: number; + ranking_options?: { + ranker?: string; + score_threshold?: number; + }; + reranking?: { + enabled?: boolean; + model?: string; + }; + rewrite_query?: boolean; +}; +/** + * @deprecated AutoRAG has been replaced by AI Search. + * Use AiSearchChatCompletionsRequest with the new API instead. + * @see AiSearchChatCompletionsRequest + */ +type AutoRagAiSearchRequest = AutoRagSearchRequest & { + stream?: boolean; + system_prompt?: string; +}; +/** + * @deprecated AutoRAG has been replaced by AI Search. + * Use AiSearchChatCompletionsRequest with stream: true instead. + * @see AiSearchChatCompletionsRequest + */ +type AutoRagAiSearchRequestStreaming = Omit & { + stream: true; +}; +/** + * @deprecated AutoRAG has been replaced by AI Search. + * Use AiSearchSearchResponse with the new API instead. + * @see AiSearchSearchResponse + */ +type AutoRagSearchResponse = { + object: 'vector_store.search_results.page'; + search_query: string; + data: { + file_id: string; + filename: string; + score: number; + attributes: Record; + content: { + type: 'text'; + text: string; + }[]; + }[]; + has_more: boolean; + next_page: string | null; +}; +/** + * @deprecated AutoRAG has been replaced by AI Search. + * Use AiSearchListResponse with the new API instead. 
+ * @see AiSearchListResponse + */ +type AutoRagListResponse = { + id: string; + enable: boolean; + type: string; + source: string; + vectorize_name: string; + paused: boolean; + status: string; +}[]; +/** + * @deprecated AutoRAG has been replaced by AI Search. + * The new API returns different response formats for chat completions. + */ +type AutoRagAiSearchResponse = AutoRagSearchResponse & { + response: string; +}; +/** + * @deprecated AutoRAG has been replaced by AI Search. + * Use the new AI Search API instead: `env.AI.aiSearch` + * + * Migration guide: + * - `env.AI.autorag().list()` → `env.AI.aiSearch.list()` + * - `env.AI.autorag('id').search(...)` → `env.AI.aiSearch.get('id').search(...)` + * - `env.AI.autorag('id').aiSearch(...)` → `env.AI.aiSearch.get('id').chatCompletions(...)` + * + * @see AiSearchAccountService + * @see AiSearchInstanceService + */ +declare abstract class AutoRAG { + /** + * @deprecated Use `env.AI.aiSearch.list()` instead. + * @see AiSearchAccountService.list + */ + list(): Promise; + /** + * @deprecated Use `env.AI.aiSearch.get(id).search(...)` instead. + * Note: The new API uses a messages array instead of a query string. + * @see AiSearchInstanceService.search + */ + search(params: AutoRagSearchRequest): Promise; + /** + * @deprecated Use `env.AI.aiSearch.get(id).chatCompletions(...)` instead. + * @see AiSearchInstanceService.chatCompletions + */ + aiSearch(params: AutoRagAiSearchRequestStreaming): Promise; + /** + * @deprecated Use `env.AI.aiSearch.get(id).chatCompletions(...)` instead. + * @see AiSearchInstanceService.chatCompletions + */ + aiSearch(params: AutoRagAiSearchRequest): Promise; + /** + * @deprecated Use `env.AI.aiSearch.get(id).chatCompletions(...)` instead. + * @see AiSearchInstanceService.chatCompletions + */ + aiSearch(params: AutoRagAiSearchRequest): Promise; +} +interface BasicImageTransformations { + /** + * Maximum width in image pixels. The value must be an integer. 
+ */ + width?: number; + /** + * Maximum height in image pixels. The value must be an integer. + */ + height?: number; + /** + * Resizing mode as a string. It affects interpretation of width and height + * options: + * - scale-down: Similar to contain, but the image is never enlarged. If + * the image is larger than given width or height, it will be resized. + * Otherwise its original size will be kept. + * - contain: Resizes to maximum size that fits within the given width and + * height. If only a single dimension is given (e.g. only width), the + * image will be shrunk or enlarged to exactly match that dimension. + * Aspect ratio is always preserved. + * - cover: Resizes (shrinks or enlarges) to fill the entire area of width + * and height. If the image has an aspect ratio different from the ratio + * of width and height, it will be cropped to fit. + * - crop: The image will be shrunk and cropped to fit within the area + * specified by width and height. The image will not be enlarged. For images + * smaller than the given dimensions it's the same as scale-down. For + * images larger than the given dimensions, it's the same as cover. + * See also trim. + * - pad: Resizes to the maximum size that fits within the given width and + * height, and then fills the remaining area with a background color + * (white by default). Use of this mode is not recommended, as the same + * effect can be more efficiently achieved with the contain mode and the + * CSS object-fit: contain property. + * - squeeze: Stretches and deforms to the width and height given, even if it + * breaks aspect ratio + */ + fit?: 'scale-down' | 'contain' | 'cover' | 'crop' | 'pad' | 'squeeze'; + /** + * Image segmentation using artificial intelligence models. Sets pixels not + * within selected segment area to transparent e.g "foreground" sets every + * background pixel as transparent. 
+ */ + segment?: 'foreground'; + /** + * When cropping with fit: "cover", this defines the side or point that should + * be left uncropped. The value is either a string + * "left", "right", "top", "bottom", "auto", or "center" (the default), + * or an object {x, y} containing focal point coordinates in the original + * image expressed as fractions ranging from 0.0 (top or left) to 1.0 + * (bottom or right), 0.5 being the center. {fit: "cover", gravity: "top"} will + * crop bottom or left and right sides as necessary, but won’t crop anything + * from the top. {fit: "cover", gravity: {x:0.5, y:0.2}} will crop each side to + * preserve as much as possible around a point at 20% of the height of the + * source image. + */ + gravity?: + | 'face' + | 'left' + | 'right' + | 'top' + | 'bottom' + | 'center' + | 'auto' + | 'entropy' + | BasicImageTransformationsGravityCoordinates; + /** + * Background color to add underneath the image. Applies only to images with + * transparency (such as PNG). Accepts any CSS color (#RRGGBB, rgba(…), + * hsl(…), etc.) + */ + background?: string; + /** + * Number of degrees (90, 180, 270) to rotate the image by. width and height + * options refer to axes after rotation. + */ + rotate?: 0 | 90 | 180 | 270 | 360; +} +interface BasicImageTransformationsGravityCoordinates { + x?: number; + y?: number; + mode?: 'remainder' | 'box-center'; +} +/** + * In addition to the properties you can set in the RequestInit dict + * that you pass as an argument to the Request constructor, you can + * set certain properties of a `cf` object to control how Cloudflare + * features are applied to that new Request. + * + * Note: Currently, these properties cannot be tested in the + * playground. + */ +interface RequestInitCfProperties extends Record { + cacheEverything?: boolean; + /** + * A request's cache key is what determines if two requests are + * "the same" for caching purposes. 
If a request has the same cache key + * as some previous request, then we can serve the same cached response for + * both. (e.g. 'some-key') + * + * Only available for Enterprise customers. + */ + cacheKey?: string; + /** + * This allows you to append additional Cache-Tag response headers + * to the origin response without modifications to the origin server. + * This will allow for greater control over the Purge by Cache Tag feature + * utilizing changes only in the Workers process. + * + * Only available for Enterprise customers. + */ + cacheTags?: string[]; + /** + * Force response to be cached for a given number of seconds. (e.g. 300) + */ + cacheTtl?: number; + /** + * Force response to be cached for a given number of seconds based on the Origin status code. + * (e.g. { '200-299': 86400, '404': 1, '500-599': 0 }) + */ + cacheTtlByStatus?: Record; + scrapeShield?: boolean; + apps?: boolean; + image?: RequestInitCfPropertiesImage; + minify?: RequestInitCfPropertiesImageMinify; + mirage?: boolean; + polish?: 'lossy' | 'lossless' | 'off'; + r2?: RequestInitCfPropertiesR2; + /** + * Redirects the request to an alternate origin server. You can use this, + * for example, to implement load balancing across several origins. + * (e.g.us-east.example.com) + * + * Note - For security reasons, the hostname set in resolveOverride must + * be proxied on the same Cloudflare zone of the incoming request. + * Otherwise, the setting is ignored. CNAME hosts are allowed, so to + * resolve to a host under a different domain or a DNS only domain first + * declare a CNAME record within your own zone’s DNS mapping to the + * external hostname, set proxy on Cloudflare, then set resolveOverride + * to point to that CNAME record. + */ + resolveOverride?: string; +} +interface RequestInitCfPropertiesImageDraw extends BasicImageTransformations { + /** + * Absolute URL of the image file to use for the drawing. It can be any of + * the supported file formats. 
For drawing of watermarks or non-rectangular + * overlays we recommend using PNG or WebP images. + */ + url: string; + /** + * Floating-point number between 0 (transparent) and 1 (opaque). + * For example, opacity: 0.5 makes overlay semitransparent. + */ + opacity?: number; + /** + * - If set to true, the overlay image will be tiled to cover the entire + * area. This is useful for stock-photo-like watermarks. + * - If set to "x", the overlay image will be tiled horizontally only + * (form a line). + * - If set to "y", the overlay image will be tiled vertically only + * (form a line). + */ + repeat?: true | 'x' | 'y'; + /** + * Position of the overlay image relative to a given edge. Each property is + * an offset in pixels. 0 aligns exactly to the edge. For example, left: 10 + * positions left side of the overlay 10 pixels from the left edge of the + * image it's drawn over. bottom: 0 aligns bottom of the overlay with bottom + * of the background image. + * + * Setting both left & right, or both top & bottom is an error. + * + * If no position is specified, the image will be centered. + */ + top?: number; + left?: number; + bottom?: number; + right?: number; +} +interface RequestInitCfPropertiesImage extends BasicImageTransformations { + /** + * Device Pixel Ratio. Default 1. Multiplier for width/height that makes it + * easier to specify higher-DPI sizes in . + */ + dpr?: number; + /** + * Allows you to trim your image. Takes dpr into account and is performed before + * resizing or rotation. + * + * It can be used as: + * - left, top, right, bottom - it will specify the number of pixels to cut + * off each side + * - width, height - the width/height you'd like to end up with - can be used + * in combination with the properties above + * - border - this will automatically trim the surroundings of an image based on + * it's color. 
It consists of three properties: + * - color: rgb or hex representation of the color you wish to trim (todo: verify the rgba bit) + * - tolerance: difference from color to treat as color + * - keep: the number of pixels of border to keep + */ + trim?: + | 'border' + | { + top?: number; + bottom?: number; + left?: number; + right?: number; + width?: number; + height?: number; + border?: + | boolean + | { + color?: string; + tolerance?: number; + keep?: number; + }; + }; + /** + * Quality setting from 1-100 (useful values are in 60-90 range). Lower values + * make images look worse, but load faster. The default is 85. It applies only + * to JPEG and WebP images. It doesn’t have any effect on PNG. + */ + quality?: number | 'low' | 'medium-low' | 'medium-high' | 'high'; + /** + * Output format to generate. It can be: + * - avif: generate images in AVIF format. + * - webp: generate images in Google WebP format. Set quality to 100 to get + * the WebP-lossless format. + * - json: instead of generating an image, outputs information about the + * image, in JSON format. The JSON object will contain image size + * (before and after resizing), source image’s MIME type, file size, etc. + * - jpeg: generate images in JPEG format. + * - png: generate images in PNG format. + */ + format?: 'avif' | 'webp' | 'json' | 'jpeg' | 'png' | 'baseline-jpeg' | 'png-force' | 'svg'; + /** + * Whether to preserve animation frames from input files. Default is true. + * Setting it to false reduces animations to still images. This setting is + * recommended when enlarging images or processing arbitrary user content, + * because large GIF animations can weigh tens or even hundreds of megabytes. + * It is also useful to set anim:false when using format:"json" to get the + * response quicker without the number of frames. + */ + anim?: boolean; + /** + * What EXIF data should be preserved in the output image. 
Note that EXIF + * rotation and embedded color profiles are always applied ("baked in" into + * the image), and aren't affected by this option. Note that if the Polish + * feature is enabled, all metadata may have been removed already and this + * option may have no effect. + * - keep: Preserve most of EXIF metadata, including GPS location if there's + * any. + * - copyright: Only keep the copyright tag, and discard everything else. + * This is the default behavior for JPEG files. + * - none: Discard all invisible EXIF metadata. Currently WebP and PNG + * output formats always discard metadata. + */ + metadata?: 'keep' | 'copyright' | 'none'; + /** + * Strength of sharpening filter to apply to the image. Floating-point + * number between 0 (no sharpening, default) and 10 (maximum). 1.0 is a + * recommended value for downscaled images. + */ + sharpen?: number; + /** + * Radius of a blur filter (approximate gaussian). Maximum supported radius + * is 250. + */ + blur?: number; + /** + * Overlays are drawn in the order they appear in the array (last array + * entry is the topmost layer). + */ + draw?: RequestInitCfPropertiesImageDraw[]; + /** + * Fetching image from authenticated origin. Setting this property will + * pass authentication headers (Authorization, Cookie, etc.) through to + * the origin. + */ + 'origin-auth'?: 'share-publicly'; + /** + * Adds a border around the image. The border is added after resizing. Border + * width takes dpr into account, and can be specified either using a single + * width property, or individually for each side. + */ + border?: + | { + color: string; + width: number; + } + | { + color: string; + top: number; + right: number; + bottom: number; + left: number; + }; + /** + * Increase brightness by a factor. A value of 1.0 equals no change, a value + * of 0.5 equals half brightness, and a value of 2.0 equals twice as bright. + * 0 is ignored. + */ + brightness?: number; + /** + * Increase contrast by a factor. 
A value of 1.0 equals no change, a value of + * 0.5 equals low contrast, and a value of 2.0 equals high contrast. 0 is + * ignored. + */ + contrast?: number; + /** + * Increase exposure by a factor. A value of 1.0 equals no change, a value of + * 0.5 darkens the image, and a value of 2.0 lightens the image. 0 is ignored. + */ + gamma?: number; + /** + * Increase contrast by a factor. A value of 1.0 equals no change, a value of + * 0.5 equals low contrast, and a value of 2.0 equals high contrast. 0 is + * ignored. + */ + saturation?: number; + /** + * Flips the images horizontally, vertically, or both. Flipping is applied before + * rotation, so if you apply flip=h,rotate=90 then the image will be flipped + * horizontally, then rotated by 90 degrees. + */ + flip?: 'h' | 'v' | 'hv'; + /** + * Slightly reduces latency on a cache miss by selecting a + * quickest-to-compress file format, at a cost of increased file size and + * lower image quality. It will usually override the format option and choose + * JPEG over WebP or AVIF. We do not recommend using this option, except in + * unusual circumstances like resizing uncacheable dynamically-generated + * images. + */ + compression?: 'fast'; +} +interface RequestInitCfPropertiesImageMinify { + javascript?: boolean; + css?: boolean; + html?: boolean; +} +interface RequestInitCfPropertiesR2 { + /** + * Colo id of bucket that an object is stored in + */ + bucketColoId?: number; +} +/** + * Request metadata provided by Cloudflare's edge. + */ +type IncomingRequestCfProperties = IncomingRequestCfPropertiesBase & + IncomingRequestCfPropertiesBotManagementEnterprise & + IncomingRequestCfPropertiesCloudflareForSaaSEnterprise & + IncomingRequestCfPropertiesGeographicInformation & + IncomingRequestCfPropertiesCloudflareAccessOrApiShield; +interface IncomingRequestCfPropertiesBase extends Record { + /** + * [ASN](https://www.iana.org/assignments/as-numbers/as-numbers.xhtml) of the incoming request. 
+ * + * @example 395747 + */ + asn?: number; + /** + * The organization which owns the ASN of the incoming request. + * + * @example "Google Cloud" + */ + asOrganization?: string; + /** + * The original value of the `Accept-Encoding` header if Cloudflare modified it. + * + * @example "gzip, deflate, br" + */ + clientAcceptEncoding?: string; + /** + * The number of milliseconds it took for the request to reach your worker. + * + * @example 22 + */ + clientTcpRtt?: number; + /** + * The three-letter [IATA](https://en.wikipedia.org/wiki/IATA_airport_code) + * airport code of the data center that the request hit. + * + * @example "DFW" + */ + colo: string; + /** + * Represents the upstream's response to a + * [TCP `keepalive` message](https://tldp.org/HOWTO/TCP-Keepalive-HOWTO/overview.html) + * from cloudflare. + * + * For workers with no upstream, this will always be `1`. + * + * @example 3 + */ + edgeRequestKeepAliveStatus: IncomingRequestCfPropertiesEdgeRequestKeepAliveStatus; + /** + * The HTTP Protocol the request used. + * + * @example "HTTP/2" + */ + httpProtocol: string; + /** + * The browser-requested prioritization information in the request object. + * + * If no information was set, defaults to the empty string `""` + * + * @example "weight=192;exclusive=0;group=3;group-weight=127" + * @default "" + */ + requestPriority: string; + /** + * The TLS version of the connection to Cloudflare. + * In requests served over plaintext (without TLS), this property is the empty string `""`. + * + * @example "TLSv1.3" + */ + tlsVersion: string; + /** + * The cipher for the connection to Cloudflare. + * In requests served over plaintext (without TLS), this property is the empty string `""`. + * + * @example "AEAD-AES128-GCM-SHA256" + */ + tlsCipher: string; + /** + * Metadata containing the [`HELLO`](https://www.rfc-editor.org/rfc/rfc5246#section-7.4.1.2) and [`FINISHED`](https://www.rfc-editor.org/rfc/rfc5246#section-7.4.9) messages from this request's TLS handshake. 
+ * + * If the incoming request was served over plaintext (without TLS) this field is undefined. + */ + tlsExportedAuthenticator?: IncomingRequestCfPropertiesExportedAuthenticatorMetadata; +} +interface IncomingRequestCfPropertiesBotManagementBase { + /** + * Cloudflare’s [level of certainty](https://developers.cloudflare.com/bots/concepts/bot-score/) that a request comes from a bot, + * represented as an integer percentage between `1` (almost certainly a bot) and `99` (almost certainly human). + * + * @example 54 + */ + score: number; + /** + * A boolean value that is true if the request comes from a good bot, like Google or Bing. + * Most customers choose to allow this traffic. For more details, see [Traffic from known bots](https://developers.cloudflare.com/firewall/known-issues-and-faq/#how-does-firewall-rules-handle-traffic-from-known-bots). + */ + verifiedBot: boolean; + /** + * A boolean value that is true if the request originates from a + * Cloudflare-verified proxy service. + */ + corporateProxy: boolean; + /** + * A boolean value that's true if the request matches [file extensions](https://developers.cloudflare.com/bots/reference/static-resources/) for many types of static resources. + */ + staticResource: boolean; + /** + * List of IDs that correlate to the Bot Management heuristic detections made on a request (you can have multiple heuristic detections on the same request). + */ + detectionIds: number[]; +} +interface IncomingRequestCfPropertiesBotManagement { + /** + * Results of Cloudflare's Bot Management analysis + */ + botManagement: IncomingRequestCfPropertiesBotManagementBase; + /** + * Duplicate of `botManagement.score`. 
+ * + * @deprecated + */ + clientTrustScore: number; +} +interface IncomingRequestCfPropertiesBotManagementEnterprise extends IncomingRequestCfPropertiesBotManagement { + /** + * Results of Cloudflare's Bot Management analysis + */ + botManagement: IncomingRequestCfPropertiesBotManagementBase & { + /** + * A [JA3 Fingerprint](https://developers.cloudflare.com/bots/concepts/ja3-fingerprint/) to help profile specific SSL/TLS clients + * across different destination IPs, Ports, and X509 certificates. + */ + ja3Hash: string; + }; +} +interface IncomingRequestCfPropertiesCloudflareForSaaSEnterprise { + /** + * Custom metadata set per-host in [Cloudflare for SaaS](https://developers.cloudflare.com/cloudflare-for-platforms/cloudflare-for-saas/). + * + * This field is only present if you have Cloudflare for SaaS enabled on your account + * and you have followed the [required steps to enable it]((https://developers.cloudflare.com/cloudflare-for-platforms/cloudflare-for-saas/domain-support/custom-metadata/)). + */ + hostMetadata?: HostMetadata; +} +interface IncomingRequestCfPropertiesCloudflareAccessOrApiShield { + /** + * Information about the client certificate presented to Cloudflare. + * + * This is populated when the incoming request is served over TLS using + * either Cloudflare Access or API Shield (mTLS) + * and the presented SSL certificate has a valid + * [Certificate Serial Number](https://ldapwiki.com/wiki/Certificate%20Serial%20Number) + * (i.e., not `null` or `""`). + * + * Otherwise, a set of placeholder values are used. + * + * The property `certPresented` will be set to `"1"` when + * the object is populated (i.e. the above conditions were met). 
+ */ + tlsClientAuth: IncomingRequestCfPropertiesTLSClientAuth | IncomingRequestCfPropertiesTLSClientAuthPlaceholder; +} +/** + * Metadata about the request's TLS handshake + */ +interface IncomingRequestCfPropertiesExportedAuthenticatorMetadata { + /** + * The client's [`HELLO` message](https://www.rfc-editor.org/rfc/rfc5246#section-7.4.1.2), encoded in hexadecimal + * + * @example "44372ba35fa1270921d318f34c12f155dc87b682cf36a790cfaa3ba8737a1b5d" + */ + clientHandshake: string; + /** + * The server's [`HELLO` message](https://www.rfc-editor.org/rfc/rfc5246#section-7.4.1.2), encoded in hexadecimal + * + * @example "44372ba35fa1270921d318f34c12f155dc87b682cf36a790cfaa3ba8737a1b5d" + */ + serverHandshake: string; + /** + * The client's [`FINISHED` message](https://www.rfc-editor.org/rfc/rfc5246#section-7.4.9), encoded in hexadecimal + * + * @example "084ee802fe1348f688220e2a6040a05b2199a761f33cf753abb1b006792d3f8b" + */ + clientFinished: string; + /** + * The server's [`FINISHED` message](https://www.rfc-editor.org/rfc/rfc5246#section-7.4.9), encoded in hexadecimal + * + * @example "084ee802fe1348f688220e2a6040a05b2199a761f33cf753abb1b006792d3f8b" + */ + serverFinished: string; +} +/** + * Geographic data about the request's origin. + */ +interface IncomingRequestCfPropertiesGeographicInformation { + /** + * The [ISO 3166-1 Alpha 2](https://www.iso.org/iso-3166-country-codes.html) country code the request originated from. + * + * If your worker is [configured to accept TOR connections](https://support.cloudflare.com/hc/en-us/articles/203306930-Understanding-Cloudflare-Tor-support-and-Onion-Routing), this may also be `"T1"`, indicating a request that originated over TOR. + * + * If Cloudflare is unable to determine where the request originated this property is omitted. + * + * The country code `"T1"` is used for requests originating on TOR. 
+ * + * @example "GB" + */ + country?: Iso3166Alpha2Code | 'T1'; + /** + * If present, this property indicates that the request originated in the EU + * + * @example "1" + */ + isEUCountry?: '1'; + /** + * A two-letter code indicating the continent the request originated from. + * + * @example "AN" + */ + continent?: ContinentCode; + /** + * The city the request originated from + * + * @example "Austin" + */ + city?: string; + /** + * Postal code of the incoming request + * + * @example "78701" + */ + postalCode?: string; + /** + * Latitude of the incoming request + * + * @example "30.27130" + */ + latitude?: string; + /** + * Longitude of the incoming request + * + * @example "-97.74260" + */ + longitude?: string; + /** + * Timezone of the incoming request + * + * @example "America/Chicago" + */ + timezone?: string; + /** + * If known, the ISO 3166-2 name for the first level region associated with + * the IP address of the incoming request + * + * @example "Texas" + */ + region?: string; + /** + * If known, the ISO 3166-2 code for the first-level region associated with + * the IP address of the incoming request + * + * @example "TX" + */ + regionCode?: string; + /** + * Metro code (DMA) of the incoming request + * + * @example "635" + */ + metroCode?: string; +} +/** Data about the incoming request's TLS certificate */ +interface IncomingRequestCfPropertiesTLSClientAuth { + /** Always `"1"`, indicating that the certificate was presented */ + certPresented: '1'; + /** + * Result of certificate verification. + * + * @example "FAILED:self signed certificate" + */ + certVerified: Exclude; + /** The presented certificate's revokation status. 
+ * + * - A value of `"1"` indicates the certificate has been revoked + * - A value of `"0"` indicates the certificate has not been revoked + */ + certRevoked: '1' | '0'; + /** + * The certificate issuer's [distinguished name](https://knowledge.digicert.com/generalinformation/INFO1745.html) + * + * @example "CN=cloudflareaccess.com, C=US, ST=Texas, L=Austin, O=Cloudflare" + */ + certIssuerDN: string; + /** + * The certificate subject's [distinguished name](https://knowledge.digicert.com/generalinformation/INFO1745.html) + * + * @example "CN=*.cloudflareaccess.com, C=US, ST=Texas, L=Austin, O=Cloudflare" + */ + certSubjectDN: string; + /** + * The certificate issuer's [distinguished name](https://knowledge.digicert.com/generalinformation/INFO1745.html) ([RFC 2253](https://www.rfc-editor.org/rfc/rfc2253.html) formatted) + * + * @example "CN=cloudflareaccess.com, C=US, ST=Texas, L=Austin, O=Cloudflare" + */ + certIssuerDNRFC2253: string; + /** + * The certificate subject's [distinguished name](https://knowledge.digicert.com/generalinformation/INFO1745.html) ([RFC 2253](https://www.rfc-editor.org/rfc/rfc2253.html) formatted) + * + * @example "CN=*.cloudflareaccess.com, C=US, ST=Texas, L=Austin, O=Cloudflare" + */ + certSubjectDNRFC2253: string; + /** The certificate issuer's distinguished name (legacy policies) */ + certIssuerDNLegacy: string; + /** The certificate subject's distinguished name (legacy policies) */ + certSubjectDNLegacy: string; + /** + * The certificate's serial number + * + * @example "00936EACBE07F201DF" + */ + certSerial: string; + /** + * The certificate issuer's serial number + * + * @example "2489002934BDFEA34" + */ + certIssuerSerial: string; + /** + * The certificate's Subject Key Identifier + * + * @example "BB:AF:7E:02:3D:FA:A6:F1:3C:84:8E:AD:EE:38:98:EC:D9:32:32:D4" + */ + certSKI: string; + /** + * The certificate issuer's Subject Key Identifier + * + * @example "BB:AF:7E:02:3D:FA:A6:F1:3C:84:8E:AD:EE:38:98:EC:D9:32:32:D4" + */ + 
certIssuerSKI: string; + /** + * The certificate's SHA-1 fingerprint + * + * @example "6b9109f323999e52259cda7373ff0b4d26bd232e" + */ + certFingerprintSHA1: string; + /** + * The certificate's SHA-256 fingerprint + * + * @example "acf77cf37b4156a2708e34c4eb755f9b5dbbe5ebb55adfec8f11493438d19e6ad3f157f81fa3b98278453d5652b0c1fd1d71e5695ae4d709803a4d3f39de9dea" + */ + certFingerprintSHA256: string; + /** + * The effective starting date of the certificate + * + * @example "Dec 22 19:39:00 2018 GMT" + */ + certNotBefore: string; + /** + * The effective expiration date of the certificate + * + * @example "Dec 22 19:39:00 2018 GMT" + */ + certNotAfter: string; +} +/** Placeholder values for TLS Client Authorization */ +interface IncomingRequestCfPropertiesTLSClientAuthPlaceholder { + certPresented: '0'; + certVerified: 'NONE'; + certRevoked: '0'; + certIssuerDN: ''; + certSubjectDN: ''; + certIssuerDNRFC2253: ''; + certSubjectDNRFC2253: ''; + certIssuerDNLegacy: ''; + certSubjectDNLegacy: ''; + certSerial: ''; + certIssuerSerial: ''; + certSKI: ''; + certIssuerSKI: ''; + certFingerprintSHA1: ''; + certFingerprintSHA256: ''; + certNotBefore: ''; + certNotAfter: ''; +} +/** Possible outcomes of TLS verification */ +declare type CertVerificationStatus = + /** Authentication succeeded */ + | 'SUCCESS' + /** No certificate was presented */ + | 'NONE' + /** Failed because the certificate was self-signed */ + | 'FAILED:self signed certificate' + /** Failed because the certificate failed a trust chain check */ + | 'FAILED:unable to verify the first certificate' + /** Failed because the certificate not yet valid */ + | 'FAILED:certificate is not yet valid' + /** Failed because the certificate is expired */ + | 'FAILED:certificate has expired' + /** Failed for another unspecified reason */ + | 'FAILED'; +/** + * An upstream endpoint's response to a TCP `keepalive` message from Cloudflare. 
+ */ +declare type IncomingRequestCfPropertiesEdgeRequestKeepAliveStatus = + | 0 /** Unknown */ + | 1 /** no keepalives (not found) */ + | 2 /** no connection re-use, opening keepalive connection failed */ + | 3 /** no connection re-use, keepalive accepted and saved */ + | 4 /** connection re-use, refused by the origin server (`TCP FIN`) */ + | 5; /** connection re-use, accepted by the origin server */ +/** ISO 3166-1 Alpha-2 codes */ +declare type Iso3166Alpha2Code = + | 'AD' + | 'AE' + | 'AF' + | 'AG' + | 'AI' + | 'AL' + | 'AM' + | 'AO' + | 'AQ' + | 'AR' + | 'AS' + | 'AT' + | 'AU' + | 'AW' + | 'AX' + | 'AZ' + | 'BA' + | 'BB' + | 'BD' + | 'BE' + | 'BF' + | 'BG' + | 'BH' + | 'BI' + | 'BJ' + | 'BL' + | 'BM' + | 'BN' + | 'BO' + | 'BQ' + | 'BR' + | 'BS' + | 'BT' + | 'BV' + | 'BW' + | 'BY' + | 'BZ' + | 'CA' + | 'CC' + | 'CD' + | 'CF' + | 'CG' + | 'CH' + | 'CI' + | 'CK' + | 'CL' + | 'CM' + | 'CN' + | 'CO' + | 'CR' + | 'CU' + | 'CV' + | 'CW' + | 'CX' + | 'CY' + | 'CZ' + | 'DE' + | 'DJ' + | 'DK' + | 'DM' + | 'DO' + | 'DZ' + | 'EC' + | 'EE' + | 'EG' + | 'EH' + | 'ER' + | 'ES' + | 'ET' + | 'FI' + | 'FJ' + | 'FK' + | 'FM' + | 'FO' + | 'FR' + | 'GA' + | 'GB' + | 'GD' + | 'GE' + | 'GF' + | 'GG' + | 'GH' + | 'GI' + | 'GL' + | 'GM' + | 'GN' + | 'GP' + | 'GQ' + | 'GR' + | 'GS' + | 'GT' + | 'GU' + | 'GW' + | 'GY' + | 'HK' + | 'HM' + | 'HN' + | 'HR' + | 'HT' + | 'HU' + | 'ID' + | 'IE' + | 'IL' + | 'IM' + | 'IN' + | 'IO' + | 'IQ' + | 'IR' + | 'IS' + | 'IT' + | 'JE' + | 'JM' + | 'JO' + | 'JP' + | 'KE' + | 'KG' + | 'KH' + | 'KI' + | 'KM' + | 'KN' + | 'KP' + | 'KR' + | 'KW' + | 'KY' + | 'KZ' + | 'LA' + | 'LB' + | 'LC' + | 'LI' + | 'LK' + | 'LR' + | 'LS' + | 'LT' + | 'LU' + | 'LV' + | 'LY' + | 'MA' + | 'MC' + | 'MD' + | 'ME' + | 'MF' + | 'MG' + | 'MH' + | 'MK' + | 'ML' + | 'MM' + | 'MN' + | 'MO' + | 'MP' + | 'MQ' + | 'MR' + | 'MS' + | 'MT' + | 'MU' + | 'MV' + | 'MW' + | 'MX' + | 'MY' + | 'MZ' + | 'NA' + | 'NC' + | 'NE' + | 'NF' + | 'NG' + | 'NI' + | 'NL' + | 'NO' + | 'NP' + | 'NR' + | 
'NU' + | 'NZ' + | 'OM' + | 'PA' + | 'PE' + | 'PF' + | 'PG' + | 'PH' + | 'PK' + | 'PL' + | 'PM' + | 'PN' + | 'PR' + | 'PS' + | 'PT' + | 'PW' + | 'PY' + | 'QA' + | 'RE' + | 'RO' + | 'RS' + | 'RU' + | 'RW' + | 'SA' + | 'SB' + | 'SC' + | 'SD' + | 'SE' + | 'SG' + | 'SH' + | 'SI' + | 'SJ' + | 'SK' + | 'SL' + | 'SM' + | 'SN' + | 'SO' + | 'SR' + | 'SS' + | 'ST' + | 'SV' + | 'SX' + | 'SY' + | 'SZ' + | 'TC' + | 'TD' + | 'TF' + | 'TG' + | 'TH' + | 'TJ' + | 'TK' + | 'TL' + | 'TM' + | 'TN' + | 'TO' + | 'TR' + | 'TT' + | 'TV' + | 'TW' + | 'TZ' + | 'UA' + | 'UG' + | 'UM' + | 'US' + | 'UY' + | 'UZ' + | 'VA' + | 'VC' + | 'VE' + | 'VG' + | 'VI' + | 'VN' + | 'VU' + | 'WF' + | 'WS' + | 'YE' + | 'YT' + | 'ZA' + | 'ZM' + | 'ZW'; +/** The 2-letter continent codes Cloudflare uses */ +declare type ContinentCode = 'AF' | 'AN' | 'AS' | 'EU' | 'NA' | 'OC' | 'SA'; +type CfProperties = IncomingRequestCfProperties | RequestInitCfProperties; +interface D1Meta { + duration: number; + size_after: number; + rows_read: number; + rows_written: number; + last_row_id: number; + changed_db: boolean; + changes: number; + /** + * The region of the database instance that executed the query. + */ + served_by_region?: string; + /** + * The three letters airport code of the colo that executed the query. + */ + served_by_colo?: string; + /** + * True if-and-only-if the database instance that executed the query was the primary. + */ + served_by_primary?: boolean; + timings?: { + /** + * The duration of the SQL query execution by the database instance. It doesn't include any network time. + */ + sql_duration_ms: number; + }; + /** + * Number of total attempts to execute the query, due to automatic retries. + * Note: All other fields in the response like `timings` only apply to the last attempt. 
+ */ + total_attempts?: number; +} +interface D1Response { + success: true; + meta: D1Meta & Record; + error?: never; +} +type D1Result = D1Response & { + results: T[]; +}; +interface D1ExecResult { + count: number; + duration: number; +} +type D1SessionConstraint = + // Indicates that the first query should go to the primary, and the rest queries + // using the same D1DatabaseSession will go to any replica that is consistent with + // the bookmark maintained by the session (returned by the first query). + | 'first-primary' + // Indicates that the first query can go anywhere (primary or replica), and the rest queries + // using the same D1DatabaseSession will go to any replica that is consistent with + // the bookmark maintained by the session (returned by the first query). + | 'first-unconstrained'; +type D1SessionBookmark = string; +declare abstract class D1Database { + prepare(query: string): D1PreparedStatement; + batch(statements: D1PreparedStatement[]): Promise[]>; + exec(query: string): Promise; + /** + * Creates a new D1 Session anchored at the given constraint or the bookmark. + * All queries executed using the created session will have sequential consistency, + * meaning that all writes done through the session will be visible in subsequent reads. + * + * @param constraintOrBookmark Either the session constraint or the explicit bookmark to anchor the created session. + */ + withSession(constraintOrBookmark?: D1SessionBookmark | D1SessionConstraint): D1DatabaseSession; + /** + * @deprecated dump() will be removed soon, only applies to deprecated alpha v1 databases. + */ + dump(): Promise; +} +declare abstract class D1DatabaseSession { + prepare(query: string): D1PreparedStatement; + batch(statements: D1PreparedStatement[]): Promise[]>; + /** + * @returns The latest session bookmark across all executed queries on the session. + * If no query has been executed yet, `null` is returned. 
+ */ + getBookmark(): D1SessionBookmark | null; +} +declare abstract class D1PreparedStatement { + bind(...values: unknown[]): D1PreparedStatement; + first(colName: string): Promise; + first>(): Promise; + run>(): Promise>; + all>(): Promise>; + raw(options: { columnNames: true }): Promise<[string[], ...T[]]>; + raw(options?: { columnNames?: false }): Promise; +} +// `Disposable` was added to TypeScript's standard lib types in version 5.2. +// To support older TypeScript versions, define an empty `Disposable` interface. +// Users won't be able to use `using`/`Symbol.dispose` without upgrading to 5.2, +// but this will ensure type checking on older versions still passes. +// TypeScript's interface merging will ensure our empty interface is effectively +// ignored when `Disposable` is included in the standard lib. +interface Disposable {} +/** + * The returned data after sending an email + */ +interface EmailSendResult { + /** + * The Email Message ID + */ + messageId: string; +} +/** + * An email message that can be sent from a Worker. + */ +interface EmailMessage { + /** + * Envelope From attribute of the email message. + */ + readonly from: string; + /** + * Envelope To attribute of the email message. + */ + readonly to: string; +} +/** + * An email message that is sent to a consumer Worker and can be rejected/forwarded. + */ +interface ForwardableEmailMessage extends EmailMessage { + /** + * Stream of the email message content. + */ + readonly raw: ReadableStream; + /** + * An [Headers object](https://developer.mozilla.org/en-US/docs/Web/API/Headers). + */ + readonly headers: Headers; + /** + * Size of the email message content. + */ + readonly rawSize: number; + /** + * Reject this email message by returning a permanent SMTP error back to the connecting client including the given reason. + * @param reason The reject reason. 
+ * @returns void + */ + setReject(reason: string): void; + /** + * Forward this email message to a verified destination address of the account. + * @param rcptTo Verified destination address. + * @param headers A [Headers object](https://developer.mozilla.org/en-US/docs/Web/API/Headers). + * @returns A promise that resolves when the email message is forwarded. + */ + forward(rcptTo: string, headers?: Headers): Promise; + /** + * Reply to the sender of this email message with a new EmailMessage object. + * @param message The reply message. + * @returns A promise that resolves when the email message is replied. + */ + reply(message: EmailMessage): Promise; +} +/** A file attachment for an email message */ +type EmailAttachment = + | { + disposition: 'inline'; + contentId: string; + filename: string; + type: string; + content: string | ArrayBuffer | ArrayBufferView; + } + | { + disposition: 'attachment'; + contentId?: undefined; + filename: string; + type: string; + content: string | ArrayBuffer | ArrayBufferView; + }; +/** An Email Address */ +interface EmailAddress { + name: string; + email: string; +} +/** + * A binding that allows a Worker to send email messages. 
+ */ +interface SendEmail { + send(message: EmailMessage): Promise; + send(builder: { + from: string | EmailAddress; + to: string | string[]; + subject: string; + replyTo?: string | EmailAddress; + cc?: string | string[]; + bcc?: string | string[]; + headers?: Record; + text?: string; + html?: string; + attachments?: EmailAttachment[]; + }): Promise; +} +declare abstract class EmailEvent extends ExtendableEvent { + readonly message: ForwardableEmailMessage; +} +declare type EmailExportedHandler = ( + message: ForwardableEmailMessage, + env: Env, + ctx: ExecutionContext, +) => void | Promise; +declare module 'cloudflare:email' { + let _EmailMessage: { + prototype: EmailMessage; + new (from: string, to: string, raw: ReadableStream | string): EmailMessage; + }; + export { _EmailMessage as EmailMessage }; +} +/** + * Hello World binding to serve as an explanatory example. DO NOT USE + */ +interface HelloWorldBinding { + /** + * Retrieve the current stored value + */ + get(): Promise<{ + value: string; + ms?: number; + }>; + /** + * Set a new stored value + */ + set(value: string): Promise; +} +interface Hyperdrive { + /** + * Connect directly to Hyperdrive as if it's your database, returning a TCP socket. + * + * Calling this method returns an identical socket to if you call + * `connect("host:port")` using the `host` and `port` fields from this object. + * Pick whichever approach works better with your preferred DB client library. + * + * Note that this socket is not yet authenticated -- it's expected that your + * code (or preferably, the client library of your choice) will authenticate + * using the information in this class's readonly fields. + */ + connect(): Socket; + /** + * A valid DB connection string that can be passed straight into the typical + * client library/driver/ORM. This will typically be the easiest way to use + * Hyperdrive. 
+ */ + readonly connectionString: string; + /* + * A randomly generated hostname that is only valid within the context of the + * currently running Worker which, when passed into `connect()` function from + * the "cloudflare:sockets" module, will connect to the Hyperdrive instance + * for your database. + */ + readonly host: string; + /* + * The port that must be paired the the host field when connecting. + */ + readonly port: number; + /* + * The username to use when authenticating to your database via Hyperdrive. + * Unlike the host and password, this will be the same every time + */ + readonly user: string; + /* + * The randomly generated password to use when authenticating to your + * database via Hyperdrive. Like the host field, this password is only valid + * within the context of the currently running Worker instance from which + * it's read. + */ + readonly password: string; + /* + * The name of the database to connect to. + */ + readonly database: string; +} +// Copyright (c) 2024 Cloudflare, Inc. 
+// Licensed under the Apache 2.0 license found in the LICENSE file or at: +// https://opensource.org/licenses/Apache-2.0 +type ImageInfoResponse = + | { + format: 'image/svg+xml'; + } + | { + format: string; + fileSize: number; + width: number; + height: number; + }; +type ImageTransform = { + width?: number; + height?: number; + background?: string; + blur?: number; + border?: + | { + color?: string; + width?: number; + } + | { + top?: number; + bottom?: number; + left?: number; + right?: number; + }; + brightness?: number; + contrast?: number; + fit?: 'scale-down' | 'contain' | 'pad' | 'squeeze' | 'cover' | 'crop'; + flip?: 'h' | 'v' | 'hv'; + gamma?: number; + segment?: 'foreground'; + gravity?: + | 'face' + | 'left' + | 'right' + | 'top' + | 'bottom' + | 'center' + | 'auto' + | 'entropy' + | { + x?: number; + y?: number; + mode: 'remainder' | 'box-center'; + }; + rotate?: 0 | 90 | 180 | 270; + saturation?: number; + sharpen?: number; + trim?: + | 'border' + | { + top?: number; + bottom?: number; + left?: number; + right?: number; + width?: number; + height?: number; + border?: + | boolean + | { + color?: string; + tolerance?: number; + keep?: number; + }; + }; +}; +type ImageDrawOptions = { + opacity?: number; + repeat?: boolean | string; + top?: number; + left?: number; + bottom?: number; + right?: number; +}; +type ImageInputOptions = { + encoding?: 'base64'; +}; +type ImageOutputOptions = { + format: 'image/jpeg' | 'image/png' | 'image/gif' | 'image/webp' | 'image/avif' | 'rgb' | 'rgba'; + quality?: number; + background?: string; + anim?: boolean; +}; +interface ImageMetadata { + id: string; + filename?: string; + uploaded?: string; + requireSignedURLs: boolean; + meta?: Record; + variants: string[]; + draft?: boolean; + creator?: string; +} +interface ImageUploadOptions { + id?: string; + filename?: string; + requireSignedURLs?: boolean; + metadata?: Record; + creator?: string; + encoding?: 'base64'; +} +interface ImageUpdateOptions { + requireSignedURLs?: 
boolean; + metadata?: Record; + creator?: string; +} +interface ImageListOptions { + limit?: number; + cursor?: string; + sortOrder?: 'asc' | 'desc'; + creator?: string; +} +interface ImageList { + images: ImageMetadata[]; + cursor?: string; + listComplete: boolean; +} +interface HostedImagesBinding { + /** + * Get detailed metadata for a hosted image + * @param imageId The ID of the image (UUID or custom ID) + * @returns Image metadata, or null if not found + */ + details(imageId: string): Promise; + /** + * Get the raw image data for a hosted image + * @param imageId The ID of the image (UUID or custom ID) + * @returns ReadableStream of image bytes, or null if not found + */ + image(imageId: string): Promise | null>; + /** + * Upload a new hosted image + * @param image The image file to upload + * @param options Upload configuration + * @returns Metadata for the uploaded image + * @throws {@link ImagesError} if upload fails + */ + upload(image: ReadableStream | ArrayBuffer, options?: ImageUploadOptions): Promise; + /** + * Update hosted image metadata + * @param imageId The ID of the image + * @param options Properties to update + * @returns Updated image metadata + * @throws {@link ImagesError} if update fails + */ + update(imageId: string, options: ImageUpdateOptions): Promise; + /** + * Delete a hosted image + * @param imageId The ID of the image + * @returns True if deleted, false if not found + */ + delete(imageId: string): Promise; + /** + * List hosted images with pagination + * @param options List configuration + * @returns List of images with pagination info + * @throws {@link ImagesError} if list fails + */ + list(options?: ImageListOptions): Promise; +} +interface ImagesBinding { + /** + * Get image metadata (type, width and height) + * @throws {@link ImagesError} with code 9412 if input is not an image + * @param stream The image bytes + */ + info(stream: ReadableStream, options?: ImageInputOptions): Promise; + /** + * Begin applying a series of 
transformations to an image + * @param stream The image bytes + * @returns A transform handle + */ + input(stream: ReadableStream, options?: ImageInputOptions): ImageTransformer; + /** + * Access hosted images CRUD operations + */ + readonly hosted: HostedImagesBinding; +} +interface ImageTransformer { + /** + * Apply transform next, returning a transform handle. + * You can then apply more transformations, draw, or retrieve the output. + * @param transform + */ + transform(transform: ImageTransform): ImageTransformer; + /** + * Draw an image on this transformer, returning a transform handle. + * You can then apply more transformations, draw, or retrieve the output. + * @param image The image (or transformer that will give the image) to draw + * @param options The options configuring how to draw the image + */ + draw(image: ReadableStream | ImageTransformer, options?: ImageDrawOptions): ImageTransformer; + /** + * Retrieve the image that results from applying the transforms to the + * provided input + * @param options Options that apply to the output e.g. output format + */ + output(options: ImageOutputOptions): Promise; +} +type ImageTransformationOutputOptions = { + encoding?: 'base64'; +}; +interface ImageTransformationResult { + /** + * The image as a response, ready to store in cache or return to users + */ + response(): Response; + /** + * The content type of the returned image + */ + contentType(): string; + /** + * The bytes of the response + */ + image(options?: ImageTransformationOutputOptions): ReadableStream; +} +interface ImagesError extends Error { + readonly code: number; + readonly message: string; + readonly stack?: string; +} +/** + * Media binding for transforming media streams. + * Provides the entry point for media transformation operations. + */ +interface MediaBinding { + /** + * Creates a media transformer from an input stream. 
+ * @param media - The input media bytes + * @returns A MediaTransformer instance for applying transformations + */ + input(media: ReadableStream): MediaTransformer; +} +/** + * Media transformer for applying transformation operations to media content. + * Handles sizing, fitting, and other input transformation parameters. + */ +interface MediaTransformer { + /** + * Applies transformation options to the media content. + * @param transform - Configuration for how the media should be transformed + * @returns A generator for producing the transformed media output + */ + transform(transform?: MediaTransformationInputOptions): MediaTransformationGenerator; + /** + * Generates the final media output with specified options. + * @param output - Configuration for the output format and parameters + * @returns The final transformation result containing the transformed media + */ + output(output?: MediaTransformationOutputOptions): MediaTransformationResult; +} +/** + * Generator for producing media transformation results. + * Configures the output format and parameters for the transformed media. + */ +interface MediaTransformationGenerator { + /** + * Generates the final media output with specified options. + * @param output - Configuration for the output format and parameters + * @returns The final transformation result containing the transformed media + */ + output(output?: MediaTransformationOutputOptions): MediaTransformationResult; +} +/** + * Result of a media transformation operation. + * Provides multiple ways to access the transformed media content. + */ +interface MediaTransformationResult { + /** + * Returns the transformed media as a readable stream of bytes. + * @returns A promise containing a readable stream with the transformed media + */ + media(): Promise>; + /** + * Returns the transformed media as an HTTP response object. 
+ * @returns The transformed media as a Promise, ready to store in cache or return to users + */ + response(): Promise; + /** + * Returns the MIME type of the transformed media. + * @returns A promise containing the content type string (e.g., 'image/jpeg', 'video/mp4') + */ + contentType(): Promise; +} +/** + * Configuration options for transforming media input. + * Controls how the media should be resized and fitted. + */ +type MediaTransformationInputOptions = { + /** How the media should be resized to fit the specified dimensions */ + fit?: 'contain' | 'cover' | 'scale-down'; + /** Target width in pixels */ + width?: number; + /** Target height in pixels */ + height?: number; +}; +/** + * Configuration options for Media Transformations output. + * Controls the format, timing, and type of the generated output. + */ +type MediaTransformationOutputOptions = { + /** + * Output mode determining the type of media to generate + */ + mode?: 'video' | 'spritesheet' | 'frame' | 'audio'; + /** Whether to include audio in the output */ + audio?: boolean; + /** + * Starting timestamp for frame extraction or start time for clips. (e.g. '2s'). + */ + time?: string; + /** + * Duration for video clips, audio extraction, and spritesheet generation (e.g. '5s'). + */ + duration?: string; + /** + * Number of frames in the spritesheet. + */ + imageCount?: number; + /** + * Output format for the generated media. + */ + format?: 'jpg' | 'png' | 'm4a'; +}; +/** + * Error object for media transformation operations. + * Extends the standard Error interface with additional media-specific information. 
+ */ +interface MediaError extends Error { + readonly code: number; + readonly message: string; + readonly stack?: string; +} +declare module 'cloudflare:node' { + interface NodeStyleServer { + listen(...args: unknown[]): this; + address(): { + port?: number | null | undefined; + }; + } + export function httpServerHandler(port: number): ExportedHandler; + export function httpServerHandler(options: { port: number }): ExportedHandler; + export function httpServerHandler(server: NodeStyleServer): ExportedHandler; +} +type Params

= Record; +type EventContext = { + request: Request>; + functionPath: string; + waitUntil: (promise: Promise) => void; + passThroughOnException: () => void; + next: (input?: Request | string, init?: RequestInit) => Promise; + env: Env & { + ASSETS: { + fetch: typeof fetch; + }; + }; + params: Params

; + data: Data; +}; +type PagesFunction< + Env = unknown, + Params extends string = any, + Data extends Record = Record, +> = (context: EventContext) => Response | Promise; +type EventPluginContext = { + request: Request>; + functionPath: string; + waitUntil: (promise: Promise) => void; + passThroughOnException: () => void; + next: (input?: Request | string, init?: RequestInit) => Promise; + env: Env & { + ASSETS: { + fetch: typeof fetch; + }; + }; + params: Params

; + data: Data; + pluginArgs: PluginArgs; +}; +type PagesPluginFunction< + Env = unknown, + Params extends string = any, + Data extends Record = Record, + PluginArgs = unknown, +> = (context: EventPluginContext) => Response | Promise; +declare module 'assets:*' { + export const onRequest: PagesFunction; +} +// Copyright (c) 2022-2023 Cloudflare, Inc. +// Licensed under the Apache 2.0 license found in the LICENSE file or at: +// https://opensource.org/licenses/Apache-2.0 +declare module 'cloudflare:pipelines' { + export abstract class PipelineTransformationEntrypoint< + Env = unknown, + I extends PipelineRecord = PipelineRecord, + O extends PipelineRecord = PipelineRecord, + > { + protected env: Env; + protected ctx: ExecutionContext; + constructor(ctx: ExecutionContext, env: Env); + /** + * run receives an array of PipelineRecord which can be + * transformed and returned to the pipeline + * @param records Incoming records from the pipeline to be transformed + * @param metadata Information about the specific pipeline calling the transformation entrypoint + * @returns A promise containing the transformed PipelineRecord array + */ + public run(records: I[], metadata: PipelineBatchMetadata): Promise; + } + export type PipelineRecord = Record; + export type PipelineBatchMetadata = { + pipelineId: string; + pipelineName: string; + }; + export interface Pipeline { + /** + * The Pipeline interface represents the type of a binding to a Pipeline + * + * @param records The records to send to the pipeline + */ + send(records: T[]): Promise; + } +} +// PubSubMessage represents an incoming PubSub message. +// The message includes metadata about the broker, the client, and the payload +// itself. +// https://developers.cloudflare.com/pub-sub/ +interface PubSubMessage { + // Message ID + readonly mid: number; + // MQTT broker FQDN in the form mqtts://BROKER.NAMESPACE.cloudflarepubsub.com:PORT + readonly broker: string; + // The MQTT topic the message was sent on. 
+ readonly topic: string; + // The client ID of the client that published this message. + readonly clientId: string; + // The unique identifier (JWT ID) used by the client to authenticate, if token + // auth was used. + readonly jti?: string; + // A Unix timestamp (seconds from Jan 1, 1970), set when the Pub/Sub Broker + // received the message from the client. + readonly receivedAt: number; + // An (optional) string with the MIME type of the payload, if set by the + // client. + readonly contentType: string; + // Set to 1 when the payload is a UTF-8 string + // https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901063 + readonly payloadFormatIndicator: number; + // Pub/Sub (MQTT) payloads can be UTF-8 strings, or byte arrays. + // You can use payloadFormatIndicator to inspect this before decoding. + payload: string | Uint8Array; +} +// JsonWebKey extended by kid parameter +interface JsonWebKeyWithKid extends JsonWebKey { + // Key Identifier of the JWK + readonly kid: string; +} +interface RateLimitOptions { + key: string; +} +interface RateLimitOutcome { + success: boolean; +} +interface RateLimit { + /** + * Rate limit a request based on the provided options. + * @see https://developers.cloudflare.com/workers/runtime-apis/bindings/rate-limit/ + * @returns A promise that resolves with the outcome of the rate limit. + */ + limit(options: RateLimitOptions): Promise; +} +// Namespace for RPC utility types. Unfortunately, we can't use a `module` here as these types need +// to referenced by `Fetcher`. This is included in the "importable" version of the types which +// strips all `module` blocks. +declare namespace Rpc { + // Branded types for identifying `WorkerEntrypoint`/`DurableObject`/`Target`s. + // TypeScript uses *structural* typing meaning anything with the same shape as type `T` is a `T`. + // For the classes exported by `cloudflare:workers` we want *nominal* typing (i.e. 
we only want to + // accept `WorkerEntrypoint` from `cloudflare:workers`, not any other class with the same shape) + export const __RPC_STUB_BRAND: '__RPC_STUB_BRAND'; + export const __RPC_TARGET_BRAND: '__RPC_TARGET_BRAND'; + export const __WORKER_ENTRYPOINT_BRAND: '__WORKER_ENTRYPOINT_BRAND'; + export const __DURABLE_OBJECT_BRAND: '__DURABLE_OBJECT_BRAND'; + export const __WORKFLOW_ENTRYPOINT_BRAND: '__WORKFLOW_ENTRYPOINT_BRAND'; + export interface RpcTargetBranded { + [__RPC_TARGET_BRAND]: never; + } + export interface WorkerEntrypointBranded { + [__WORKER_ENTRYPOINT_BRAND]: never; + } + export interface DurableObjectBranded { + [__DURABLE_OBJECT_BRAND]: never; + } + export interface WorkflowEntrypointBranded { + [__WORKFLOW_ENTRYPOINT_BRAND]: never; + } + export type EntrypointBranded = WorkerEntrypointBranded | DurableObjectBranded | WorkflowEntrypointBranded; + // Types that can be used through `Stub`s + export type Stubable = RpcTargetBranded | ((...args: any[]) => any); + // Types that can be passed over RPC + // The reason for using a generic type here is to build a serializable subset of structured + // cloneable composite types. This allows types defined with the "interface" keyword to pass the + // serializable check as well. Otherwise, only types defined with the "type" keyword would pass. + type Serializable = + // Structured cloneables + | BaseType + // Structured cloneable composites + | Map< + T extends Map ? Serializable : never, + T extends Map ? Serializable : never + > + | Set ? Serializable : never> + | ReadonlyArray ? Serializable : never> + | { + [K in keyof T]: K extends number | string ? Serializable : never; + } + // Special types + | Stub + // Serialized as stubs, see `Stubify` + | Stubable; + // Base type for all RPC stubs, including common memory management methods. + // `T` is used as a marker type for unwrapping `Stub`s later. 
+ interface StubBase extends Disposable { + [__RPC_STUB_BRAND]: T; + dup(): this; + } + export type Stub = Provider & StubBase; + // This represents all the types that can be sent as-is over an RPC boundary + type BaseType = + | void + | undefined + | null + | boolean + | number + | bigint + | string + | TypedArray + | ArrayBuffer + | DataView + | Date + | Error + | RegExp + | ReadableStream + | WritableStream + | Request + | Response + | Headers; + // Recursively rewrite all `Stubable` types with `Stub`s + // prettier-ignore + type Stubify = T extends Stubable ? Stub : T extends Map ? Map, Stubify> : T extends Set ? Set> : T extends Array ? Array> : T extends ReadonlyArray ? ReadonlyArray> : T extends BaseType ? T : T extends { + [key: string | number]: any; + } ? { + [K in keyof T]: Stubify; + } : T; + // Recursively rewrite all `Stub`s with the corresponding `T`s. + // Note we use `StubBase` instead of `Stub` here to avoid circular dependencies: + // `Stub` depends on `Provider`, which depends on `Unstubify`, which would depend on `Stub`. + // prettier-ignore + type Unstubify = T extends StubBase ? V : T extends Map ? Map, Unstubify> : T extends Set ? Set> : T extends Array ? Array> : T extends ReadonlyArray ? ReadonlyArray> : T extends BaseType ? T : T extends { + [key: string | number]: unknown; + } ? { + [K in keyof T]: Unstubify; + } : T; + type UnstubifyAll = { + [I in keyof A]: Unstubify; + }; + // Utility type for adding `Provider`/`Disposable`s to `object` types only. + // Note `unknown & T` is equivalent to `T`. + type MaybeProvider = T extends object ? Provider : unknown; + type MaybeDisposable = T extends object ? Disposable : unknown; + // Type for method return or property on an RPC interface. + // - Stubable types are replaced by stubs. + // - Serializable types are passed by value, with stubable types replaced by stubs + // and a top-level `Disposer`. + // Everything else can't be passed over PRC. 
+ // Technically, we use custom thenables here, but they quack like `Promise`s. + // Intersecting with `(Maybe)Provider` allows pipelining. + // prettier-ignore + type Result = R extends Stubable ? Promise> & Provider : R extends Serializable ? Promise & MaybeDisposable> & MaybeProvider : never; + // Type for method or property on an RPC interface. + // For methods, unwrap `Stub`s in parameters, and rewrite returns to be `Result`s. + // Unwrapping `Stub`s allows calling with `Stubable` arguments. + // For properties, rewrite types to be `Result`s. + // In each case, unwrap `Promise`s. + type MethodOrProperty = V extends (...args: infer P) => infer R + ? (...args: UnstubifyAll

) => Result> + : Result>; + // Type for the callable part of an `Provider` if `T` is callable. + // This is intersected with methods/properties. + type MaybeCallableProvider = T extends (...args: any[]) => any ? MethodOrProperty : unknown; + // Base type for all other types providing RPC-like interfaces. + // Rewrites all methods/properties to be `MethodOrProperty`s, while preserving callable types. + // `Reserved` names (e.g. stub method names like `dup()`) and symbols can't be accessed over RPC. + export type Provider = MaybeCallableProvider & + Pick< + { + [K in keyof T]: MethodOrProperty; + }, + Exclude> + >; +} +declare namespace Cloudflare { + // Type of `env`. + // + // The specific project can extend `Env` by redeclaring it in project-specific files. Typescript + // will merge all declarations. + // + // You can use `wrangler types` to generate the `Env` type automatically. + interface Env {} + // Project-specific parameters used to inform types. + // + // This interface is, again, intended to be declared in project-specific files, and then that + // declaration will be merged with this one. + // + // A project should have a declaration like this: + // + // interface GlobalProps { + // // Declares the main module's exports. Used to populate Cloudflare.Exports aka the type + // // of `ctx.exports`. + // mainModule: typeof import("my-main-module"); + // + // // Declares which of the main module's exports are configured with durable storage, and + // // thus should behave as Durable Object namsepace bindings. + // durableNamespaces: "MyDurableObject" | "AnotherDurableObject"; + // } + // + // You can use `wrangler types` to generate `GlobalProps` automatically. + interface GlobalProps {} + // Evaluates to the type of a property in GlobalProps, defaulting to `Default` if it is not + // present. + type GlobalProp = K extends keyof GlobalProps ? GlobalProps[K] : Default; + // The type of the program's main module exports, if known. 
Requires `GlobalProps` to declare the + // `mainModule` property. + type MainModule = GlobalProp<'mainModule', {}>; + // The type of ctx.exports, which contains loopback bindings for all top-level exports. + type Exports = { + [K in keyof MainModule]: LoopbackForExport & + // If the export is listed in `durableNamespaces`, then it is also a + // DurableObjectNamespace. + (K extends GlobalProp<'durableNamespaces', never> + ? MainModule[K] extends new (...args: any[]) => infer DoInstance + ? DoInstance extends Rpc.DurableObjectBranded + ? DurableObjectNamespace + : DurableObjectNamespace + : DurableObjectNamespace + : {}); + }; +} +declare namespace CloudflareWorkersModule { + export type RpcStub = Rpc.Stub; + export const RpcStub: { + new (value: T): Rpc.Stub; + }; + export abstract class RpcTarget implements Rpc.RpcTargetBranded { + [Rpc.__RPC_TARGET_BRAND]: never; + } + // `protected` fields don't appear in `keyof`s, so can't be accessed over RPC + export abstract class WorkerEntrypoint implements Rpc.WorkerEntrypointBranded { + [Rpc.__WORKER_ENTRYPOINT_BRAND]: never; + protected ctx: ExecutionContext; + protected env: Env; + constructor(ctx: ExecutionContext, env: Env); + email?(message: ForwardableEmailMessage): void | Promise; + fetch?(request: Request): Response | Promise; + queue?(batch: MessageBatch): void | Promise; + scheduled?(controller: ScheduledController): void | Promise; + tail?(events: TraceItem[]): void | Promise; + tailStream?( + event: TailStream.TailEvent, + ): TailStream.TailEventHandlerType | Promise; + test?(controller: TestController): void | Promise; + trace?(traces: TraceItem[]): void | Promise; + } + export abstract class DurableObject implements Rpc.DurableObjectBranded { + [Rpc.__DURABLE_OBJECT_BRAND]: never; + protected ctx: DurableObjectState; + protected env: Env; + constructor(ctx: DurableObjectState, env: Env); + alarm?(alarmInfo?: AlarmInvocationInfo): void | Promise; + fetch?(request: Request): Response | Promise; + 
webSocketMessage?(ws: WebSocket, message: string | ArrayBuffer): void | Promise; + webSocketClose?(ws: WebSocket, code: number, reason: string, wasClean: boolean): void | Promise; + webSocketError?(ws: WebSocket, error: unknown): void | Promise; + } + export type WorkflowDurationLabel = 'second' | 'minute' | 'hour' | 'day' | 'week' | 'month' | 'year'; + export type WorkflowSleepDuration = `${number} ${WorkflowDurationLabel}${'s' | ''}` | number; + export type WorkflowDelayDuration = WorkflowSleepDuration; + export type WorkflowTimeoutDuration = WorkflowSleepDuration; + export type WorkflowRetentionDuration = WorkflowSleepDuration; + export type WorkflowBackoff = 'constant' | 'linear' | 'exponential'; + export type WorkflowStepConfig = { + retries?: { + limit: number; + delay: WorkflowDelayDuration | number; + backoff?: WorkflowBackoff; + }; + timeout?: WorkflowTimeoutDuration | number; + }; + export type WorkflowEvent = { + payload: Readonly; + timestamp: Date; + instanceId: string; + }; + export type WorkflowStepEvent = { + payload: Readonly; + timestamp: Date; + type: string; + }; + export type WorkflowStepContext = { + attempt: number; + }; + export abstract class WorkflowStep { + do>(name: string, callback: (ctx: WorkflowStepContext) => Promise): Promise; + do>( + name: string, + config: WorkflowStepConfig, + callback: (ctx: WorkflowStepContext) => Promise, + ): Promise; + sleep: (name: string, duration: WorkflowSleepDuration) => Promise; + sleepUntil: (name: string, timestamp: Date | number) => Promise; + waitForEvent>( + name: string, + options: { + type: string; + timeout?: WorkflowTimeoutDuration | number; + }, + ): Promise>; + } + export type WorkflowInstanceStatus = + | 'queued' + | 'running' + | 'paused' + | 'errored' + | 'terminated' + | 'complete' + | 'waiting' + | 'waitingForPause' + | 'unknown'; + export abstract class WorkflowEntrypoint | unknown = unknown> + implements Rpc.WorkflowEntrypointBranded + { + [Rpc.__WORKFLOW_ENTRYPOINT_BRAND]: never; + 
protected ctx: ExecutionContext; + protected env: Env; + constructor(ctx: ExecutionContext, env: Env); + run(event: Readonly>, step: WorkflowStep): Promise; + } + export function waitUntil(promise: Promise): void; + export function withEnv(newEnv: unknown, fn: () => unknown): unknown; + export function withExports(newExports: unknown, fn: () => unknown): unknown; + export function withEnvAndExports(newEnv: unknown, newExports: unknown, fn: () => unknown): unknown; + export const env: Cloudflare.Env; + export const exports: Cloudflare.Exports; +} +declare module 'cloudflare:workers' { + export = CloudflareWorkersModule; +} +interface SecretsStoreSecret { + /** + * Get a secret from the Secrets Store, returning a string of the secret value + * if it exists, or throws an error if it does not exist + */ + get(): Promise; +} +declare module 'cloudflare:sockets' { + function _connect(address: string | SocketAddress, options?: SocketOptions): Socket; + export { _connect as connect }; +} +type MarkdownDocument = { + name: string; + blob: Blob; +}; +type ConversionResponse = + | { + id: string; + name: string; + mimeType: string; + format: 'markdown'; + tokens: number; + data: string; + } + | { + id: string; + name: string; + mimeType: string; + format: 'error'; + error: string; + }; +type ImageConversionOptions = { + descriptionLanguage?: 'en' | 'es' | 'fr' | 'it' | 'pt' | 'de'; +}; +type EmbeddedImageConversionOptions = ImageConversionOptions & { + convert?: boolean; + maxConvertedImages?: number; +}; +type ConversionOptions = { + html?: { + images?: EmbeddedImageConversionOptions & { + convertOGImage?: boolean; + }; + hostname?: string; + cssSelector?: string; + }; + docx?: { + images?: EmbeddedImageConversionOptions; + }; + image?: ImageConversionOptions; + pdf?: { + images?: EmbeddedImageConversionOptions; + metadata?: boolean; + }; +}; +type ConversionRequestOptions = { + gateway?: GatewayOptions; + extraHeaders?: object; + conversionOptions?: ConversionOptions; +}; 
+type SupportedFileFormat = { + mimeType: string; + extension: string; +}; +declare abstract class ToMarkdownService { + transform(files: MarkdownDocument[], options?: ConversionRequestOptions): Promise; + transform(files: MarkdownDocument, options?: ConversionRequestOptions): Promise; + supported(): Promise; +} +declare namespace TailStream { + interface Header { + readonly name: string; + readonly value: string; + } + interface FetchEventInfo { + readonly type: 'fetch'; + readonly method: string; + readonly url: string; + readonly cfJson?: object; + readonly headers: Header[]; + } + interface JsRpcEventInfo { + readonly type: 'jsrpc'; + } + interface ScheduledEventInfo { + readonly type: 'scheduled'; + readonly scheduledTime: Date; + readonly cron: string; + } + interface AlarmEventInfo { + readonly type: 'alarm'; + readonly scheduledTime: Date; + } + interface QueueEventInfo { + readonly type: 'queue'; + readonly queueName: string; + readonly batchSize: number; + } + interface EmailEventInfo { + readonly type: 'email'; + readonly mailFrom: string; + readonly rcptTo: string; + readonly rawSize: number; + } + interface TraceEventInfo { + readonly type: 'trace'; + readonly traces: (string | null)[]; + } + interface HibernatableWebSocketEventInfoMessage { + readonly type: 'message'; + } + interface HibernatableWebSocketEventInfoError { + readonly type: 'error'; + } + interface HibernatableWebSocketEventInfoClose { + readonly type: 'close'; + readonly code: number; + readonly wasClean: boolean; + } + interface HibernatableWebSocketEventInfo { + readonly type: 'hibernatableWebSocket'; + readonly info: + | HibernatableWebSocketEventInfoClose + | HibernatableWebSocketEventInfoError + | HibernatableWebSocketEventInfoMessage; + } + interface CustomEventInfo { + readonly type: 'custom'; + } + interface FetchResponseInfo { + readonly type: 'fetch'; + readonly statusCode: number; + } + type EventOutcome = + | 'ok' + | 'canceled' + | 'exception' + | 'unknown' + | 'killSwitch' 
+ | 'daemonDown' + | 'exceededCpu' + | 'exceededMemory' + | 'loadShed' + | 'responseStreamDisconnected' + | 'scriptNotFound'; + interface ScriptVersion { + readonly id: string; + readonly tag?: string; + readonly message?: string; + } + interface Onset { + readonly type: 'onset'; + readonly attributes: Attribute[]; + // id for the span being opened by this Onset event. + readonly spanId: string; + readonly dispatchNamespace?: string; + readonly entrypoint?: string; + readonly executionModel: string; + readonly scriptName?: string; + readonly scriptTags?: string[]; + readonly scriptVersion?: ScriptVersion; + readonly info: + | FetchEventInfo + | JsRpcEventInfo + | ScheduledEventInfo + | AlarmEventInfo + | QueueEventInfo + | EmailEventInfo + | TraceEventInfo + | HibernatableWebSocketEventInfo + | CustomEventInfo; + } + interface Outcome { + readonly type: 'outcome'; + readonly outcome: EventOutcome; + readonly cpuTime: number; + readonly wallTime: number; + } + interface SpanOpen { + readonly type: 'spanOpen'; + readonly name: string; + // id for the span being opened by this SpanOpen event. + readonly spanId: string; + readonly info?: FetchEventInfo | JsRpcEventInfo | Attributes; + } + interface SpanClose { + readonly type: 'spanClose'; + readonly outcome: EventOutcome; + } + interface DiagnosticChannelEvent { + readonly type: 'diagnosticChannel'; + readonly channel: string; + readonly message: any; + } + interface Exception { + readonly type: 'exception'; + readonly name: string; + readonly message: string; + readonly stack?: string; + } + interface Log { + readonly type: 'log'; + readonly level: 'debug' | 'error' | 'info' | 'log' | 'warn'; + readonly message: object; + } + interface DroppedEventsDiagnostic { + readonly diagnosticsType: 'droppedEvents'; + readonly count: number; + } + interface StreamDiagnostic { + readonly type: 'streamDiagnostic'; + // To add new diagnostic types, define a new interface and add it to this union type. 
+ readonly diagnostic: DroppedEventsDiagnostic; + } + // This marks the worker handler return information. + // This is separate from Outcome because the worker invocation can live for a long time after + // returning. For example - Websockets that return an http upgrade response but then continue + // streaming information or SSE http connections. + interface Return { + readonly type: 'return'; + readonly info?: FetchResponseInfo; + } + interface Attribute { + readonly name: string; + readonly value: string | string[] | boolean | boolean[] | number | number[] | bigint | bigint[]; + } + interface Attributes { + readonly type: 'attributes'; + readonly info: Attribute[]; + } + type EventType = + | Onset + | Outcome + | SpanOpen + | SpanClose + | DiagnosticChannelEvent + | Exception + | Log + | StreamDiagnostic + | Return + | Attributes; + // Context in which this trace event lives. + interface SpanContext { + // Single id for the entire top-level invocation + // This should be a new traceId for the first worker stage invoked in the eyeball request and then + // same-account service-bindings should reuse the same traceId but cross-account service-bindings + // should use a new traceId. + readonly traceId: string; + // spanId in which this event is handled + // for Onset and SpanOpen events this would be the parent span id + // for Outcome and SpanClose these this would be the span id of the opening Onset and SpanOpen events + // For Hibernate and Mark this would be the span under which they were emitted. + // spanId is not set ONLY if: + // 1. This is an Onset event + // 2. We are not inheriting any SpanContext. (e.g. this is a cross-account service binding or a new top-level invocation) + readonly spanId?: string; + } + interface TailEvent { + // invocation id of the currently invoked worker stage. + // invocation id will always be unique to every Onset event and will be the same until the Outcome event. 
+ readonly invocationId: string; + // Inherited spanContext for this event. + readonly spanContext: SpanContext; + readonly timestamp: Date; + readonly sequence: number; + readonly event: Event; + } + type TailEventHandler = (event: TailEvent) => void | Promise; + type TailEventHandlerObject = { + outcome?: TailEventHandler; + spanOpen?: TailEventHandler; + spanClose?: TailEventHandler; + diagnosticChannel?: TailEventHandler; + exception?: TailEventHandler; + log?: TailEventHandler; + return?: TailEventHandler; + attributes?: TailEventHandler; + }; + type TailEventHandlerType = TailEventHandler | TailEventHandlerObject; +} +// Copyright (c) 2022-2023 Cloudflare, Inc. +// Licensed under the Apache 2.0 license found in the LICENSE file or at: +// https://opensource.org/licenses/Apache-2.0 +/** + * Data types supported for holding vector metadata. + */ +type VectorizeVectorMetadataValue = string | number | boolean | string[]; +/** + * Additional information to associate with a vector. + */ +type VectorizeVectorMetadata = VectorizeVectorMetadataValue | Record; +type VectorFloatArray = Float32Array | Float64Array; +interface VectorizeError { + code?: number; + error: string; +} +/** + * Comparison logic/operation to use for metadata filtering. + * + * This list is expected to grow as support for more operations are released. + */ +type VectorizeVectorMetadataFilterOp = '$eq' | '$ne' | '$lt' | '$lte' | '$gt' | '$gte'; +type VectorizeVectorMetadataFilterCollectionOp = '$in' | '$nin'; +/** + * Filter criteria for vector metadata used to limit the retrieved query result set. + */ +type VectorizeVectorMetadataFilter = { + [field: string]: + | Exclude + | null + | { + [Op in VectorizeVectorMetadataFilterOp]?: Exclude | null; + } + | { + [Op in VectorizeVectorMetadataFilterCollectionOp]?: Exclude[]; + }; +}; +/** + * Supported distance metrics for an index. + * Distance metrics determine how other "similar" vectors are determined. 
+ */ +type VectorizeDistanceMetric = 'euclidean' | 'cosine' | 'dot-product'; +/** + * Metadata return levels for a Vectorize query. + * + * Default to "none". + * + * @property all Full metadata for the vector return set, including all fields (including those un-indexed) without truncation. This is a more expensive retrieval, as it requires additional fetching & reading of un-indexed data. + * @property indexed Return all metadata fields configured for indexing in the vector return set. This level of retrieval is "free" in that no additional overhead is incurred returning this data. However, note that indexed metadata is subject to truncation (especially for larger strings). + * @property none No indexed metadata will be returned. + */ +type VectorizeMetadataRetrievalLevel = 'all' | 'indexed' | 'none'; +interface VectorizeQueryOptions { + topK?: number; + namespace?: string; + returnValues?: boolean; + returnMetadata?: boolean | VectorizeMetadataRetrievalLevel; + filter?: VectorizeVectorMetadataFilter; +} +/** + * Information about the configuration of an index. + */ +type VectorizeIndexConfig = + | { + dimensions: number; + metric: VectorizeDistanceMetric; + } + | { + preset: string; // keep this generic, as we'll be adding more presets in the future and this is only in a read capacity + }; +/** + * Metadata about an existing index. + * + * This type is exclusively for the Vectorize **beta** and will be deprecated once Vectorize RC is released. + * See {@link VectorizeIndexInfo} for its post-beta equivalent. + */ +interface VectorizeIndexDetails { + /** The unique ID of the index */ + readonly id: string; + /** The name of the index. */ + name: string; + /** (optional) A human readable description for the index. */ + description?: string; + /** The index configuration, including the dimension size and distance metric. */ + config: VectorizeIndexConfig; + /** The number of records containing vectors within the index. 
*/ + vectorsCount: number; +} +/** + * Metadata about an existing index. + */ +interface VectorizeIndexInfo { + /** The number of records containing vectors within the index. */ + vectorCount: number; + /** Number of dimensions the index has been configured for. */ + dimensions: number; + /** ISO 8601 datetime of the last processed mutation on in the index. All changes before this mutation will be reflected in the index state. */ + processedUpToDatetime: number; + /** UUIDv4 of the last mutation processed by the index. All changes before this mutation will be reflected in the index state. */ + processedUpToMutation: number; +} +/** + * Represents a single vector value set along with its associated metadata. + */ +interface VectorizeVector { + /** The ID for the vector. This can be user-defined, and must be unique. It should uniquely identify the object, and is best set based on the ID of what the vector represents. */ + id: string; + /** The vector values */ + values: VectorFloatArray | number[]; + /** The namespace this vector belongs to. */ + namespace?: string; + /** Metadata associated with the vector. Includes the values of other fields and potentially additional details. */ + metadata?: Record; +} +/** + * Represents a matched vector for a query along with its score and (if specified) the matching vector information. + */ +type VectorizeMatch = Pick, 'values'> & + Omit & { + /** The score or rank for similarity, when returned as a result */ + score: number; + }; +/** + * A set of matching {@link VectorizeMatch} for a particular query. + */ +interface VectorizeMatches { + matches: VectorizeMatch[]; + count: number; +} +/** + * Results of an operation that performed a mutation on a set of vectors. + * Here, `ids` is a list of vectors that were successfully processed. + * + * This type is exclusively for the Vectorize **beta** and will be deprecated once Vectorize RC is released. + * See {@link VectorizeAsyncMutation} for its post-beta equivalent. 
+ */ +interface VectorizeVectorMutation { + /* List of ids of vectors that were successfully processed. */ + ids: string[]; + /* Total count of the number of processed vectors. */ + count: number; +} +/** + * Result type indicating a mutation on the Vectorize Index. + * Actual mutations are processed async where the `mutationId` is the unique identifier for the operation. + */ +interface VectorizeAsyncMutation { + /** The unique identifier for the async mutation operation containing the changeset. */ + mutationId: string; +} +/** + * A Vectorize Vector Search Index for querying vectors/embeddings. + * + * This type is exclusively for the Vectorize **beta** and will be deprecated once Vectorize RC is released. + * See {@link Vectorize} for its new implementation. + */ +declare abstract class VectorizeIndex { + /** + * Get information about the currently bound index. + * @returns A promise that resolves with information about the current index. + */ + public describe(): Promise; + /** + * Use the provided vector to perform a similarity search across the index. + * @param vector Input vector that will be used to drive the similarity search. + * @param options Configuration options to massage the returned data. + * @returns A promise that resolves with matched and scored vectors. + */ + public query(vector: VectorFloatArray | number[], options?: VectorizeQueryOptions): Promise; + /** + * Insert a list of vectors into the index dataset. If a provided id exists, an error will be thrown. + * @param vectors List of vectors that will be inserted. + * @returns A promise that resolves with the ids & count of records that were successfully processed. + */ + public insert(vectors: VectorizeVector[]): Promise; + /** + * Upsert a list of vectors into the index dataset. If a provided id exists, it will be replaced with the new values. + * @param vectors List of vectors that will be upserted. 
+ * @returns A promise that resolves with the ids & count of records that were successfully processed. + */ + public upsert(vectors: VectorizeVector[]): Promise; + /** + * Delete a list of vectors with a matching id. + * @param ids List of vector ids that should be deleted. + * @returns A promise that resolves with the ids & count of records that were successfully processed (and thus deleted). + */ + public deleteByIds(ids: string[]): Promise; + /** + * Get a list of vectors with a matching id. + * @param ids List of vector ids that should be returned. + * @returns A promise that resolves with the raw unscored vectors matching the id set. + */ + public getByIds(ids: string[]): Promise; +} +/** + * A Vectorize Vector Search Index for querying vectors/embeddings. + * + * Mutations in this version are async, returning a mutation id. + */ +declare abstract class Vectorize { + /** + * Get information about the currently bound index. + * @returns A promise that resolves with information about the current index. + */ + public describe(): Promise; + /** + * Use the provided vector to perform a similarity search across the index. + * @param vector Input vector that will be used to drive the similarity search. + * @param options Configuration options to massage the returned data. + * @returns A promise that resolves with matched and scored vectors. + */ + public query(vector: VectorFloatArray | number[], options?: VectorizeQueryOptions): Promise; + /** + * Use the provided vector-id to perform a similarity search across the index. + * @param vectorId Id for a vector in the index against which the index should be queried. + * @param options Configuration options to massage the returned data. + * @returns A promise that resolves with matched and scored vectors. + */ + public queryById(vectorId: string, options?: VectorizeQueryOptions): Promise; + /** + * Insert a list of vectors into the index dataset. If a provided id exists, an error will be thrown. 
+ * @param vectors List of vectors that will be inserted. + * @returns A promise that resolves with a unique identifier of a mutation containing the insert changeset. + */ + public insert(vectors: VectorizeVector[]): Promise; + /** + * Upsert a list of vectors into the index dataset. If a provided id exists, it will be replaced with the new values. + * @param vectors List of vectors that will be upserted. + * @returns A promise that resolves with a unique identifier of a mutation containing the upsert changeset. + */ + public upsert(vectors: VectorizeVector[]): Promise; + /** + * Delete a list of vectors with a matching id. + * @param ids List of vector ids that should be deleted. + * @returns A promise that resolves with a unique identifier of a mutation containing the delete changeset. + */ + public deleteByIds(ids: string[]): Promise; + /** + * Get a list of vectors with a matching id. + * @param ids List of vector ids that should be returned. + * @returns A promise that resolves with the raw unscored vectors matching the id set. + */ + public getByIds(ids: string[]): Promise; +} +/** + * The interface for "version_metadata" binding + * providing metadata about the Worker Version using this binding. + */ +type WorkerVersionMetadata = { + /** The ID of the Worker Version using this binding */ + id: string; + /** The tag of the Worker Version using this binding */ + tag: string; + /** The timestamp of when the Worker Version was uploaded */ + timestamp: string; +}; +interface DynamicDispatchLimits { + /** + * Limit CPU time in milliseconds. + */ + cpuMs?: number; + /** + * Limit number of subrequests. + */ + subRequests?: number; +} +interface DynamicDispatchOptions { + /** + * Limit resources of invoked Worker script. + */ + limits?: DynamicDispatchLimits; + /** + * Arguments for outbound Worker script, if configured. + */ + outbound?: { + [key: string]: any; + }; +} +interface DispatchNamespace { + /** + * @param name Name of the Worker script. 
+ * @param args Arguments to Worker script. + * @param options Options for Dynamic Dispatch invocation. + * @returns A Fetcher object that allows you to send requests to the Worker script. + * @throws If the Worker script does not exist in this dispatch namespace, an error will be thrown. + */ + get( + name: string, + args?: { + [key: string]: any; + }, + options?: DynamicDispatchOptions, + ): Fetcher; +} +declare module 'cloudflare:workflows' { + /** + * NonRetryableError allows for a user to throw a fatal error + * that makes a Workflow instance fail immediately without triggering a retry + */ + export class NonRetryableError extends Error { + public constructor(message: string, name?: string); + } +} +declare abstract class Workflow { + /** + * Get a handle to an existing instance of the Workflow. + * @param id Id for the instance of this Workflow + * @returns A promise that resolves with a handle for the Instance + */ + public get(id: string): Promise; + /** + * Create a new instance and return a handle to it. If a provided id exists, an error will be thrown. + * @param options Options when creating an instance including id and params + * @returns A promise that resolves with a handle for the Instance + */ + public create(options?: WorkflowInstanceCreateOptions): Promise; + /** + * Create a batch of instances and return handle for all of them. If a provided id exists, an error will be thrown. + * `createBatch` is limited at 100 instances at a time or when the RPC limit for the batch (1MiB) is reached. + * @param batch List of Options when creating an instance including name and params + * @returns A promise that resolves with a list of handles for the created instances. 
+ */ + public createBatch(batch: WorkflowInstanceCreateOptions[]): Promise; +} +type WorkflowDurationLabel = 'second' | 'minute' | 'hour' | 'day' | 'week' | 'month' | 'year'; +type WorkflowSleepDuration = `${number} ${WorkflowDurationLabel}${'s' | ''}` | number; +type WorkflowRetentionDuration = WorkflowSleepDuration; +interface WorkflowInstanceCreateOptions { + /** + * An id for your Workflow instance. Must be unique within the Workflow. + */ + id?: string; + /** + * The event payload the Workflow instance is triggered with + */ + params?: PARAMS; + /** + * The retention policy for Workflow instance. + * Defaults to the maximum retention period available for the owner's account. + */ + retention?: { + successRetention?: WorkflowRetentionDuration; + errorRetention?: WorkflowRetentionDuration; + }; +} +type InstanceStatus = { + status: + | 'queued' // means that instance is waiting to be started (see concurrency limits) + | 'running' + | 'paused' + | 'errored' + | 'terminated' // user terminated the instance while it was running + | 'complete' + | 'waiting' // instance is hibernating and waiting for sleep or event to finish + | 'waitingForPause' // instance is finishing the current work to pause + | 'unknown'; + error?: { + name: string; + message: string; + }; + output?: unknown; +}; +interface WorkflowError { + code?: number; + message: string; +} +declare abstract class WorkflowInstance { + public id: string; + /** + * Pause the instance. + */ + public pause(): Promise; + /** + * Resume the instance. If it is already running, an error will be thrown. + */ + public resume(): Promise; + /** + * Terminate the instance. If it is errored, terminated or complete, an error will be thrown. + */ + public terminate(): Promise; + /** + * Restart the instance. + */ + public restart(): Promise; + /** + * Returns the current status of the instance. + */ + public status(): Promise; + /** + * Send an event to this instance. 
+ */ + public sendEvent({ type, payload }: { type: string; payload: unknown }): Promise; +} diff --git a/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/wrangler.jsonc b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/wrangler.jsonc new file mode 100644 index 000000000000..eed026ecf585 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6-cf-workers/wrangler.jsonc @@ -0,0 +1,13 @@ +{ + "compatibility_date": "2026-03-12", + "compatibility_flags": ["global_fetch_strictly_public", "nodejs_compat"], + "name": "astro-6-cf-workers", + "main": "./sentry.server.config.js", + "assets": { + "directory": "./dist", + "binding": "ASSETS", + }, + "observability": { + "enabled": true, + }, +} diff --git a/dev-packages/e2e-tests/test-applications/astro-6/.gitignore b/dev-packages/e2e-tests/test-applications/astro-6/.gitignore new file mode 100644 index 000000000000..560782d47d98 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6/.gitignore @@ -0,0 +1,26 @@ +# build output +dist/ + +# generated types +.astro/ + +# dependencies +node_modules/ + +# logs +npm-debug.log* +yarn-debug.log* +yarn-error.log* +pnpm-debug.log* + +# environment variables +.env +.env.production + +# macOS-specific files +.DS_Store + +# jetbrains setting folder +.idea/ + +test-results diff --git a/dev-packages/e2e-tests/test-applications/astro-6/.npmrc b/dev-packages/e2e-tests/test-applications/astro-6/.npmrc new file mode 100644 index 000000000000..070f80f05092 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6/.npmrc @@ -0,0 +1,2 @@ +@sentry:registry=http://127.0.0.1:4873 +@sentry-internal:registry=http://127.0.0.1:4873 diff --git a/dev-packages/e2e-tests/test-applications/astro-6/README.md b/dev-packages/e2e-tests/test-applications/astro-6/README.md new file mode 100644 index 000000000000..ff19a3e7ece8 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6/README.md @@ -0,0 +1,48 @@ +# Astro Starter Kit: Basics + +```sh 
+npm create astro@latest -- --template basics +``` + +[![Open in StackBlitz](https://developer.stackblitz.com/img/open_in_stackblitz.svg)](https://stackblitz.com/github/withastro/astro/tree/latest/examples/basics) +[![Open with CodeSandbox](https://assets.codesandbox.io/github/button-edit-lime.svg)](https://codesandbox.io/p/sandbox/github/withastro/astro/tree/latest/examples/basics) +[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/withastro/astro?devcontainer_path=.devcontainer/basics/devcontainer.json) + +> 🧑‍🚀 **Seasoned astronaut?** Delete this file. Have fun! + +![just-the-basics](https://github.com/withastro/astro/assets/2244813/a0a5533c-a856-4198-8470-2d67b1d7c554) + +## 🚀 Project Structure + +Inside of your Astro project, you'll see the following folders and files: + +```text +/ +├── public/ +│ └── favicon.svg +├── src/ +│ ├── layouts/ +│ │ └── Layout.astro +│ └── pages/ +│ └── index.astro +└── package.json +``` + +To learn more about the folder structure of an Astro project, refer to [our guide on project structure](https://docs.astro.build/en/basics/project-structure/). + +## 🧞 Commands + +All commands are run from the root of the project, from a terminal: + +| Command | Action | +| :------------------------ | :----------------------------------------------- | +| `npm install` | Installs dependencies | +| `npm run dev` | Starts local dev server at `localhost:4321` | +| `npm run build` | Build your production site to `./dist/` | +| `npm run preview` | Preview your build locally, before deploying | +| `npm run astro ...` | Run CLI commands like `astro add`, `astro check` | +| `npm run astro -- --help` | Get help using the Astro CLI | + +## 👀 Want to learn more? + +Feel free to check [our documentation](https://docs.astro.build) or jump into our [Discord server](https://astro.build/chat). 
diff --git a/dev-packages/e2e-tests/test-applications/astro-6/astro.config.mjs b/dev-packages/e2e-tests/test-applications/astro-6/astro.config.mjs new file mode 100644 index 000000000000..234a57fca662 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6/astro.config.mjs @@ -0,0 +1,24 @@ +import sentry from '@sentry/astro'; +// @ts-check +import { defineConfig } from 'astro/config'; + +import node from '@astrojs/node'; + +// https://astro.build/config +export default defineConfig({ + integrations: [ + sentry({ + debug: true, + sourceMapsUploadOptions: { + enabled: false, + }, + }), + ], + output: 'server', + security: { + allowedDomains: [{ hostname: 'localhost' }], + }, + adapter: node({ + mode: 'standalone', + }), +}); diff --git a/dev-packages/e2e-tests/test-applications/astro-6/package.json b/dev-packages/e2e-tests/test-applications/astro-6/package.json new file mode 100644 index 000000000000..e97314a949b4 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6/package.json @@ -0,0 +1,25 @@ +{ + "name": "astro-6", + "type": "module", + "version": "0.0.1", + "scripts": { + "dev": "astro dev", + "build": "astro build", + "preview": "astro preview", + "astro": "astro", + "start": "node ./dist/server/entry.mjs", + "test:build": "pnpm install && pnpm build", + "test:assert": "TEST_ENV=production playwright test" + }, + "dependencies": { + "@astrojs/node": "^10.0.0", + "@playwright/test": "~1.56.0", + "@sentry-internal/test-utils": "link:../../../test-utils", + "@sentry/astro": "latest || *", + "astro": "^6.0.0" + }, + "volta": { + "node": "22.22.0", + "extends": "../../package.json" + } +} diff --git a/dev-packages/e2e-tests/test-applications/astro-6/playwright.config.mjs b/dev-packages/e2e-tests/test-applications/astro-6/playwright.config.mjs new file mode 100644 index 000000000000..ae58e4ff3ddc --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6/playwright.config.mjs @@ -0,0 +1,13 @@ +import { getPlaywrightConfig } from 
'@sentry-internal/test-utils'; + +const testEnv = process.env.TEST_ENV; + +if (!testEnv) { + throw new Error('No test env defined'); +} + +const config = getPlaywrightConfig({ + startCommand: 'pnpm start', +}); + +export default config; diff --git a/dev-packages/e2e-tests/test-applications/astro-6/public/favicon.svg b/dev-packages/e2e-tests/test-applications/astro-6/public/favicon.svg new file mode 100644 index 000000000000..f157bd1c5e28 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6/public/favicon.svg @@ -0,0 +1,9 @@ + + + + diff --git a/dev-packages/e2e-tests/test-applications/astro-6/sentry.client.config.js b/dev-packages/e2e-tests/test-applications/astro-6/sentry.client.config.js new file mode 100644 index 000000000000..83573d36d0be --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6/sentry.client.config.js @@ -0,0 +1,22 @@ +import * as Sentry from '@sentry/astro'; + +Sentry.init({ + dsn: import.meta.env.PUBLIC_E2E_TEST_DSN, + environment: 'qa', + tracesSampleRate: 1.0, + tunnel: 'http://localhost:3031/', // proxy server + integrations: [ + Sentry.browserTracingIntegration({ + beforeStartSpan: opts => { + if (opts.name.startsWith('/blog/')) { + return { + ...opts, + name: window.location.pathname, + }; + } + return opts; + }, + }), + ], + debug: true, +}); diff --git a/dev-packages/e2e-tests/test-applications/astro-6/sentry.server.config.js b/dev-packages/e2e-tests/test-applications/astro-6/sentry.server.config.js new file mode 100644 index 000000000000..bc90470cef38 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6/sentry.server.config.js @@ -0,0 +1,9 @@ +import * as Sentry from '@sentry/astro'; + +Sentry.init({ + dsn: import.meta.env.PUBLIC_E2E_TEST_DSN, + environment: 'qa', + tracesSampleRate: 1.0, + tunnel: 'http://localhost:3031/', // proxy server + debug: true, +}); diff --git a/dev-packages/e2e-tests/test-applications/astro-6/src/assets/astro.svg 
b/dev-packages/e2e-tests/test-applications/astro-6/src/assets/astro.svg new file mode 100644 index 000000000000..8cf8fb0c7da6 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6/src/assets/astro.svg @@ -0,0 +1 @@ + diff --git a/dev-packages/e2e-tests/test-applications/astro-6/src/assets/background.svg b/dev-packages/e2e-tests/test-applications/astro-6/src/assets/background.svg new file mode 100644 index 000000000000..4b2be0ac0e47 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6/src/assets/background.svg @@ -0,0 +1 @@ + diff --git a/dev-packages/e2e-tests/test-applications/astro-6/src/components/Avatar.astro b/dev-packages/e2e-tests/test-applications/astro-6/src/components/Avatar.astro new file mode 100644 index 000000000000..5611579efaf1 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6/src/components/Avatar.astro @@ -0,0 +1,5 @@ +--- + +--- + +User avatar diff --git a/dev-packages/e2e-tests/test-applications/astro-6/src/components/Welcome.astro b/dev-packages/e2e-tests/test-applications/astro-6/src/components/Welcome.astro new file mode 100644 index 000000000000..6f862e767574 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6/src/components/Welcome.astro @@ -0,0 +1,205 @@ +--- +import astroLogo from '../assets/astro.svg'; +import background from '../assets/background.svg'; +--- + +

+ + diff --git a/dev-packages/e2e-tests/test-applications/astro-6/src/layouts/Layout.astro b/dev-packages/e2e-tests/test-applications/astro-6/src/layouts/Layout.astro new file mode 100644 index 000000000000..6105f48ffd35 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6/src/layouts/Layout.astro @@ -0,0 +1,22 @@ + + + + + + + + Astro Basics + + + + + + + diff --git a/dev-packages/e2e-tests/test-applications/astro-6/src/pages/api/user/[userId].json.js b/dev-packages/e2e-tests/test-applications/astro-6/src/pages/api/user/[userId].json.js new file mode 100644 index 000000000000..481c8979dc89 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6/src/pages/api/user/[userId].json.js @@ -0,0 +1,8 @@ +export function GET({ params }) { + return new Response( + JSON.stringify({ + greeting: `Hello ${params.userId}`, + userId: params.userId, + }), + ); +} diff --git a/dev-packages/e2e-tests/test-applications/astro-6/src/pages/blog/[slug].astro b/dev-packages/e2e-tests/test-applications/astro-6/src/pages/blog/[slug].astro new file mode 100644 index 000000000000..b776fa25c494 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6/src/pages/blog/[slug].astro @@ -0,0 +1,11 @@ +--- +import Layout from '../../layouts/Layout.astro'; + +export const prerender = false; + +const { slug } = Astro.params; +--- + + +

Blog post: {slug}

+
diff --git a/dev-packages/e2e-tests/test-applications/astro-6/src/pages/catchAll/[...path].astro b/dev-packages/e2e-tests/test-applications/astro-6/src/pages/catchAll/[...path].astro new file mode 100644 index 000000000000..9fe2bdab5c15 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6/src/pages/catchAll/[...path].astro @@ -0,0 +1,11 @@ +--- +import Layout from '../../layouts/Layout.astro'; + +export const prerender = false; + +const params = Astro.params; +--- + + +

params: {params}

+
diff --git a/dev-packages/e2e-tests/test-applications/astro-6/src/pages/client-error/index.astro b/dev-packages/e2e-tests/test-applications/astro-6/src/pages/client-error/index.astro new file mode 100644 index 000000000000..492524e2a713 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6/src/pages/client-error/index.astro @@ -0,0 +1,7 @@ +--- +import Layout from '../../layouts/Layout.astro'; +--- + + + + diff --git a/dev-packages/e2e-tests/test-applications/astro-6/src/pages/endpoint-error/api.ts b/dev-packages/e2e-tests/test-applications/astro-6/src/pages/endpoint-error/api.ts new file mode 100644 index 000000000000..a76accdba010 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6/src/pages/endpoint-error/api.ts @@ -0,0 +1,15 @@ +import type { APIRoute } from 'astro'; + +export const prerender = false; + +export const GET: APIRoute = ({ request, url }) => { + if (url.searchParams.has('error')) { + throw new Error('Endpoint Error'); + } + return new Response( + JSON.stringify({ + search: url.search, + sp: url.searchParams, + }), + ); +}; diff --git a/dev-packages/e2e-tests/test-applications/astro-6/src/pages/endpoint-error/index.astro b/dev-packages/e2e-tests/test-applications/astro-6/src/pages/endpoint-error/index.astro new file mode 100644 index 000000000000..ecfb0641144e --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6/src/pages/endpoint-error/index.astro @@ -0,0 +1,9 @@ +--- +import Layout from '../../layouts/Layout.astro'; + +export const prerender = false; +--- + + + + diff --git a/dev-packages/e2e-tests/test-applications/astro-6/src/pages/index.astro b/dev-packages/e2e-tests/test-applications/astro-6/src/pages/index.astro new file mode 100644 index 000000000000..7032437764f8 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6/src/pages/index.astro @@ -0,0 +1,23 @@ +--- +import Welcome from '../components/Welcome.astro'; +import Layout from '../layouts/Layout.astro'; + +// Welcome to 
Astro! Wondering what to do next? Check out the Astro documentation at https://docs.astro.build +// Don't want to use any of this? Delete everything in this file, the `assets`, `components`, and `layouts` directories, and start fresh. +--- + + +
+

Astro E2E Test App

+ +
+
diff --git a/dev-packages/e2e-tests/test-applications/astro-6/src/pages/server-island/index.astro b/dev-packages/e2e-tests/test-applications/astro-6/src/pages/server-island/index.astro new file mode 100644 index 000000000000..0e922af4667f --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6/src/pages/server-island/index.astro @@ -0,0 +1,14 @@ +--- +import Avatar from '../../components/Avatar.astro'; +import Layout from '../../layouts/Layout.astro'; + +export const prerender = true; +--- + + +

This page is static, except for the avatar which is loaded dynamically from the server

+ + +

Fallback

+
+
diff --git a/dev-packages/e2e-tests/test-applications/astro-6/src/pages/ssr-error/index.astro b/dev-packages/e2e-tests/test-applications/astro-6/src/pages/ssr-error/index.astro new file mode 100644 index 000000000000..fc42bcbae4f7 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6/src/pages/ssr-error/index.astro @@ -0,0 +1,11 @@ +--- +import Layout from '../../layouts/Layout.astro'; + +const a = {} as any; +console.log(a.foo.x); +export const prerender = false; +--- + + +

Page with SSR error

+
diff --git a/dev-packages/e2e-tests/test-applications/astro-6/src/pages/test-ssr/index.astro b/dev-packages/e2e-tests/test-applications/astro-6/src/pages/test-ssr/index.astro new file mode 100644 index 000000000000..4531c20c05ad --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6/src/pages/test-ssr/index.astro @@ -0,0 +1,11 @@ +--- +import Layout from '../../layouts/Layout.astro'; + +export const prerender = false; +--- + + +

This is a server page

+ + +
diff --git a/dev-packages/e2e-tests/test-applications/astro-6/src/pages/test-static/index.astro b/dev-packages/e2e-tests/test-applications/astro-6/src/pages/test-static/index.astro new file mode 100644 index 000000000000..c0fd701d4a2a --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6/src/pages/test-static/index.astro @@ -0,0 +1,11 @@ +--- +import Layout from '../../layouts/Layout.astro'; + +export const prerender = true; +--- + + +

This is a static page

+ + +
diff --git a/dev-packages/e2e-tests/test-applications/astro-6/src/pages/user-page/[userId].astro b/dev-packages/e2e-tests/test-applications/astro-6/src/pages/user-page/[userId].astro new file mode 100644 index 000000000000..8050e386a39f --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6/src/pages/user-page/[userId].astro @@ -0,0 +1,16 @@ +--- +import Layout from '../../layouts/Layout.astro'; + +export const prerender = false; + +const { userId } = Astro.params; + +const response = await fetch(Astro.url.origin + `/api/user/${userId}.json`); +const data = await response.json(); +--- + + +

{data.greeting}

+ +

data: {JSON.stringify(data)}

+
diff --git a/dev-packages/e2e-tests/test-applications/astro-6/src/pages/user-page/settings.astro b/dev-packages/e2e-tests/test-applications/astro-6/src/pages/user-page/settings.astro new file mode 100644 index 000000000000..8260e632c07b --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6/src/pages/user-page/settings.astro @@ -0,0 +1,7 @@ +--- +import Layout from '../../layouts/Layout.astro'; +--- + + +

User Settings

+
diff --git a/dev-packages/e2e-tests/test-applications/astro-6/start-event-proxy.mjs b/dev-packages/e2e-tests/test-applications/astro-6/start-event-proxy.mjs new file mode 100644 index 000000000000..af51909eb9b6 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6/start-event-proxy.mjs @@ -0,0 +1,6 @@ +import { startEventProxyServer } from '@sentry-internal/test-utils'; + +startEventProxyServer({ + port: 3031, + proxyServerName: 'astro-6', +}); diff --git a/dev-packages/e2e-tests/test-applications/astro-6/tests/errors.client.test.ts b/dev-packages/e2e-tests/test-applications/astro-6/tests/errors.client.test.ts new file mode 100644 index 000000000000..f198fd27a58e --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6/tests/errors.client.test.ts @@ -0,0 +1,79 @@ +import { expect, test } from '@playwright/test'; +import { waitForError } from '@sentry-internal/test-utils'; + +test.describe('client-side errors', () => { + test('captures error thrown on click', async ({ page }) => { + const errorEventPromise = waitForError('astro-6', errorEvent => { + return errorEvent?.exception?.values?.[0]?.value === 'client error'; + }); + + await page.goto('/client-error'); + + await page.getByText('Throw Error').click(); + + const errorEvent = await errorEventPromise; + + const errorEventFrames = errorEvent.exception?.values?.[0]?.stacktrace?.frames; + + expect(errorEventFrames?.[errorEventFrames?.length - 1]).toEqual( + expect.objectContaining({ + colno: expect.any(Number), + lineno: expect.any(Number), + filename: expect.stringContaining('/client-error'), + function: 'HTMLButtonElement.onclick', + in_app: true, + }), + ); + + expect(errorEvent).toMatchObject({ + exception: { + values: [ + { + mechanism: { + handled: false, + type: 'auto.browser.global_handlers.onerror', + }, + type: 'Error', + value: 'client error', + stacktrace: expect.any(Object), // detailed check above + }, + ], + }, + level: 'error', + platform: 'javascript', + request: { + url: 
expect.stringContaining('/client-error'), + headers: { + 'User-Agent': expect.any(String), + }, + }, + event_id: expect.stringMatching(/[a-f0-9]{32}/), + timestamp: expect.any(Number), + sdk: { + integrations: expect.arrayContaining([ + 'InboundFilters', + 'FunctionToString', + 'BrowserApiErrors', + 'Breadcrumbs', + 'GlobalHandlers', + 'LinkedErrors', + 'Dedupe', + 'HttpContext', + 'BrowserSession', + 'BrowserTracing', + ]), + name: 'sentry.javascript.astro', + version: expect.any(String), + packages: expect.any(Array), + }, + transaction: '/client-error', + contexts: { + trace: { + trace_id: expect.stringMatching(/[a-f0-9]{32}/), + span_id: expect.stringMatching(/[a-f0-9]{16}/), + }, + }, + environment: 'qa', + }); + }); +}); diff --git a/dev-packages/e2e-tests/test-applications/astro-6/tests/errors.server.test.ts b/dev-packages/e2e-tests/test-applications/astro-6/tests/errors.server.test.ts new file mode 100644 index 000000000000..f72ced97613c --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6/tests/errors.server.test.ts @@ -0,0 +1,161 @@ +import { expect, test } from '@playwright/test'; +import { waitForError, waitForTransaction } from '@sentry-internal/test-utils'; + +test.describe('server-side errors', () => { + test('captures SSR error', async ({ page }) => { + const errorEventPromise = waitForError('astro-6', errorEvent => { + return errorEvent?.exception?.values?.[0]?.value === "Cannot read properties of undefined (reading 'x')"; + }); + + const transactionEventPromise = waitForTransaction('astro-6', transactionEvent => { + return transactionEvent.transaction === 'GET /ssr-error'; + }); + + // This page returns an error status code, so we need to catch the navigation error + await page.goto('/ssr-error').catch(() => { + // Expected to fail with net::ERR_HTTP_RESPONSE_CODE_FAILURE in newer Chromium versions + }); + + const errorEvent = await errorEventPromise; + const transactionEvent = await transactionEventPromise; + + 
expect(transactionEvent).toMatchObject({ + transaction: 'GET /ssr-error', + spans: [], + }); + + const traceId = transactionEvent.contexts?.trace?.trace_id; + const spanId = transactionEvent.contexts?.trace?.span_id; + + expect(traceId).toMatch(/[a-f0-9]{32}/); + expect(spanId).toMatch(/[a-f0-9]{16}/); + expect(transactionEvent.contexts?.trace?.parent_span_id).toBeUndefined(); + + expect(errorEvent).toMatchObject({ + contexts: { + app: expect.any(Object), + cloud_resource: expect.any(Object), + culture: expect.any(Object), + device: expect.any(Object), + os: expect.any(Object), + runtime: expect.any(Object), + trace: { + span_id: spanId, + trace_id: traceId, + }, + }, + environment: 'qa', + event_id: expect.stringMatching(/[a-f0-9]{32}/), + exception: { + values: [ + { + mechanism: { + handled: false, + type: 'auto.middleware.astro', + }, + stacktrace: expect.any(Object), + type: 'TypeError', + value: "Cannot read properties of undefined (reading 'x')", + }, + ], + }, + platform: 'node', + request: { + cookies: {}, + headers: expect.objectContaining({ + // demonstrates that requestData integration is getting data + host: 'localhost:3030', + 'user-agent': expect.any(String), + }), + method: 'GET', + url: expect.stringContaining('/ssr-error'), + }, + sdk: { + integrations: expect.any(Array), + name: 'sentry.javascript.astro', + packages: expect.any(Array), + version: expect.any(String), + }, + server_name: expect.any(String), + timestamp: expect.any(Number), + transaction: 'GET /ssr-error', + }); + }); + + test('captures endpoint error', async ({ page }) => { + const errorEventPromise = waitForError('astro-6', errorEvent => { + return errorEvent?.exception?.values?.[0]?.value === 'Endpoint Error'; + }); + const transactionEventApiPromise = waitForTransaction('astro-6', transactionEvent => { + return transactionEvent.transaction === 'GET /endpoint-error/api'; + }); + const transactionEventEndpointPromise = waitForTransaction('astro-6', transactionEvent => { + return 
transactionEvent.transaction === 'GET /endpoint-error'; + }); + + await page.goto('/endpoint-error'); + await page.getByText('Get Data').click(); + + const errorEvent = await errorEventPromise; + const transactionEventApi = await transactionEventApiPromise; + const transactionEventEndpoint = await transactionEventEndpointPromise; + + expect(transactionEventEndpoint).toMatchObject({ + transaction: 'GET /endpoint-error', + spans: [], + }); + + const traceId = transactionEventEndpoint.contexts?.trace?.trace_id; + const endpointSpanId = transactionEventApi.contexts?.trace?.span_id; + + expect(traceId).toMatch(/[a-f0-9]{32}/); + expect(endpointSpanId).toMatch(/[a-f0-9]{16}/); + + expect(transactionEventApi).toMatchObject({ + transaction: 'GET /endpoint-error/api', + spans: [], + }); + + const spanId = transactionEventApi.contexts?.trace?.span_id; + const parentSpanId = transactionEventApi.contexts?.trace?.parent_span_id; + + expect(spanId).toMatch(/[a-f0-9]{16}/); + // TODO: This is incorrect, for whatever reason, it should be the endpointSpanId ideally + expect(parentSpanId).toMatch(/[a-f0-9]{16}/); + expect(parentSpanId).not.toEqual(endpointSpanId); + + expect(errorEvent).toMatchObject({ + contexts: { + trace: { + parent_span_id: parentSpanId, + span_id: spanId, + trace_id: traceId, + }, + }, + exception: { + values: [ + { + mechanism: { + handled: false, + type: 'auto.middleware.astro', + }, + stacktrace: expect.any(Object), + type: 'Error', + value: 'Endpoint Error', + }, + ], + }, + platform: 'node', + request: { + cookies: {}, + headers: expect.objectContaining({ + accept: expect.any(String), + }), + method: 'GET', + query_string: 'error=1', + url: expect.stringContaining('endpoint-error/api?error=1'), + }, + transaction: 'GET /endpoint-error/api', + }); + }); +}); diff --git a/dev-packages/e2e-tests/test-applications/astro-6/tests/tracing.dynamic.test.ts b/dev-packages/e2e-tests/test-applications/astro-6/tests/tracing.dynamic.test.ts new file mode 100644 index 
000000000000..a89d1efd9f3a --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6/tests/tracing.dynamic.test.ts @@ -0,0 +1,411 @@ +import { expect, test } from '@playwright/test'; +import { waitForTransaction } from '@sentry-internal/test-utils'; + +test.describe('tracing in dynamically rendered (ssr) routes', () => { + test('sends server and client pageload spans with the same trace id', async ({ page }) => { + const clientPageloadTxnPromise = waitForTransaction('astro-6', txnEvent => { + return txnEvent?.transaction === '/test-ssr'; + }); + + const serverPageRequestTxnPromise = waitForTransaction('astro-6', txnEvent => { + return txnEvent?.transaction === 'GET /test-ssr'; + }); + + await page.goto('/test-ssr'); + + const clientPageloadTxn = await clientPageloadTxnPromise; + const serverPageRequestTxn = await serverPageRequestTxnPromise; + + const clientPageloadTraceId = clientPageloadTxn.contexts?.trace?.trace_id; + const clientPageloadParentSpanId = clientPageloadTxn.contexts?.trace?.parent_span_id; + + const serverPageRequestTraceId = serverPageRequestTxn.contexts?.trace?.trace_id; + const serverPageloadSpanId = serverPageRequestTxn.contexts?.trace?.span_id; + + expect(clientPageloadTraceId).toEqual(serverPageRequestTraceId); + expect(clientPageloadParentSpanId).toEqual(serverPageloadSpanId); + + expect(clientPageloadTxn).toMatchObject({ + contexts: { + trace: { + data: expect.objectContaining({ + 'sentry.op': 'pageload', + 'sentry.origin': 'auto.pageload.astro', + 'sentry.source': 'route', + }), + op: 'pageload', + origin: 'auto.pageload.astro', + span_id: expect.stringMatching(/[a-f0-9]{16}/), + parent_span_id: expect.stringMatching(/[a-f0-9]{16}/), + trace_id: expect.stringMatching(/[a-f0-9]{32}/), + }, + }, + environment: 'qa', + event_id: expect.stringMatching(/[a-f0-9]{32}/), + measurements: expect.any(Object), + platform: 'javascript', + request: expect.any(Object), + sdk: { + integrations: expect.any(Array), + name: 
'sentry.javascript.astro', + packages: expect.any(Array), + version: expect.any(String), + }, + spans: expect.any(Array), + start_timestamp: expect.any(Number), + timestamp: expect.any(Number), + transaction: '/test-ssr', + transaction_info: { + source: 'route', + }, + type: 'transaction', + }); + + expect(serverPageRequestTxn).toMatchObject({ + contexts: { + app: expect.any(Object), + cloud_resource: expect.any(Object), + culture: expect.any(Object), + device: expect.any(Object), + os: expect.any(Object), + otel: expect.any(Object), + runtime: expect.any(Object), + trace: { + data: { + 'http.response.status_code': 200, + method: 'GET', + 'sentry.op': 'http.server', + 'sentry.origin': 'auto.http.astro', + 'sentry.sample_rate': 1, + 'sentry.source': 'route', + url: expect.stringContaining('/test-ssr'), + 'http.request.header.accept': expect.any(String), + 'http.request.header.accept_encoding': 'gzip, deflate, br, zstd', + 'http.request.header.accept_language': 'en-US', + 'http.request.header.sec_fetch_mode': 'navigate', + 'http.request.header.user_agent': expect.any(String), + }, + op: 'http.server', + origin: 'auto.http.astro', + status: 'ok', + span_id: expect.stringMatching(/[a-f0-9]{16}/), + trace_id: expect.stringMatching(/[a-f0-9]{32}/), + }, + }, + environment: 'qa', + event_id: expect.stringMatching(/[a-f0-9]{32}/), + platform: 'node', + request: { + cookies: {}, + headers: expect.objectContaining({ + // demonstrates that request data integration can extract headers + accept: expect.any(String), + 'accept-encoding': expect.any(String), + 'user-agent': expect.any(String), + }), + method: 'GET', + url: expect.stringContaining('/test-ssr'), + }, + sdk: { + integrations: expect.any(Array), + name: 'sentry.javascript.astro', + packages: expect.any(Array), + version: expect.any(String), + }, + server_name: expect.any(String), + spans: expect.any(Array), + start_timestamp: expect.any(Number), + timestamp: expect.any(Number), + transaction: 'GET /test-ssr', + 
transaction_info: { + source: 'route', + }, + type: 'transaction', + }); + }); +}); + +test.describe('nested SSR routes (client, server, server request)', () => { + /** The user-page route fetches from an endpoint and creates a deeply nested span structure: + * pageload — /user-page/myUsername123 + * ├── browser.** — multiple browser spans + * └── browser.request — /user-page/myUsername123 + * └── http.server — GET /user-page/[userId] (SSR page request) + * └── http.client — GET /api/user/myUsername123.json (executing fetch call from SSR page - span) + * └── http.server — GET /api/user/myUsername123.json (server request) + */ + test('sends connected server and client pageload and request spans with the same trace id', async ({ page }) => { + const clientPageloadTxnPromise = waitForTransaction('astro-6', txnEvent => { + return txnEvent?.transaction?.startsWith('/user-page/') ?? false; + }); + + const serverPageRequestTxnPromise = waitForTransaction('astro-6', txnEvent => { + return txnEvent?.transaction?.startsWith('GET /user-page/') ?? false; + }); + + const serverHTTPServerRequestTxnPromise = waitForTransaction('astro-6', txnEvent => { + return txnEvent?.transaction?.startsWith('GET /api/user/') ?? 
false; + }); + + await page.goto('/user-page/myUsername123'); + + const clientPageloadTxn = await clientPageloadTxnPromise; + const serverPageRequestTxn = await serverPageRequestTxnPromise; + const serverHTTPServerRequestTxn = await serverHTTPServerRequestTxnPromise; + const serverRequestHTTPClientSpan = serverPageRequestTxn.spans?.find( + span => span.op === 'http.client' && span.description?.includes('/api/user/'), + ); + + const clientPageloadTraceId = clientPageloadTxn.contexts?.trace?.trace_id; + + // Verify all spans have the same trace ID + expect(clientPageloadTraceId).toEqual(serverPageRequestTxn.contexts?.trace?.trace_id); + expect(clientPageloadTraceId).toEqual(serverHTTPServerRequestTxn.contexts?.trace?.trace_id); + expect(clientPageloadTraceId).toEqual(serverRequestHTTPClientSpan?.trace_id); + + // serverPageRequest has no parent (root span) + expect(serverPageRequestTxn.contexts?.trace?.parent_span_id).toBeUndefined(); + + // clientPageload's parent and serverRequestHTTPClient's parent is serverPageRequest + const serverPageRequestSpanId = serverPageRequestTxn.contexts?.trace?.span_id; + expect(clientPageloadTxn.contexts?.trace?.parent_span_id).toEqual(serverPageRequestSpanId); + expect(serverRequestHTTPClientSpan?.parent_span_id).toEqual(serverPageRequestSpanId); + + // serverHTTPServerRequest's parent is serverRequestHTTPClient + expect(serverHTTPServerRequestTxn.contexts?.trace?.parent_span_id).toEqual(serverRequestHTTPClientSpan?.span_id); + }); + + test('sends parametrized pageload, server and API request transaction names', async ({ page }) => { + const clientPageloadTxnPromise = waitForTransaction('astro-6', txnEvent => { + return txnEvent?.transaction?.startsWith('/user-page/') ?? false; + }); + + const serverPageRequestTxnPromise = waitForTransaction('astro-6', txnEvent => { + return txnEvent?.transaction?.startsWith('GET /user-page/') ?? 
false; + }); + + const serverHTTPServerRequestTxnPromise = waitForTransaction('astro-6', txnEvent => { + return txnEvent?.transaction?.startsWith('GET /api/user/') ?? false; + }); + + await page.goto('/user-page/myUsername123'); + + const clientPageloadTxn = await clientPageloadTxnPromise; + const serverPageRequestTxn = await serverPageRequestTxnPromise; + const serverHTTPServerRequestTxn = await serverHTTPServerRequestTxnPromise; + + const serverRequestHTTPClientSpan = serverPageRequestTxn.spans?.find( + span => span.op === 'http.client' && span.description?.includes('/api/user/'), + ); + + const routeNameMetaContent = await page.locator('meta[name="sentry-route-name"]').getAttribute('content'); + expect(routeNameMetaContent).toBe('%2Fuser-page%2F%5BuserId%5D'); + + // Client pageload transaction - actual URL with pageload operation + expect(clientPageloadTxn).toMatchObject({ + transaction: '/user-page/[userId]', + transaction_info: { source: 'route' }, + contexts: { + trace: { + op: 'pageload', + origin: 'auto.pageload.astro', + data: { + 'sentry.op': 'pageload', + 'sentry.origin': 'auto.pageload.astro', + 'sentry.source': 'route', + }, + }, + }, + }); + + // Server page request transaction - parametrized transaction name with actual URL in data + expect(serverPageRequestTxn).toMatchObject({ + transaction: 'GET /user-page/[userId]', + transaction_info: { source: 'route' }, + contexts: { + trace: { + op: 'http.server', + origin: 'auto.http.astro', + data: { + 'sentry.op': 'http.server', + 'sentry.origin': 'auto.http.astro', + 'sentry.source': 'route', + url: expect.stringContaining('/user-page/myUsername123'), + 'http.request.header.accept': expect.any(String), + 'http.request.header.accept_encoding': 'gzip, deflate, br, zstd', + 'http.request.header.accept_language': 'en-US', + 'http.request.header.sec_fetch_mode': 'navigate', + 'http.request.header.user_agent': expect.any(String), + }, + }, + }, + request: { url: 
expect.stringContaining('/user-page/myUsername123') }, + }); + + // HTTP client span - actual API URL with client operation + expect(serverRequestHTTPClientSpan).toMatchObject({ + op: 'http.client', + origin: 'auto.http.otel.node_fetch', + description: 'GET http://localhost:3030/api/user/myUsername123.json', // http.client does not need to be parametrized + data: { + 'sentry.op': 'http.client', + 'sentry.origin': 'auto.http.otel.node_fetch', + 'url.full': expect.stringContaining('/api/user/myUsername123.json'), + 'url.path': '/api/user/myUsername123.json', + url: expect.stringContaining('/api/user/myUsername123.json'), + }, + }); + + // Server HTTP request transaction + expect(serverHTTPServerRequestTxn).toMatchObject({ + transaction: 'GET /api/user/[userId].json', + transaction_info: { source: 'route' }, + contexts: { + trace: { + op: 'http.server', + origin: 'auto.http.astro', + data: { + 'sentry.op': 'http.server', + 'sentry.origin': 'auto.http.astro', + 'sentry.source': 'route', + url: expect.stringContaining('/api/user/myUsername123.json'), + 'http.request.header.accept': expect.any(String), + 'http.request.header.accept_encoding': 'gzip, deflate', + 'http.request.header.accept_language': '*', + 'http.request.header.sec_fetch_mode': 'cors', + 'http.request.header.user_agent': expect.any(String), + }, + }, + }, + request: { url: expect.stringContaining('/api/user/myUsername123.json') }, + }); + }); + + test('sends parametrized pageload and server transaction names for catch-all routes', async ({ page }) => { + const clientPageloadTxnPromise = waitForTransaction('astro-6', txnEvent => { + return txnEvent?.transaction?.startsWith('/catchAll/') ?? false; + }); + + const serverPageRequestTxnPromise = waitForTransaction('astro-6', txnEvent => { + return txnEvent?.transaction?.startsWith('GET /catchAll/') ?? 
false; + }); + + await page.goto('/catchAll/hell0/whatever-do'); + + const routeNameMetaContent = await page.locator('meta[name="sentry-route-name"]').getAttribute('content'); + expect(routeNameMetaContent).toBe('%2FcatchAll%2F%5B...path%5D'); + + const clientPageloadTxn = await clientPageloadTxnPromise; + const serverPageRequestTxn = await serverPageRequestTxnPromise; + + expect(clientPageloadTxn).toMatchObject({ + transaction: '/catchAll/[...path]', + transaction_info: { source: 'route' }, + contexts: { + trace: { + op: 'pageload', + origin: 'auto.pageload.astro', + data: { + 'sentry.op': 'pageload', + 'sentry.origin': 'auto.pageload.astro', + 'sentry.source': 'route', + }, + }, + }, + }); + + expect(serverPageRequestTxn).toMatchObject({ + transaction: 'GET /catchAll/[...path]', + transaction_info: { source: 'route' }, + contexts: { + trace: { + op: 'http.server', + origin: 'auto.http.astro', + data: { + 'sentry.op': 'http.server', + 'sentry.origin': 'auto.http.astro', + 'sentry.source': 'route', + url: expect.stringContaining('/catchAll/hell0/whatever-do'), + 'http.request.header.accept': expect.any(String), + 'http.request.header.accept_encoding': 'gzip, deflate, br, zstd', + 'http.request.header.accept_language': 'en-US', + 'http.request.header.sec_fetch_mode': 'navigate', + 'http.request.header.user_agent': expect.any(String), + }, + }, + }, + request: { url: expect.stringContaining('/catchAll/hell0/whatever-do') }, + }); + }); +}); + +// Case for `user-page/[id]` vs. `user-page/settings` static routes +test.describe('parametrized vs static paths', () => { + test('should use static route name for static route in parametrized path', async ({ page }) => { + const clientPageloadTxnPromise = waitForTransaction('astro-6', txnEvent => { + return txnEvent?.transaction?.startsWith('/user-page/') ?? false; + }); + + const serverPageRequestTxnPromise = waitForTransaction('astro-6', txnEvent => { + return txnEvent?.transaction?.startsWith('GET /user-page/') ?? 
false; + }); + + await page.goto('/user-page/settings'); + + const clientPageloadTxn = await clientPageloadTxnPromise; + const serverPageRequestTxn = await serverPageRequestTxnPromise; + + expect(clientPageloadTxn).toMatchObject({ + transaction: '/user-page/settings', + transaction_info: { source: 'route' }, + contexts: { + trace: { + op: 'pageload', + origin: 'auto.pageload.astro', + data: { + 'sentry.op': 'pageload', + 'sentry.origin': 'auto.pageload.astro', + 'sentry.source': 'route', + }, + }, + }, + }); + + expect(serverPageRequestTxn).toMatchObject({ + transaction: 'GET /user-page/settings', + transaction_info: { source: 'route' }, + contexts: { + trace: { + op: 'http.server', + origin: 'auto.http.astro', + data: { + 'sentry.op': 'http.server', + 'sentry.origin': 'auto.http.astro', + 'sentry.source': 'route', + url: expect.stringContaining('/user-page/settings'), + 'http.request.header.accept': expect.any(String), + 'http.request.header.accept_encoding': 'gzip, deflate, br, zstd', + 'http.request.header.accept_language': 'en-US', + 'http.request.header.sec_fetch_mode': 'navigate', + 'http.request.header.user_agent': expect.any(String), + }, + }, + }, + request: { url: expect.stringContaining('/user-page/settings') }, + }); + }); + + test('allows for span name override via beforeStartSpan', async ({ page }) => { + const clientPageloadTxnPromise = waitForTransaction('astro-6', txnEvent => { + return txnEvent?.transaction?.startsWith('/blog/') ?? 
false; + }); + + await page.goto('/blog/my-post'); + + const clientPageloadTxn = await clientPageloadTxnPromise; + expect(clientPageloadTxn).toMatchObject({ + transaction: '/blog/my-post', + transaction_info: { source: 'custom' }, + }); + }); +}); diff --git a/dev-packages/e2e-tests/test-applications/astro-6/tests/tracing.serverIslands.test.ts b/dev-packages/e2e-tests/test-applications/astro-6/tests/tracing.serverIslands.test.ts new file mode 100644 index 000000000000..10910c01bd3f --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6/tests/tracing.serverIslands.test.ts @@ -0,0 +1,100 @@ +import { expect, test } from '@playwright/test'; +import { waitForTransaction } from '@sentry-internal/test-utils'; + +// Skipping this test FOR NOW because there's a known bug in Astro 6.0.2 that causes +// server-islands to not work correctly with the node adapter: +// https://github.com/withastro/astro/issues/15753 +test.describe.skip('tracing in static routes with server islands', () => { + test('only sends client pageload transaction and server island endpoint transaction', async ({ page }) => { + const clientPageloadTxnPromise = waitForTransaction('astro-6', txnEvent => { + return txnEvent.transaction === '/server-island'; + }); + + const serverIslandEndpointTxnPromise = waitForTransaction('astro-6', evt => { + return evt.transaction === 'GET /_server-islands/[name]'; + }); + + await page.goto('/server-island'); + + const clientPageloadTxn = await clientPageloadTxnPromise; + const clientPageloadTraceId = clientPageloadTxn.contexts?.trace?.trace_id; + const clientPageloadParentSpanId = clientPageloadTxn.contexts?.trace?.parent_span_id; + + const sentryTraceMetaTags = await page.locator('meta[name="sentry-trace"]').count(); + expect(sentryTraceMetaTags).toBe(0); + + const baggageMetaTags = await page.locator('meta[name="baggage"]').count(); + expect(baggageMetaTags).toBe(0); + + expect(clientPageloadTraceId).toMatch(/[a-f0-9]{32}/); + 
expect(clientPageloadParentSpanId).toBeUndefined(); + + expect(clientPageloadTxn).toMatchObject({ + contexts: { + trace: { + data: expect.objectContaining({ + 'sentry.op': 'pageload', + 'sentry.origin': 'auto.pageload.astro', + 'sentry.source': 'route', + }), + op: 'pageload', + origin: 'auto.pageload.astro', + span_id: expect.stringMatching(/[a-f0-9]{16}/), + trace_id: clientPageloadTraceId, + }, + }, + platform: 'javascript', + transaction: '/server-island', + transaction_info: { + source: 'route', + }, + type: 'transaction', + }); + + const pageloadSpans = clientPageloadTxn.spans; + + // pageload transaction contains a resource link span for the preloaded server island request + expect(pageloadSpans).toEqual( + expect.arrayContaining([ + expect.objectContaining({ + op: 'resource.link', + origin: 'auto.resource.browser.metrics', + description: expect.stringMatching(/\/_server-islands\/Avatar.*$/), + }), + ]), + ); + + const serverIslandEndpointTxn = await serverIslandEndpointTxnPromise; + + expect(serverIslandEndpointTxn).toMatchObject({ + contexts: { + trace: { + data: expect.objectContaining({ + 'sentry.op': 'http.server', + 'sentry.origin': 'auto.http.astro', + 'sentry.source': 'route', + 'http.request.header.accept': expect.any(String), + 'http.request.header.accept_encoding': 'gzip, deflate, br, zstd', + 'http.request.header.accept_language': 'en-US', + 'http.request.header.sec_fetch_mode': 'cors', + 'http.request.header.user_agent': expect.any(String), + }), + op: 'http.server', + origin: 'auto.http.astro', + span_id: expect.stringMatching(/[a-f0-9]{16}/), + trace_id: expect.stringMatching(/[a-f0-9]{32}/), + }, + }, + transaction: 'GET /_server-islands/[name]', + }); + + const serverIslandEndpointTraceId = serverIslandEndpointTxn.contexts?.trace?.trace_id; + + // unfortunately, the server island trace id is not the same as the client pageload trace id + // this is because the server island endpoint request is made as a resource link request, + // meaning 
our fetch instrumentation can't attach headers to the request :( + expect(serverIslandEndpointTraceId).not.toBe(clientPageloadTraceId); + + await page.waitForTimeout(1000); // wait another sec to ensure no server transaction is sent + }); +}); diff --git a/dev-packages/e2e-tests/test-applications/astro-6/tests/tracing.static.test.ts b/dev-packages/e2e-tests/test-applications/astro-6/tests/tracing.static.test.ts new file mode 100644 index 000000000000..c76a66101775 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6/tests/tracing.static.test.ts @@ -0,0 +1,57 @@ +import { expect, test } from '@playwright/test'; +import { waitForTransaction } from '@sentry-internal/test-utils'; + +test.describe('tracing in static/pre-rendered routes', () => { + test('only sends client pageload span with traceId from pre-rendered tags', async ({ page }) => { + const clientPageloadTxnPromise = waitForTransaction('astro-6', txnEvent => { + return txnEvent?.transaction === '/test-static'; + }); + + waitForTransaction('astro-6', evt => { + if (evt.platform !== 'javascript') { + throw new Error('Server transaction should not be sent'); + } + return false; + }); + + await page.goto('/test-static'); + + const clientPageloadTxn = await clientPageloadTxnPromise; + + const clientPageloadTraceId = clientPageloadTxn.contexts?.trace?.trace_id; + const clientPageloadParentSpanId = clientPageloadTxn.contexts?.trace?.parent_span_id; + + const sentryTraceMetaTags = await page.locator('meta[name="sentry-trace"]').count(); + expect(sentryTraceMetaTags).toBe(0); + + const baggageMetaTags = await page.locator('meta[name="baggage"]').count(); + expect(baggageMetaTags).toBe(0); + + expect(clientPageloadTraceId).toMatch(/[a-f0-9]{32}/); + expect(clientPageloadParentSpanId).toBeUndefined(); + + expect(clientPageloadTxn).toMatchObject({ + contexts: { + trace: { + data: expect.objectContaining({ + 'sentry.op': 'pageload', + 'sentry.origin': 'auto.pageload.astro', + 'sentry.source': 'route', + 
}), + op: 'pageload', + origin: 'auto.pageload.astro', + span_id: expect.stringMatching(/[a-f0-9]{16}/), + trace_id: expect.stringMatching(/[a-f0-9]{32}/), + }, + }, + platform: 'javascript', + transaction: '/test-static', + transaction_info: { + source: 'route', + }, + type: 'transaction', + }); + + await page.waitForTimeout(1000); // wait another sec to ensure no server transaction is sent + }); +}); diff --git a/dev-packages/e2e-tests/test-applications/astro-6/tsconfig.json b/dev-packages/e2e-tests/test-applications/astro-6/tsconfig.json new file mode 100644 index 000000000000..8bf91d3bb997 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/astro-6/tsconfig.json @@ -0,0 +1,5 @@ +{ + "extends": "astro/tsconfigs/strict", + "include": [".astro/types.d.ts", "**/*"], + "exclude": ["dist"] +} diff --git a/dev-packages/e2e-tests/test-applications/browser-mfe-vite/.npmrc b/dev-packages/e2e-tests/test-applications/browser-mfe-vite/.npmrc new file mode 100644 index 000000000000..070f80f05092 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/browser-mfe-vite/.npmrc @@ -0,0 +1,2 @@ +@sentry:registry=http://127.0.0.1:4873 +@sentry-internal:registry=http://127.0.0.1:4873 diff --git a/dev-packages/e2e-tests/test-applications/browser-mfe-vite/apps/mfe-header/index.html b/dev-packages/e2e-tests/test-applications/browser-mfe-vite/apps/mfe-header/index.html new file mode 100644 index 000000000000..acc42eb2480f --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/browser-mfe-vite/apps/mfe-header/index.html @@ -0,0 +1,9 @@ + + + + + + +
+ + diff --git a/dev-packages/e2e-tests/test-applications/browser-mfe-vite/apps/mfe-header/package.json b/dev-packages/e2e-tests/test-applications/browser-mfe-vite/apps/mfe-header/package.json new file mode 100644 index 000000000000..04bac1dafd66 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/browser-mfe-vite/apps/mfe-header/package.json @@ -0,0 +1,21 @@ +{ + "name": "@browser-mfe-vite/mfe-header", + "private": true, + "version": "0.0.0", + "type": "module", + "scripts": { + "build": "vite build", + "preview": "vite preview --port 3032" + }, + "dependencies": { + "@sentry/browser": "latest || *", + "react": "^18.3.1", + "react-dom": "^18.3.1" + }, + "devDependencies": { + "@types/react": "^18.3.5", + "@originjs/vite-plugin-federation": "^1.3.6", + "@vitejs/plugin-react": "^4.3.1", + "vite": "^5.4.0" + } +} diff --git a/dev-packages/e2e-tests/test-applications/browser-mfe-vite/apps/mfe-header/src/App.tsx b/dev-packages/e2e-tests/test-applications/browser-mfe-vite/apps/mfe-header/src/App.tsx new file mode 100644 index 000000000000..6456fe13d6a5 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/browser-mfe-vite/apps/mfe-header/src/App.tsx @@ -0,0 +1,15 @@ +import React, { useEffect } from 'react'; +import * as Sentry from '@sentry/browser'; + +function MfeHeader() { + useEffect(() => { + Sentry.withScope(scope => { + scope.setTag('mfe.name', 'mfe-header'); + fetch('http://localhost:6969/api/header-data'); + }); + }, []); + + return
Header MFE
; +} + +export default MfeHeader; diff --git a/dev-packages/e2e-tests/test-applications/browser-mfe-vite/apps/mfe-header/vite.config.ts b/dev-packages/e2e-tests/test-applications/browser-mfe-vite/apps/mfe-header/vite.config.ts new file mode 100644 index 000000000000..d179f84a26d6 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/browser-mfe-vite/apps/mfe-header/vite.config.ts @@ -0,0 +1,20 @@ +import { defineConfig } from 'vite'; +import react from '@vitejs/plugin-react'; +import federation from '@originjs/vite-plugin-federation'; + +export default defineConfig({ + plugins: [ + react(), + federation({ + name: 'mfe_header', + filename: 'remoteEntry.js', + exposes: { './App': './src/App.tsx' }, + shared: ['react', 'react-dom'], + }), + ], + build: { + target: 'esnext', + minify: false, + }, + preview: { port: 3032 }, +}); diff --git a/dev-packages/e2e-tests/test-applications/browser-mfe-vite/apps/mfe-one/index.html b/dev-packages/e2e-tests/test-applications/browser-mfe-vite/apps/mfe-one/index.html new file mode 100644 index 000000000000..acc42eb2480f --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/browser-mfe-vite/apps/mfe-one/index.html @@ -0,0 +1,9 @@ + + + + + + +
+ + diff --git a/dev-packages/e2e-tests/test-applications/browser-mfe-vite/apps/mfe-one/package.json b/dev-packages/e2e-tests/test-applications/browser-mfe-vite/apps/mfe-one/package.json new file mode 100644 index 000000000000..6db9d42e97ac --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/browser-mfe-vite/apps/mfe-one/package.json @@ -0,0 +1,21 @@ +{ + "name": "@browser-mfe-vite/mfe-one", + "private": true, + "version": "0.0.0", + "type": "module", + "scripts": { + "build": "vite build", + "preview": "vite preview --port 3033" + }, + "dependencies": { + "@sentry/browser": "latest || *", + "react": "^18.3.1", + "react-dom": "^18.3.1" + }, + "devDependencies": { + "@types/react": "^18.3.5", + "@originjs/vite-plugin-federation": "^1.3.6", + "@vitejs/plugin-react": "^4.3.1", + "vite": "^5.4.0" + } +} diff --git a/dev-packages/e2e-tests/test-applications/browser-mfe-vite/apps/mfe-one/src/App.tsx b/dev-packages/e2e-tests/test-applications/browser-mfe-vite/apps/mfe-one/src/App.tsx new file mode 100644 index 000000000000..2a6f46afbc86 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/browser-mfe-vite/apps/mfe-one/src/App.tsx @@ -0,0 +1,15 @@ +import React, { useEffect } from 'react'; +import * as Sentry from '@sentry/browser'; + +function MfeOne() { + useEffect(() => { + Sentry.withScope(scope => { + scope.setTag('mfe.name', 'mfe-one'); + fetch('http://localhost:6969/api/mfe-one-data'); + }); + }, []); + + return
MFE One
; +} + +export default MfeOne; diff --git a/dev-packages/e2e-tests/test-applications/browser-mfe-vite/apps/mfe-one/vite.config.ts b/dev-packages/e2e-tests/test-applications/browser-mfe-vite/apps/mfe-one/vite.config.ts new file mode 100644 index 000000000000..3dda22d9c31b --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/browser-mfe-vite/apps/mfe-one/vite.config.ts @@ -0,0 +1,20 @@ +import { defineConfig } from 'vite'; +import react from '@vitejs/plugin-react'; +import federation from '@originjs/vite-plugin-federation'; + +export default defineConfig({ + plugins: [ + react(), + federation({ + name: 'mfe_one', + filename: 'remoteEntry.js', + exposes: { './App': './src/App.tsx' }, + shared: ['react', 'react-dom'], + }), + ], + build: { + target: 'esnext', + minify: false, + }, + preview: { port: 3033 }, +}); diff --git a/dev-packages/e2e-tests/test-applications/browser-mfe-vite/apps/shell/index.html b/dev-packages/e2e-tests/test-applications/browser-mfe-vite/apps/shell/index.html new file mode 100644 index 000000000000..f5d567e1c5c2 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/browser-mfe-vite/apps/shell/index.html @@ -0,0 +1,12 @@ + + + + + + MFE Shell + + +
+ + + diff --git a/dev-packages/e2e-tests/test-applications/browser-mfe-vite/apps/shell/package.json b/dev-packages/e2e-tests/test-applications/browser-mfe-vite/apps/shell/package.json new file mode 100644 index 000000000000..a3437e079f7f --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/browser-mfe-vite/apps/shell/package.json @@ -0,0 +1,23 @@ +{ + "name": "@browser-mfe-vite/shell", + "private": true, + "version": "0.0.0", + "type": "module", + "scripts": { + "build": "vite build", + "preview": "vite preview --port 3030" + }, + "dependencies": { + "@sentry/browser": "latest || *", + "@sentry/react": "latest || *", + "react": "^18.3.1", + "react-dom": "^18.3.1" + }, + "devDependencies": { + "@types/react": "^18.3.5", + "@types/react-dom": "^18.3.0", + "@originjs/vite-plugin-federation": "^1.3.6", + "@vitejs/plugin-react": "^4.3.1", + "vite": "^5.4.0" + } +} diff --git a/dev-packages/e2e-tests/test-applications/browser-mfe-vite/apps/shell/src/declarations.d.ts b/dev-packages/e2e-tests/test-applications/browser-mfe-vite/apps/shell/src/declarations.d.ts new file mode 100644 index 000000000000..8f30b452f011 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/browser-mfe-vite/apps/shell/src/declarations.d.ts @@ -0,0 +1,11 @@ +declare module 'mfe_header/App' { + import type { ComponentType } from 'react'; + const App: ComponentType; + export default App; +} + +declare module 'mfe_one/App' { + import type { ComponentType } from 'react'; + const App: ComponentType; + export default App; +} diff --git a/dev-packages/e2e-tests/test-applications/browser-mfe-vite/apps/shell/src/main.tsx b/dev-packages/e2e-tests/test-applications/browser-mfe-vite/apps/shell/src/main.tsx new file mode 100644 index 000000000000..cf33087c4f04 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/browser-mfe-vite/apps/shell/src/main.tsx @@ -0,0 +1,47 @@ +import React, { lazy, Suspense } from 'react'; +import ReactDOM from 'react-dom/client'; +import * as Sentry from 
'@sentry/react'; + +Sentry.init({ + dsn: import.meta.env.PUBLIC_E2E_TEST_DSN, + environment: import.meta.env.MODE || 'development', + integrations: [Sentry.browserTracingIntegration()], + tracesSampleRate: 1.0, + tunnel: 'http://localhost:3031/', +}); + +// Workaround: propagate MFE identity from current scope to span attributes +const client = Sentry.getClient()!; +client.on('spanStart', span => { + const mfeName = Sentry.getCurrentScope().getScopeData().tags['mfe.name']; + if (typeof mfeName === 'string') { + span.setAttribute('mfe.name', mfeName); + } +}); + +// Load MFEs via Module Federation (React.lazy + dynamic import) +const MfeHeader = lazy(() => import('mfe_header/App')); +const MfeOne = lazy(() => import('mfe_one/App')); + +function App() { + return ( +
+

Shell

+ Loading header...
}> + + + Loading mfe-one...}> + + + + ); +} + +// Shell's own fetch — no MFE scope +fetch('http://localhost:6969/api/shell-config'); + +ReactDOM.createRoot(document.getElementById('root')!).render( + + + , +); diff --git a/dev-packages/e2e-tests/test-applications/browser-mfe-vite/apps/shell/tsconfig.json b/dev-packages/e2e-tests/test-applications/browser-mfe-vite/apps/shell/tsconfig.json new file mode 100644 index 000000000000..523ce2191211 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/browser-mfe-vite/apps/shell/tsconfig.json @@ -0,0 +1,14 @@ +{ + "compilerOptions": { + "target": "ES2022", + "module": "ESNext", + "lib": ["ES2022", "DOM", "DOM.Iterable"], + "jsx": "react-jsx", + "skipLibCheck": true, + "moduleResolution": "bundler", + "noEmit": true, + "strict": true, + "erasableSyntaxOnly": true + }, + "include": ["src"] +} diff --git a/dev-packages/e2e-tests/test-applications/browser-mfe-vite/apps/shell/vite.config.ts b/dev-packages/e2e-tests/test-applications/browser-mfe-vite/apps/shell/vite.config.ts new file mode 100644 index 000000000000..fb95b0270974 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/browser-mfe-vite/apps/shell/vite.config.ts @@ -0,0 +1,24 @@ +import { defineConfig } from 'vite'; +import react from '@vitejs/plugin-react'; +import federation from '@originjs/vite-plugin-federation'; + +export default defineConfig({ + plugins: [ + react(), + federation({ + name: 'shell', + remotes: { + mfe_header: 'http://localhost:3032/assets/remoteEntry.js', + mfe_one: 'http://localhost:3033/assets/remoteEntry.js', + }, + shared: ['react', 'react-dom'], + }), + ], + build: { + target: 'esnext', + minify: false, + envPrefix: ['PUBLIC_'], + }, + envPrefix: ['PUBLIC_'], + preview: { port: 3030 }, +}); diff --git a/dev-packages/e2e-tests/test-applications/browser-mfe-vite/package.json b/dev-packages/e2e-tests/test-applications/browser-mfe-vite/package.json new file mode 100644 index 000000000000..bec4607c0678 --- /dev/null +++ 
b/dev-packages/e2e-tests/test-applications/browser-mfe-vite/package.json @@ -0,0 +1,25 @@ +{ + "name": "browser-mfe-vite", + "private": true, + "version": "0.0.0", + "type": "module", + "scripts": { + "build": "pnpm build:mfes && pnpm build:shell", + "build:mfes": "cd apps/mfe-header && pnpm build && cd ../mfe-one && pnpm build", + "build:shell": "cd apps/shell && pnpm build", + "preview": "concurrently \"cd apps/mfe-header && pnpm preview\" \"cd apps/mfe-one && pnpm preview\" \"cd apps/shell && pnpm preview\"", + "test": "playwright test", + "test:build": "pnpm install && pnpm build", + "test:assert": "pnpm test" + }, + "devDependencies": { + "@playwright/test": "~1.56.0", + "@sentry-internal/test-utils": "link:../../../test-utils", + "concurrently": "^8.2.2" + }, + "volta": { + "node": "20.19.2", + "yarn": "1.22.22", + "pnpm": "9.15.9" + } +} diff --git a/dev-packages/e2e-tests/test-applications/browser-mfe-vite/playwright.config.mjs b/dev-packages/e2e-tests/test-applications/browser-mfe-vite/playwright.config.mjs new file mode 100644 index 000000000000..6d097c2436e9 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/browser-mfe-vite/playwright.config.mjs @@ -0,0 +1,41 @@ +import { getPlaywrightConfig } from '@sentry-internal/test-utils'; + +const config = getPlaywrightConfig( + { + startCommand: 'pnpm preview', + eventProxyFile: 'start-event-proxy.mjs', + eventProxyPort: 3031, + port: 3030, + }, + { + // Wait for all three servers to be ready + webServer: [ + { + command: 'node start-event-proxy.mjs', + port: 3031, + stdout: 'pipe', + stderr: 'pipe', + }, + { + command: 'cd apps/mfe-header && pnpm preview', + port: 3032, + stdout: 'pipe', + stderr: 'pipe', + }, + { + command: 'cd apps/mfe-one && pnpm preview', + port: 3033, + stdout: 'pipe', + stderr: 'pipe', + }, + { + command: 'cd apps/shell && pnpm preview', + port: 3030, + stdout: 'pipe', + stderr: 'pipe', + }, + ], + }, +); + +export default config; diff --git 
a/dev-packages/e2e-tests/test-applications/browser-mfe-vite/pnpm-workspace.yaml b/dev-packages/e2e-tests/test-applications/browser-mfe-vite/pnpm-workspace.yaml new file mode 100644 index 000000000000..8ab3e17a0de1 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/browser-mfe-vite/pnpm-workspace.yaml @@ -0,0 +1,2 @@ +packages: + - 'apps/*' diff --git a/dev-packages/e2e-tests/test-applications/browser-mfe-vite/start-event-proxy.mjs b/dev-packages/e2e-tests/test-applications/browser-mfe-vite/start-event-proxy.mjs new file mode 100644 index 000000000000..4a232f2b5489 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/browser-mfe-vite/start-event-proxy.mjs @@ -0,0 +1,6 @@ +import { startEventProxyServer } from '@sentry-internal/test-utils'; + +startEventProxyServer({ + port: 3031, + proxyServerName: 'browser-mfe-vite', +}); diff --git a/dev-packages/e2e-tests/test-applications/browser-mfe-vite/tests/mfe-span-attribution.test.ts b/dev-packages/e2e-tests/test-applications/browser-mfe-vite/tests/mfe-span-attribution.test.ts new file mode 100644 index 000000000000..0f842bb64c7e --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/browser-mfe-vite/tests/mfe-span-attribution.test.ts @@ -0,0 +1,28 @@ +import { expect, test } from '@playwright/test'; +import { waitForTransaction } from '@sentry-internal/test-utils'; + +test('attributes fetch spans to their originating microfrontend', async ({ page }) => { + const transactionPromise = waitForTransaction('browser-mfe-vite', transactionEvent => { + return !!transactionEvent?.transaction && transactionEvent.contexts?.trace?.op === 'pageload'; + }); + + await page.goto('/'); + + const transactionEvent = await transactionPromise; + const httpSpans = transactionEvent.spans?.filter(span => span.op === 'http.client') || []; + + // MFE spans carry the mfe.name attribute set via withScope + spanStart hook + const headerSpan = httpSpans.find(s => s.description?.includes('/api/header-data')); + const mfeOneSpan 
= httpSpans.find(s => s.description?.includes('/api/mfe-one-data')); + const shellSpan = httpSpans.find(s => s.description?.includes('/api/shell-config')); + + expect(headerSpan).toBeDefined(); + expect(mfeOneSpan).toBeDefined(); + expect(shellSpan).toBeDefined(); + + expect(headerSpan?.data?.['mfe.name']).toBe('mfe-header'); + expect(mfeOneSpan?.data?.['mfe.name']).toBe('mfe-one'); + + // Shell span has no MFE tag + expect(shellSpan?.data?.['mfe.name']).toBeUndefined(); +}); diff --git a/dev-packages/e2e-tests/test-applications/effect-browser/.gitignore b/dev-packages/e2e-tests/test-applications/effect-browser/.gitignore new file mode 100644 index 000000000000..bd66327c3b4a --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/effect-browser/.gitignore @@ -0,0 +1,28 @@ +# dependencies +/node_modules +/.pnp +.pnp.js + +# testing +/coverage + +# production +/build +/dist + +# misc +.DS_Store +.env.local +.env.development.local +.env.test.local +.env.production.local + +npm-debug.log* +yarn-debug.log* +yarn-error.log* + +/test-results/ +/playwright-report/ +/playwright/.cache/ + +!*.d.ts diff --git a/dev-packages/e2e-tests/test-applications/effect-browser/.npmrc b/dev-packages/e2e-tests/test-applications/effect-browser/.npmrc new file mode 100644 index 000000000000..070f80f05092 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/effect-browser/.npmrc @@ -0,0 +1,2 @@ +@sentry:registry=http://127.0.0.1:4873 +@sentry-internal:registry=http://127.0.0.1:4873 diff --git a/dev-packages/e2e-tests/test-applications/effect-browser/build.mjs b/dev-packages/e2e-tests/test-applications/effect-browser/build.mjs new file mode 100644 index 000000000000..63c63597d4fe --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/effect-browser/build.mjs @@ -0,0 +1,52 @@ +import * as path from 'path'; +import * as url from 'url'; +import HtmlWebpackPlugin from 'html-webpack-plugin'; +import TerserPlugin from 'terser-webpack-plugin'; +import webpack from 'webpack'; + 
+const __dirname = path.dirname(url.fileURLToPath(import.meta.url)); + +webpack( + { + entry: path.join(__dirname, 'src/index.js'), + output: { + path: path.join(__dirname, 'build'), + filename: 'app.js', + }, + optimization: { + minimize: true, + minimizer: [new TerserPlugin()], + }, + plugins: [ + new webpack.EnvironmentPlugin(['E2E_TEST_DSN']), + new HtmlWebpackPlugin({ + template: path.join(__dirname, 'public/index.html'), + }), + ], + performance: { + hints: false, + }, + mode: 'production', + }, + (err, stats) => { + if (err) { + console.error(err.stack || err); + if (err.details) { + console.error(err.details); + } + return; + } + + const info = stats.toJson(); + + if (stats.hasErrors()) { + console.error(info.errors); + process.exit(1); + } + + if (stats.hasWarnings()) { + console.warn(info.warnings); + process.exit(1); + } + }, +); diff --git a/dev-packages/e2e-tests/test-applications/effect-browser/package.json b/dev-packages/e2e-tests/test-applications/effect-browser/package.json new file mode 100644 index 000000000000..6c2e7e63ced8 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/effect-browser/package.json @@ -0,0 +1,42 @@ +{ + "name": "effect-browser-test-app", + "version": "1.0.0", + "private": true, + "scripts": { + "start": "serve -s build", + "build": "node build.mjs", + "test": "playwright test", + "clean": "npx rimraf node_modules pnpm-lock.yaml", + "test:build": "pnpm install && pnpm build", + "test:assert": "pnpm test" + }, + "dependencies": { + "@sentry/effect": "latest || *", + "@types/node": "^18.19.1", + "effect": "^3.19.19", + "typescript": "~5.0.0" + }, + "devDependencies": { + "@playwright/test": "~1.56.0", + "@sentry-internal/test-utils": "link:../../../test-utils", + "webpack": "^5.91.0", + "serve": "14.0.1", + "terser-webpack-plugin": "^5.3.10", + "html-webpack-plugin": "^5.6.0" + }, + "browserslist": { + "production": [ + ">0.2%", + "not dead", + "not op_mini all" + ], + "development": [ + "last 1 chrome version", + 
"last 1 firefox version", + "last 1 safari version" + ] + }, + "volta": { + "extends": "../../package.json" + } +} diff --git a/dev-packages/e2e-tests/test-applications/effect-browser/playwright.config.mjs b/dev-packages/e2e-tests/test-applications/effect-browser/playwright.config.mjs new file mode 100644 index 000000000000..31f2b913b58b --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/effect-browser/playwright.config.mjs @@ -0,0 +1,7 @@ +import { getPlaywrightConfig } from '@sentry-internal/test-utils'; + +const config = getPlaywrightConfig({ + startCommand: `pnpm start`, +}); + +export default config; diff --git a/dev-packages/e2e-tests/test-applications/effect-browser/public/index.html b/dev-packages/e2e-tests/test-applications/effect-browser/public/index.html new file mode 100644 index 000000000000..19d5c3d99a2f --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/effect-browser/public/index.html @@ -0,0 +1,48 @@ + + + + + + Effect Browser App + + +

Effect Browser E2E Test

+ +
+
+

Error Tests

+ +
+ +
+

Effect Span Tests

+ + +
+ +
+

Effect Failure Tests

+ + +
+ + +
+ +
+

Log Tests

+ + +
+ + +
+ + +
+ + diff --git a/dev-packages/e2e-tests/test-applications/effect-browser/src/index.js b/dev-packages/e2e-tests/test-applications/effect-browser/src/index.js new file mode 100644 index 000000000000..4e9cb70d6e44 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/effect-browser/src/index.js @@ -0,0 +1,92 @@ +// @ts-check +import * as Sentry from '@sentry/effect'; +import * as Logger from 'effect/Logger'; +import * as Layer from 'effect/Layer'; +import * as Runtime from 'effect/Runtime'; +import * as LogLevel from 'effect/LogLevel'; +import * as Effect from 'effect/Effect'; + +const LogLevelLive = Logger.minimumLogLevel(LogLevel.Debug); +const AppLayer = Layer.mergeAll( + Sentry.effectLayer({ + dsn: process.env.E2E_TEST_DSN, + integrations: [ + Sentry.browserTracingIntegration({ + _experiments: { enableInteractions: true }, + }), + ], + tracesSampleRate: 1.0, + release: 'e2e-test', + environment: 'qa', + tunnel: 'http://localhost:3031', + enableLogs: true, + }), + Layer.setTracer(Sentry.SentryEffectTracer), + Logger.replace(Logger.defaultLogger, Sentry.SentryEffectLogger), + LogLevelLive, +); + +const runtime = Layer.toRuntime(AppLayer).pipe(Effect.scoped, Effect.runSync); + +const runEffect = fn => Runtime.runPromise(runtime)(fn()); + +document.getElementById('exception-button')?.addEventListener('click', () => { + throw new Error('I am an error!'); +}); + +document.getElementById('effect-span-button')?.addEventListener('click', async () => { + await runEffect(() => + Effect.gen(function* () { + yield* Effect.sleep('50 millis'); + yield* Effect.sleep('25 millis').pipe(Effect.withSpan('nested-span')); + }).pipe(Effect.withSpan('custom-effect-span', { kind: 'internal' })), + ); + const el = document.getElementById('effect-span-result'); + if (el) el.textContent = 'Span sent!'; +}); + +document.getElementById('effect-fail-button')?.addEventListener('click', async () => { + try { + await runEffect(() => Effect.fail(new Error('Effect failure'))); + } catch { + 
const el = document.getElementById('effect-fail-result'); + if (el) el.textContent = 'Effect failed (expected)'; + } +}); + +document.getElementById('effect-die-button')?.addEventListener('click', async () => { + try { + await runEffect(() => Effect.die('Effect defect')); + } catch { + const el = document.getElementById('effect-die-result'); + if (el) el.textContent = 'Effect died (expected)'; + } +}); + +document.getElementById('log-button')?.addEventListener('click', async () => { + await runEffect(() => + Effect.gen(function* () { + yield* Effect.logDebug('Debug log from Effect'); + yield* Effect.logInfo('Info log from Effect'); + yield* Effect.logWarning('Warning log from Effect'); + yield* Effect.logError('Error log from Effect'); + }), + ); + const el = document.getElementById('log-result'); + if (el) el.textContent = 'Logs sent!'; +}); + +document.getElementById('log-context-button')?.addEventListener('click', async () => { + await runEffect(() => + Effect.logInfo('Log with context').pipe( + Effect.annotateLogs('userId', '12345'), + Effect.annotateLogs('action', 'test'), + ), + ); + const el = document.getElementById('log-context-result'); + if (el) el.textContent = 'Log with context sent!'; +}); + +document.getElementById('navigation-link')?.addEventListener('click', () => { + document.getElementById('navigation-target')?.scrollIntoView({ behavior: 'smooth' }); +}); diff --git a/dev-packages/e2e-tests/test-applications/effect-browser/start-event-proxy.mjs b/dev-packages/e2e-tests/test-applications/effect-browser/start-event-proxy.mjs new file mode 100644 index 000000000000..a86a1bd91404 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/effect-browser/start-event-proxy.mjs @@ -0,0 +1,6 @@ +import { startEventProxyServer } from '@sentry-internal/test-utils'; + +startEventProxyServer({ + port: 3031, + proxyServerName: 'effect-browser', +}); diff --git a/dev-packages/e2e-tests/test-applications/effect-browser/tests/errors.test.ts 
b/dev-packages/e2e-tests/test-applications/effect-browser/tests/errors.test.ts new file mode 100644 index 000000000000..80589f683c28 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/effect-browser/tests/errors.test.ts @@ -0,0 +1,56 @@ +import { expect, test } from '@playwright/test'; +import { waitForError, waitForTransaction } from '@sentry-internal/test-utils'; + +test('captures an error', async ({ page }) => { + const errorEventPromise = waitForError('effect-browser', event => { + return !event.type && event.exception?.values?.[0]?.value === 'I am an error!'; + }); + + await page.goto('/'); + + const exceptionButton = page.locator('id=exception-button'); + await exceptionButton.click(); + + const errorEvent = await errorEventPromise; + + expect(errorEvent.exception?.values).toHaveLength(1); + expect(errorEvent.exception?.values?.[0]?.value).toBe('I am an error!'); + expect(errorEvent.transaction).toBe('/'); + + expect(errorEvent.request).toEqual({ + url: 'http://localhost:3030/', + headers: expect.any(Object), + }); + + expect(errorEvent.contexts?.trace).toEqual({ + trace_id: expect.stringMatching(/[a-f0-9]{32}/), + span_id: expect.stringMatching(/[a-f0-9]{16}/), + }); +}); + +test('sets correct transactionName', async ({ page }) => { + const transactionPromise = waitForTransaction('effect-browser', async transactionEvent => { + return !!transactionEvent?.transaction && transactionEvent.contexts?.trace?.op === 'pageload'; + }); + + const errorEventPromise = waitForError('effect-browser', event => { + return !event.type && event.exception?.values?.[0]?.value === 'I am an error!'; + }); + + await page.goto('/'); + const transactionEvent = await transactionPromise; + + const exceptionButton = page.locator('id=exception-button'); + await exceptionButton.click(); + + const errorEvent = await errorEventPromise; + + expect(errorEvent.exception?.values).toHaveLength(1); + expect(errorEvent.exception?.values?.[0]?.value).toBe('I am an error!'); + 
expect(errorEvent.transaction).toEqual('/'); + + expect(errorEvent.contexts?.trace).toEqual({ + trace_id: transactionEvent.contexts?.trace?.trace_id, + span_id: expect.not.stringContaining(transactionEvent.contexts?.trace?.span_id || ''), + }); +}); diff --git a/dev-packages/e2e-tests/test-applications/effect-browser/tests/logs.test.ts b/dev-packages/e2e-tests/test-applications/effect-browser/tests/logs.test.ts new file mode 100644 index 000000000000..f81bc249cbd8 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/effect-browser/tests/logs.test.ts @@ -0,0 +1,116 @@ +import { expect, test } from '@playwright/test'; +import { waitForEnvelopeItem } from '@sentry-internal/test-utils'; +import type { SerializedLogContainer } from '@sentry/core'; + +test('should send Effect debug logs', async ({ page }) => { + const logEnvelopePromise = waitForEnvelopeItem('effect-browser', envelope => { + return ( + envelope[0].type === 'log' && + (envelope[1] as SerializedLogContainer).items.some( + item => item.level === 'debug' && item.body === 'Debug log from Effect', + ) + ); + }); + + await page.goto('/'); + const logButton = page.locator('id=log-button'); + await logButton.click(); + + await expect(page.locator('id=log-result')).toHaveText('Logs sent!'); + + const logEnvelope = await logEnvelopePromise; + const logs = (logEnvelope[1] as SerializedLogContainer).items; + const debugLog = logs.find(log => log.level === 'debug' && log.body === 'Debug log from Effect'); + expect(debugLog).toBeDefined(); + expect(debugLog?.level).toBe('debug'); +}); + +test('should send Effect info logs', async ({ page }) => { + const logEnvelopePromise = waitForEnvelopeItem('effect-browser', envelope => { + return ( + envelope[0].type === 'log' && + (envelope[1] as SerializedLogContainer).items.some( + item => item.level === 'info' && item.body === 'Info log from Effect', + ) + ); + }); + + await page.goto('/'); + const logButton = page.locator('id=log-button'); + await logButton.click(); + 
+ await expect(page.locator('id=log-result')).toHaveText('Logs sent!'); + + const logEnvelope = await logEnvelopePromise; + const logs = (logEnvelope[1] as SerializedLogContainer).items; + const infoLog = logs.find(log => log.level === 'info' && log.body === 'Info log from Effect'); + expect(infoLog).toBeDefined(); + expect(infoLog?.level).toBe('info'); +}); + +test('should send Effect warning logs', async ({ page }) => { + const logEnvelopePromise = waitForEnvelopeItem('effect-browser', envelope => { + return ( + envelope[0].type === 'log' && + (envelope[1] as SerializedLogContainer).items.some( + item => item.level === 'warn' && item.body === 'Warning log from Effect', + ) + ); + }); + + await page.goto('/'); + const logButton = page.locator('id=log-button'); + await logButton.click(); + + await expect(page.locator('id=log-result')).toHaveText('Logs sent!'); + + const logEnvelope = await logEnvelopePromise; + const logs = (logEnvelope[1] as SerializedLogContainer).items; + const warnLog = logs.find(log => log.level === 'warn' && log.body === 'Warning log from Effect'); + expect(warnLog).toBeDefined(); + expect(warnLog?.level).toBe('warn'); +}); + +test('should send Effect error logs', async ({ page }) => { + const logEnvelopePromise = waitForEnvelopeItem('effect-browser', envelope => { + return ( + envelope[0].type === 'log' && + (envelope[1] as SerializedLogContainer).items.some( + item => item.level === 'error' && item.body === 'Error log from Effect', + ) + ); + }); + + await page.goto('/'); + const logButton = page.locator('id=log-button'); + await logButton.click(); + + await expect(page.locator('id=log-result')).toHaveText('Logs sent!'); + + const logEnvelope = await logEnvelopePromise; + const logs = (logEnvelope[1] as SerializedLogContainer).items; + const errorLog = logs.find(log => log.level === 'error' && log.body === 'Error log from Effect'); + expect(errorLog).toBeDefined(); + expect(errorLog?.level).toBe('error'); +}); + +test('should send Effect 
logs with context attributes', async ({ page }) => { + const logEnvelopePromise = waitForEnvelopeItem('effect-browser', envelope => { + return ( + envelope[0].type === 'log' && + (envelope[1] as SerializedLogContainer).items.some(item => item.body === 'Log with context') + ); + }); + + await page.goto('/'); + const logContextButton = page.locator('id=log-context-button'); + await logContextButton.click(); + + await expect(page.locator('id=log-context-result')).toHaveText('Log with context sent!'); + + const logEnvelope = await logEnvelopePromise; + const logs = (logEnvelope[1] as SerializedLogContainer).items; + const contextLog = logs.find(log => log.body === 'Log with context'); + expect(contextLog).toBeDefined(); + expect(contextLog?.level).toBe('info'); +}); diff --git a/dev-packages/e2e-tests/test-applications/effect-browser/tests/transactions.test.ts b/dev-packages/e2e-tests/test-applications/effect-browser/tests/transactions.test.ts new file mode 100644 index 000000000000..b7c60b488403 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/effect-browser/tests/transactions.test.ts @@ -0,0 +1,120 @@ +import { expect, test } from '@playwright/test'; +import { waitForTransaction } from '@sentry-internal/test-utils'; + +test('captures a pageload transaction', async ({ page }) => { + const transactionPromise = waitForTransaction('effect-browser', async transactionEvent => { + return !!transactionEvent?.transaction && transactionEvent.contexts?.trace?.op === 'pageload'; + }); + + await page.goto('/'); + + const pageLoadTransaction = await transactionPromise; + + expect(pageLoadTransaction).toMatchObject({ + contexts: { + trace: { + data: expect.objectContaining({ + 'sentry.idle_span_finish_reason': 'idleTimeout', + 'sentry.op': 'pageload', + 'sentry.origin': 'auto.pageload.browser', + 'sentry.sample_rate': 1, + 'sentry.source': 'url', + }), + op: 'pageload', + origin: 'auto.pageload.browser', + span_id: expect.stringMatching(/[a-f0-9]{16}/), + trace_id: 
expect.stringMatching(/[a-f0-9]{32}/), + }, + }, + environment: 'qa', + event_id: expect.stringMatching(/[a-f0-9]{32}/), + measurements: expect.any(Object), + platform: 'javascript', + release: 'e2e-test', + request: { + headers: { + 'User-Agent': expect.any(String), + }, + url: 'http://localhost:3030/', + }, + spans: expect.any(Array), + start_timestamp: expect.any(Number), + timestamp: expect.any(Number), + transaction: '/', + transaction_info: { + source: 'url', + }, + type: 'transaction', + }); +}); + +test('captures a navigation transaction', async ({ page }) => { + const pageLoadTransactionPromise = waitForTransaction('effect-browser', async transactionEvent => { + return !!transactionEvent?.transaction && transactionEvent.contexts?.trace?.op === 'pageload'; + }); + + const navigationTransactionPromise = waitForTransaction('effect-browser', async transactionEvent => { + return !!transactionEvent?.transaction && transactionEvent.contexts?.trace?.op === 'navigation'; + }); + + await page.goto('/'); + await pageLoadTransactionPromise; + + const linkElement = page.locator('id=navigation-link'); + await linkElement.click(); + + const navigationTransaction = await navigationTransactionPromise; + + expect(navigationTransaction).toMatchObject({ + contexts: { + trace: { + op: 'navigation', + origin: 'auto.navigation.browser', + }, + }, + transaction: '/', + transaction_info: { + source: 'url', + }, + }); +}); + +test('captures Effect spans with correct parent-child structure', async ({ page }) => { + const pageloadPromise = waitForTransaction('effect-browser', transactionEvent => { + return transactionEvent?.contexts?.trace?.op === 'pageload'; + }); + + const transactionPromise = waitForTransaction('effect-browser', transactionEvent => { + return ( + transactionEvent?.contexts?.trace?.op === 'ui.action.click' && + transactionEvent.spans?.some(span => span.description === 'custom-effect-span') + ); + }); + + await page.goto('/'); + await pageloadPromise; + + const 
effectSpanButton = page.locator('id=effect-span-button'); + await effectSpanButton.click(); + + await expect(page.locator('id=effect-span-result')).toHaveText('Span sent!'); + + const transactionEvent = await transactionPromise; + const spans = transactionEvent.spans || []; + + expect(spans).toContainEqual( + expect.objectContaining({ + description: 'custom-effect-span', + }), + ); + + expect(spans).toContainEqual( + expect.objectContaining({ + description: 'nested-span', + }), + ); + + const parentSpan = spans.find(s => s.description === 'custom-effect-span'); + const nestedSpan = spans.find(s => s.description === 'nested-span'); + expect(nestedSpan?.parent_span_id).toBe(parentSpan?.span_id); +}); diff --git a/dev-packages/e2e-tests/test-applications/effect-browser/tsconfig.json b/dev-packages/e2e-tests/test-applications/effect-browser/tsconfig.json new file mode 100644 index 000000000000..cb69f25b8d50 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/effect-browser/tsconfig.json @@ -0,0 +1,19 @@ +{ + "compilerOptions": { + "target": "es2018", + "lib": ["dom", "dom.iterable", "esnext"], + "allowJs": true, + "skipLibCheck": true, + "esModuleInterop": true, + "allowSyntheticDefaultImports": true, + "strict": true, + "forceConsistentCasingInFileNames": true, + "noFallthroughCasesInSwitch": true, + "module": "esnext", + "moduleResolution": "node", + "resolveJsonModule": true, + "isolatedModules": true, + "noEmit": true + }, + "include": ["src", "tests"] +} diff --git a/dev-packages/e2e-tests/test-applications/effect-node/.gitignore b/dev-packages/e2e-tests/test-applications/effect-node/.gitignore new file mode 100644 index 000000000000..f06235c460c2 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/effect-node/.gitignore @@ -0,0 +1,2 @@ +node_modules +dist diff --git a/dev-packages/e2e-tests/test-applications/effect-node/.npmrc b/dev-packages/e2e-tests/test-applications/effect-node/.npmrc new file mode 100644 index 000000000000..070f80f05092 --- 
/dev/null +++ b/dev-packages/e2e-tests/test-applications/effect-node/.npmrc @@ -0,0 +1,2 @@ +@sentry:registry=http://127.0.0.1:4873 +@sentry-internal:registry=http://127.0.0.1:4873 diff --git a/dev-packages/e2e-tests/test-applications/effect-node/package.json b/dev-packages/e2e-tests/test-applications/effect-node/package.json new file mode 100644 index 000000000000..621a017d3020 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/effect-node/package.json @@ -0,0 +1,29 @@ +{ + "name": "effect-node-app", + "version": "1.0.0", + "private": true, + "type": "module", + "scripts": { + "build": "tsc", + "start": "node dist/app.js", + "test": "playwright test", + "clean": "npx rimraf node_modules pnpm-lock.yaml", + "test:build": "pnpm install && pnpm build", + "test:assert": "pnpm test" + }, + "dependencies": { + "@effect/platform": "^0.94.5", + "@effect/platform-node": "^0.104.1", + "@sentry/effect": "latest || *", + "@types/node": "^18.19.1", + "effect": "^3.19.19", + "typescript": "~5.0.0" + }, + "devDependencies": { + "@playwright/test": "~1.56.0", + "@sentry-internal/test-utils": "link:../../../test-utils" + }, + "volta": { + "extends": "../../package.json" + } +} diff --git a/dev-packages/e2e-tests/test-applications/effect-node/playwright.config.mjs b/dev-packages/e2e-tests/test-applications/effect-node/playwright.config.mjs new file mode 100644 index 000000000000..31f2b913b58b --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/effect-node/playwright.config.mjs @@ -0,0 +1,7 @@ +import { getPlaywrightConfig } from '@sentry-internal/test-utils'; + +const config = getPlaywrightConfig({ + startCommand: `pnpm start`, +}); + +export default config; diff --git a/dev-packages/e2e-tests/test-applications/effect-node/src/app.ts b/dev-packages/e2e-tests/test-applications/effect-node/src/app.ts new file mode 100644 index 000000000000..899adfb4aa98 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/effect-node/src/app.ts @@ -0,0 +1,108 @@ +import * as 
Sentry from '@sentry/effect'; +import { HttpRouter, HttpServer, HttpServerResponse } from '@effect/platform'; +import { NodeHttpServer, NodeRuntime } from '@effect/platform-node'; +import * as Effect from 'effect/Effect'; +import * as Cause from 'effect/Cause'; +import * as Layer from 'effect/Layer'; +import * as Logger from 'effect/Logger'; +import * as LogLevel from 'effect/LogLevel'; +import { createServer } from 'http'; + +const SentryLive = Layer.mergeAll( + Sentry.effectLayer({ + dsn: process.env.E2E_TEST_DSN, + environment: 'qa', + debug: !!process.env.DEBUG, + tunnel: 'http://localhost:3031/', + tracesSampleRate: 1, + enableLogs: true, + }), + Layer.setTracer(Sentry.SentryEffectTracer), + Logger.replace(Logger.defaultLogger, Sentry.SentryEffectLogger), +); + +const router = HttpRouter.empty.pipe( + HttpRouter.get('/test-success', HttpServerResponse.json({ version: 'v1' })), + + HttpRouter.get( + '/test-transaction', + Effect.gen(function* () { + yield* Effect.void.pipe(Effect.withSpan('test-span')); + return yield* HttpServerResponse.json({ status: 'ok' }); + }), + ), + + HttpRouter.get( + '/test-effect-span', + Effect.gen(function* () { + yield* Effect.gen(function* () { + yield* Effect.sleep('50 millis'); + yield* Effect.sleep('25 millis').pipe(Effect.withSpan('nested-span')); + }).pipe(Effect.withSpan('custom-effect-span', { kind: 'internal' })); + return yield* HttpServerResponse.json({ status: 'ok' }); + }), + ), + + HttpRouter.get( + '/test-error', + Effect.gen(function* () { + const exceptionId = Sentry.captureException(new Error('This is an error')); + yield* Effect.promise(() => Sentry.flush(2000)); + return yield* HttpServerResponse.json({ exceptionId }); + }), + ), + + HttpRouter.get( + '/test-exception/:id', + Effect.sync(() => { + throw new Error('This is an exception with id 123'); + }), + ), + + HttpRouter.get('/test-effect-fail', Effect.fail(new Error('Effect failure'))), + + HttpRouter.get('/test-effect-die', Effect.die('Effect defect')), + 
+ HttpRouter.get( + '/test-log', + Effect.gen(function* () { + yield* Effect.logDebug('Debug log from Effect'); + yield* Effect.logInfo('Info log from Effect'); + yield* Effect.logWarning('Warning log from Effect'); + yield* Effect.logError('Error log from Effect'); + return yield* HttpServerResponse.json({ message: 'Logs sent' }); + }), + ), + + HttpRouter.get( + '/test-log-with-context', + Effect.gen(function* () { + yield* Effect.logInfo('Log with context').pipe( + Effect.annotateLogs('userId', '12345'), + Effect.annotateLogs('action', 'test'), + ); + return yield* HttpServerResponse.json({ message: 'Log with context sent' }); + }), + ), + + HttpRouter.catchAllCause(cause => { + const error = Cause.squash(cause); + Sentry.captureException(error); + return Effect.gen(function* () { + yield* Effect.promise(() => Sentry.flush(2000)); + return yield* HttpServerResponse.json({ error: String(error) }, { status: 500 }); + }); + }), +); + +const LogLevelLive = Logger.minimumLogLevel(LogLevel.Debug); + +const ServerLive = router.pipe( + HttpServer.serve(), + HttpServer.withLogAddress, + Layer.provide(NodeHttpServer.layer(createServer, { port: 3030 })), + Layer.provide(SentryLive), + Layer.provide(LogLevelLive), +); + +ServerLive.pipe(Layer.launch, NodeRuntime.runMain); diff --git a/dev-packages/e2e-tests/test-applications/effect-node/start-event-proxy.mjs b/dev-packages/e2e-tests/test-applications/effect-node/start-event-proxy.mjs new file mode 100644 index 000000000000..41eb647958b7 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/effect-node/start-event-proxy.mjs @@ -0,0 +1,6 @@ +import { startEventProxyServer } from '@sentry-internal/test-utils'; + +startEventProxyServer({ + port: 3031, + proxyServerName: 'effect-node', +}); diff --git a/dev-packages/e2e-tests/test-applications/effect-node/tests/errors.test.ts b/dev-packages/e2e-tests/test-applications/effect-node/tests/errors.test.ts new file mode 100644 index 000000000000..3b7da230c0e0 --- /dev/null +++ 
b/dev-packages/e2e-tests/test-applications/effect-node/tests/errors.test.ts @@ -0,0 +1,56 @@ +import { expect, test } from '@playwright/test'; +import { waitForError } from '@sentry-internal/test-utils'; + +test('Captures manually reported error', async ({ baseURL }) => { + const errorEventPromise = waitForError('effect-node', event => { + return !event.type && event.exception?.values?.[0]?.value === 'This is an error'; + }); + + const response = await fetch(`${baseURL}/test-error`); + const body = await response.json(); + + const errorEvent = await errorEventPromise; + + expect(body.exceptionId).toBeDefined(); + expect(errorEvent.exception?.values).toHaveLength(1); + expect(errorEvent.exception?.values?.[0]?.value).toBe('This is an error'); +}); + +test('Captures thrown exception', async ({ baseURL }) => { + const errorEventPromise = waitForError('effect-node', event => { + return !event.type && event.exception?.values?.[0]?.value === 'This is an exception with id 123'; + }); + + await fetch(`${baseURL}/test-exception/123`); + + const errorEvent = await errorEventPromise; + + expect(errorEvent.exception?.values).toHaveLength(1); + expect(errorEvent.exception?.values?.[0]?.value).toBe('This is an exception with id 123'); +}); + +test('Captures Effect.fail as error', async ({ baseURL }) => { + const errorEventPromise = waitForError('effect-node', event => { + return !event.type && event.exception?.values?.[0]?.value === 'Effect failure'; + }); + + await fetch(`${baseURL}/test-effect-fail`); + + const errorEvent = await errorEventPromise; + + expect(errorEvent.exception?.values).toHaveLength(1); + expect(errorEvent.exception?.values?.[0]?.value).toBe('Effect failure'); +}); + +test('Captures Effect.die as error', async ({ baseURL }) => { + const errorEventPromise = waitForError('effect-node', event => { + return !event.type && event.exception?.values?.[0]?.value?.includes('Effect defect'); + }); + + await fetch(`${baseURL}/test-effect-die`); + + const errorEvent = 
await errorEventPromise; + + expect(errorEvent.exception?.values).toHaveLength(1); + expect(errorEvent.exception?.values?.[0]?.value).toContain('Effect defect'); +}); diff --git a/dev-packages/e2e-tests/test-applications/effect-node/tests/logs.test.ts b/dev-packages/e2e-tests/test-applications/effect-node/tests/logs.test.ts new file mode 100644 index 000000000000..85f5840e14a8 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/effect-node/tests/logs.test.ts @@ -0,0 +1,96 @@ +import { expect, test } from '@playwright/test'; +import { waitForEnvelopeItem } from '@sentry-internal/test-utils'; +import type { SerializedLogContainer } from '@sentry/core'; + +test('should send Effect debug logs', async ({ baseURL }) => { + const logEnvelopePromise = waitForEnvelopeItem('effect-node', envelope => { + return ( + envelope[0].type === 'log' && + (envelope[1] as SerializedLogContainer).items.some( + item => item.level === 'debug' && item.body === 'Debug log from Effect', + ) + ); + }); + + await fetch(`${baseURL}/test-log`); + + const logEnvelope = await logEnvelopePromise; + const logs = (logEnvelope[1] as SerializedLogContainer).items; + const debugLog = logs.find(log => log.level === 'debug' && log.body === 'Debug log from Effect'); + expect(debugLog).toBeDefined(); + expect(debugLog?.level).toBe('debug'); +}); + +test('should send Effect info logs', async ({ baseURL }) => { + const logEnvelopePromise = waitForEnvelopeItem('effect-node', envelope => { + return ( + envelope[0].type === 'log' && + (envelope[1] as SerializedLogContainer).items.some( + item => item.level === 'info' && item.body === 'Info log from Effect', + ) + ); + }); + + await fetch(`${baseURL}/test-log`); + + const logEnvelope = await logEnvelopePromise; + const logs = (logEnvelope[1] as SerializedLogContainer).items; + const infoLog = logs.find(log => log.level === 'info' && log.body === 'Info log from Effect'); + expect(infoLog).toBeDefined(); + expect(infoLog?.level).toBe('info'); +}); + 
+test('should send Effect warning logs', async ({ baseURL }) => { + const logEnvelopePromise = waitForEnvelopeItem('effect-node', envelope => { + return ( + envelope[0].type === 'log' && + (envelope[1] as SerializedLogContainer).items.some( + item => item.level === 'warn' && item.body === 'Warning log from Effect', + ) + ); + }); + + await fetch(`${baseURL}/test-log`); + + const logEnvelope = await logEnvelopePromise; + const logs = (logEnvelope[1] as SerializedLogContainer).items; + const warnLog = logs.find(log => log.level === 'warn' && log.body === 'Warning log from Effect'); + expect(warnLog).toBeDefined(); + expect(warnLog?.level).toBe('warn'); +}); + +test('should send Effect error logs', async ({ baseURL }) => { + const logEnvelopePromise = waitForEnvelopeItem('effect-node', envelope => { + return ( + envelope[0].type === 'log' && + (envelope[1] as SerializedLogContainer).items.some( + item => item.level === 'error' && item.body === 'Error log from Effect', + ) + ); + }); + + await fetch(`${baseURL}/test-log`); + + const logEnvelope = await logEnvelopePromise; + const logs = (logEnvelope[1] as SerializedLogContainer).items; + const errorLog = logs.find(log => log.level === 'error' && log.body === 'Error log from Effect'); + expect(errorLog).toBeDefined(); + expect(errorLog?.level).toBe('error'); +}); + +test('should send Effect logs with context attributes', async ({ baseURL }) => { + const logEnvelopePromise = waitForEnvelopeItem('effect-node', envelope => { + return ( + envelope[0].type === 'log' && + (envelope[1] as SerializedLogContainer).items.some(item => item.body === 'Log with context') + ); + }); + + await fetch(`${baseURL}/test-log-with-context`); + + const logEnvelope = await logEnvelopePromise; + const logs = (logEnvelope[1] as SerializedLogContainer).items; + const contextLog = logs.find(log => log.body === 'Log with context'); + expect(contextLog).toBeDefined(); + expect(contextLog?.level).toBe('info'); +}); diff --git 
a/dev-packages/e2e-tests/test-applications/effect-node/tests/transactions.test.ts b/dev-packages/e2e-tests/test-applications/effect-node/tests/transactions.test.ts new file mode 100644 index 000000000000..ed7a58fa28df --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/effect-node/tests/transactions.test.ts @@ -0,0 +1,99 @@ +import { expect, test } from '@playwright/test'; +import { waitForTransaction } from '@sentry-internal/test-utils'; + +test('Sends an HTTP transaction', async ({ baseURL }) => { + const transactionEventPromise = waitForTransaction('effect-node', transactionEvent => { + return transactionEvent?.transaction === 'http.server GET'; + }); + + await fetch(`${baseURL}/test-success`); + + const transactionEvent = await transactionEventPromise; + + expect(transactionEvent.transaction).toBe('http.server GET'); +}); + +test('Sends transaction with manual Effect span', async ({ baseURL }) => { + const transactionEventPromise = waitForTransaction('effect-node', transactionEvent => { + return ( + transactionEvent?.transaction === 'http.server GET' && + transactionEvent?.spans?.some(span => span.description === 'test-span') + ); + }); + + await fetch(`${baseURL}/test-transaction`); + + const transactionEvent = await transactionEventPromise; + + expect(transactionEvent.transaction).toBe('http.server GET'); + + const spans = transactionEvent.spans || []; + expect(spans).toEqual([ + expect.objectContaining({ + description: 'test-span', + }), + ]); +}); + +test('Sends Effect spans with correct parent-child structure', async ({ baseURL }) => { + const transactionEventPromise = waitForTransaction('effect-node', transactionEvent => { + return ( + transactionEvent?.transaction === 'http.server GET' && + transactionEvent?.spans?.some(span => span.description === 'custom-effect-span') + ); + }); + + await fetch(`${baseURL}/test-effect-span`); + + const transactionEvent = await transactionEventPromise; + + expect(transactionEvent.transaction).toBe('http.server 
GET'); + + expect(transactionEvent).toEqual( + expect.objectContaining({ + contexts: expect.objectContaining({ + trace: expect.objectContaining({ + origin: 'auto.http.effect', + }), + }), + spans: [ + expect.objectContaining({ + description: 'custom-effect-span', + origin: 'auto.function.effect', + }), + expect.objectContaining({ + description: 'nested-span', + origin: 'auto.function.effect', + }), + ], + sdk: expect.objectContaining({ + name: 'sentry.javascript.effect', + packages: [ + expect.objectContaining({ + name: 'npm:@sentry/effect', + }), + expect.objectContaining({ + name: 'npm:@sentry/node-light', + }), + ], + }), + }), + ); + + const parentSpan = transactionEvent.spans?.[0]?.span_id; + const nestedSpan = transactionEvent.spans?.[1]?.parent_span_id; + + expect(nestedSpan).toBe(parentSpan); +}); + +test('Sends transaction for error route', async ({ baseURL }) => { + const transactionEventPromise = waitForTransaction('effect-node', transactionEvent => { + return transactionEvent?.transaction === 'http.server GET'; + }); + + await fetch(`${baseURL}/test-error`); + + const transactionEvent = await transactionEventPromise; + + expect(transactionEvent.transaction).toBe('http.server GET'); +}); diff --git a/dev-packages/e2e-tests/test-applications/effect-node/tsconfig.json b/dev-packages/e2e-tests/test-applications/effect-node/tsconfig.json new file mode 100644 index 000000000000..2cc9aca23e0e --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/effect-node/tsconfig.json @@ -0,0 +1,14 @@ +{ + "compilerOptions": { + "target": "ES2022", + "module": "NodeNext", + "moduleResolution": "NodeNext", + "outDir": "dist", + "rootDir": "src", + "strict": true, + "esModuleInterop": true, + "skipLibCheck": true, + "declaration": false + }, + "include": ["src"] +} diff --git a/dev-packages/e2e-tests/test-applications/nestjs-11/tests/cron-decorator.test.ts b/dev-packages/e2e-tests/test-applications/nestjs-11/tests/cron-decorator.test.ts index bf5e29004066..18f084800fcf 
100644 --- a/dev-packages/e2e-tests/test-applications/nestjs-11/tests/cron-decorator.test.ts +++ b/dev-packages/e2e-tests/test-applications/nestjs-11/tests/cron-decorator.test.ts @@ -64,7 +64,11 @@ test('Cron job triggers send of in_progress envelope', async ({ baseURL }) => { test('Sends exceptions to Sentry on error in cron job', async ({ baseURL }) => { const errorEventPromise = waitForError('nestjs-11', event => { - return !event.type && event.exception?.values?.[0]?.value === 'Test error from cron job'; + return ( + !event.type && + event.exception?.values?.[0]?.value === 'Test error from cron job' && + event.exception?.values?.[0]?.mechanism?.type === 'auto.function.nestjs.cron' + ); }); const errorEvent = await errorEventPromise; @@ -73,7 +77,7 @@ test('Sends exceptions to Sentry on error in cron job', async ({ baseURL }) => { expect(errorEvent.exception?.values?.[0]?.value).toBe('Test error from cron job'); expect(errorEvent.exception?.values?.[0]?.mechanism).toEqual({ handled: false, - type: 'auto.cron.nestjs.async', + type: 'auto.function.nestjs.cron', }); expect(errorEvent.contexts?.trace).toEqual({ diff --git a/dev-packages/e2e-tests/test-applications/nestjs-11/tests/transactions.test.ts b/dev-packages/e2e-tests/test-applications/nestjs-11/tests/transactions.test.ts index d0f34311822e..a8b8f25e46c1 100644 --- a/dev-packages/e2e-tests/test-applications/nestjs-11/tests/transactions.test.ts +++ b/dev-packages/e2e-tests/test-applications/nestjs-11/tests/transactions.test.ts @@ -232,7 +232,7 @@ test('API route transaction includes nest guard span and span started in guard i trace_id: expect.stringMatching(/[a-f0-9]{32}/), data: { 'sentry.op': 'middleware.nestjs', - 'sentry.origin': 'auto.middleware.nestjs', + 'sentry.origin': 'auto.middleware.nestjs.guard', }, description: 'ExampleGuard', parent_span_id: expect.stringMatching(/[a-f0-9]{16}/), @@ -240,7 +240,7 @@ test('API route transaction includes nest guard span and span started in guard i timestamp: 
expect.any(Number), status: 'ok', op: 'middleware.nestjs', - origin: 'auto.middleware.nestjs', + origin: 'auto.middleware.nestjs.guard', }, ]), }), @@ -296,7 +296,7 @@ test('API route transaction includes nest pipe span for valid request', async ({ trace_id: expect.stringMatching(/[a-f0-9]{32}/), data: { 'sentry.op': 'middleware.nestjs', - 'sentry.origin': 'auto.middleware.nestjs', + 'sentry.origin': 'auto.middleware.nestjs.pipe', }, description: 'ParseIntPipe', parent_span_id: expect.stringMatching(/[a-f0-9]{16}/), @@ -304,7 +304,7 @@ test('API route transaction includes nest pipe span for valid request', async ({ timestamp: expect.any(Number), status: 'ok', op: 'middleware.nestjs', - origin: 'auto.middleware.nestjs', + origin: 'auto.middleware.nestjs.pipe', }, ]), }), @@ -333,7 +333,7 @@ test('API route transaction includes nest pipe span for invalid request', async trace_id: expect.stringMatching(/[a-f0-9]{32}/), data: { 'sentry.op': 'middleware.nestjs', - 'sentry.origin': 'auto.middleware.nestjs', + 'sentry.origin': 'auto.middleware.nestjs.pipe', }, description: 'ParseIntPipe', parent_span_id: expect.stringMatching(/[a-f0-9]{16}/), @@ -341,7 +341,7 @@ test('API route transaction includes nest pipe span for invalid request', async timestamp: expect.any(Number), status: 'internal_error', op: 'middleware.nestjs', - origin: 'auto.middleware.nestjs', + origin: 'auto.middleware.nestjs.pipe', }, ]), }), @@ -372,7 +372,7 @@ test('API route transaction includes nest interceptor spans before route executi trace_id: expect.stringMatching(/[a-f0-9]{32}/), data: { 'sentry.op': 'middleware.nestjs', - 'sentry.origin': 'auto.middleware.nestjs', + 'sentry.origin': 'auto.middleware.nestjs.interceptor', }, description: 'ExampleInterceptor1', parent_span_id: expect.stringMatching(/[a-f0-9]{16}/), @@ -380,14 +380,14 @@ test('API route transaction includes nest interceptor spans before route executi timestamp: expect.any(Number), status: 'ok', op: 'middleware.nestjs', - origin: 
'auto.middleware.nestjs', + origin: 'auto.middleware.nestjs.interceptor', }, { span_id: expect.stringMatching(/[a-f0-9]{16}/), trace_id: expect.stringMatching(/[a-f0-9]{32}/), data: { 'sentry.op': 'middleware.nestjs', - 'sentry.origin': 'auto.middleware.nestjs', + 'sentry.origin': 'auto.middleware.nestjs.interceptor', }, description: 'ExampleInterceptor2', parent_span_id: expect.stringMatching(/[a-f0-9]{16}/), @@ -395,7 +395,7 @@ test('API route transaction includes nest interceptor spans before route executi timestamp: expect.any(Number), status: 'ok', op: 'middleware.nestjs', - origin: 'auto.middleware.nestjs', + origin: 'auto.middleware.nestjs.interceptor', }, ]), }), @@ -490,7 +490,7 @@ test('API route transaction includes exactly one nest interceptor span after rou trace_id: expect.stringMatching(/[a-f0-9]{32}/), data: { 'sentry.op': 'middleware.nestjs', - 'sentry.origin': 'auto.middleware.nestjs', + 'sentry.origin': 'auto.middleware.nestjs.interceptor', }, description: 'Interceptors - After Route', parent_span_id: expect.stringMatching(/[a-f0-9]{16}/), @@ -498,7 +498,7 @@ test('API route transaction includes exactly one nest interceptor span after rou timestamp: expect.any(Number), status: 'ok', op: 'middleware.nestjs', - origin: 'auto.middleware.nestjs', + origin: 'auto.middleware.nestjs.interceptor', }, ]), }), @@ -572,7 +572,7 @@ test('API route transaction includes nest async interceptor spans before route e trace_id: expect.stringMatching(/[a-f0-9]{32}/), data: { 'sentry.op': 'middleware.nestjs', - 'sentry.origin': 'auto.middleware.nestjs', + 'sentry.origin': 'auto.middleware.nestjs.interceptor', }, description: 'AsyncInterceptor', parent_span_id: expect.stringMatching(/[a-f0-9]{16}/), @@ -580,7 +580,7 @@ test('API route transaction includes nest async interceptor spans before route e timestamp: expect.any(Number), status: 'ok', op: 'middleware.nestjs', - origin: 'auto.middleware.nestjs', + origin: 'auto.middleware.nestjs.interceptor', }, ]), }), @@ 
-657,7 +657,7 @@ test('API route transaction includes exactly one nest async interceptor span aft trace_id: expect.stringMatching(/[a-f0-9]{32}/), data: { 'sentry.op': 'middleware.nestjs', - 'sentry.origin': 'auto.middleware.nestjs', + 'sentry.origin': 'auto.middleware.nestjs.interceptor', }, description: 'Interceptors - After Route', parent_span_id: expect.stringMatching(/[a-f0-9]{16}/), @@ -665,7 +665,7 @@ test('API route transaction includes exactly one nest async interceptor span aft timestamp: expect.any(Number), status: 'ok', op: 'middleware.nestjs', - origin: 'auto.middleware.nestjs', + origin: 'auto.middleware.nestjs.interceptor', }, ]), }), diff --git a/dev-packages/e2e-tests/test-applications/nestjs-8/tests/transactions.test.ts b/dev-packages/e2e-tests/test-applications/nestjs-8/tests/transactions.test.ts index f5f1c64a9726..862f730636c0 100644 --- a/dev-packages/e2e-tests/test-applications/nestjs-8/tests/transactions.test.ts +++ b/dev-packages/e2e-tests/test-applications/nestjs-8/tests/transactions.test.ts @@ -236,7 +236,7 @@ test('API route transaction includes nest guard span and span started in guard i trace_id: expect.stringMatching(/[a-f0-9]{32}/), data: { 'sentry.op': 'middleware.nestjs', - 'sentry.origin': 'auto.middleware.nestjs', + 'sentry.origin': 'auto.middleware.nestjs.guard', }, description: 'ExampleGuard', parent_span_id: expect.stringMatching(/[a-f0-9]{16}/), @@ -244,7 +244,7 @@ test('API route transaction includes nest guard span and span started in guard i timestamp: expect.any(Number), status: 'ok', op: 'middleware.nestjs', - origin: 'auto.middleware.nestjs', + origin: 'auto.middleware.nestjs.guard', }, ]), }), @@ -300,7 +300,7 @@ test('API route transaction includes nest pipe span for valid request', async ({ trace_id: expect.stringMatching(/[a-f0-9]{32}/), data: { 'sentry.op': 'middleware.nestjs', - 'sentry.origin': 'auto.middleware.nestjs', + 'sentry.origin': 'auto.middleware.nestjs.pipe', }, description: 'ParseIntPipe', 
parent_span_id: expect.stringMatching(/[a-f0-9]{16}/), @@ -308,7 +308,7 @@ test('API route transaction includes nest pipe span for valid request', async ({ timestamp: expect.any(Number), status: 'ok', op: 'middleware.nestjs', - origin: 'auto.middleware.nestjs', + origin: 'auto.middleware.nestjs.pipe', }, ]), }), @@ -337,7 +337,7 @@ test('API route transaction includes nest pipe span for invalid request', async trace_id: expect.stringMatching(/[a-f0-9]{32}/), data: { 'sentry.op': 'middleware.nestjs', - 'sentry.origin': 'auto.middleware.nestjs', + 'sentry.origin': 'auto.middleware.nestjs.pipe', }, description: 'ParseIntPipe', parent_span_id: expect.stringMatching(/[a-f0-9]{16}/), @@ -345,7 +345,7 @@ test('API route transaction includes nest pipe span for invalid request', async timestamp: expect.any(Number), status: 'internal_error', op: 'middleware.nestjs', - origin: 'auto.middleware.nestjs', + origin: 'auto.middleware.nestjs.pipe', }, ]), }), @@ -376,7 +376,7 @@ test('API route transaction includes nest interceptor spans before route executi trace_id: expect.stringMatching(/[a-f0-9]{32}/), data: { 'sentry.op': 'middleware.nestjs', - 'sentry.origin': 'auto.middleware.nestjs', + 'sentry.origin': 'auto.middleware.nestjs.interceptor', }, description: 'ExampleInterceptor1', parent_span_id: expect.stringMatching(/[a-f0-9]{16}/), @@ -384,14 +384,14 @@ test('API route transaction includes nest interceptor spans before route executi timestamp: expect.any(Number), status: 'ok', op: 'middleware.nestjs', - origin: 'auto.middleware.nestjs', + origin: 'auto.middleware.nestjs.interceptor', }, { span_id: expect.stringMatching(/[a-f0-9]{16}/), trace_id: expect.stringMatching(/[a-f0-9]{32}/), data: { 'sentry.op': 'middleware.nestjs', - 'sentry.origin': 'auto.middleware.nestjs', + 'sentry.origin': 'auto.middleware.nestjs.interceptor', }, description: 'ExampleInterceptor2', parent_span_id: expect.stringMatching(/[a-f0-9]{16}/), @@ -399,7 +399,7 @@ test('API route transaction includes 
nest interceptor spans before route executi timestamp: expect.any(Number), status: 'ok', op: 'middleware.nestjs', - origin: 'auto.middleware.nestjs', + origin: 'auto.middleware.nestjs.interceptor', }, ]), }), @@ -494,7 +494,7 @@ test('API route transaction includes exactly one nest interceptor span after rou trace_id: expect.stringMatching(/[a-f0-9]{32}/), data: { 'sentry.op': 'middleware.nestjs', - 'sentry.origin': 'auto.middleware.nestjs', + 'sentry.origin': 'auto.middleware.nestjs.interceptor', }, description: 'Interceptors - After Route', parent_span_id: expect.stringMatching(/[a-f0-9]{16}/), @@ -502,7 +502,7 @@ test('API route transaction includes exactly one nest interceptor span after rou timestamp: expect.any(Number), status: 'ok', op: 'middleware.nestjs', - origin: 'auto.middleware.nestjs', + origin: 'auto.middleware.nestjs.interceptor', }, ]), }), @@ -576,7 +576,7 @@ test('API route transaction includes nest async interceptor spans before route e trace_id: expect.stringMatching(/[a-f0-9]{32}/), data: { 'sentry.op': 'middleware.nestjs', - 'sentry.origin': 'auto.middleware.nestjs', + 'sentry.origin': 'auto.middleware.nestjs.interceptor', }, description: 'AsyncInterceptor', parent_span_id: expect.stringMatching(/[a-f0-9]{16}/), @@ -584,7 +584,7 @@ test('API route transaction includes nest async interceptor spans before route e timestamp: expect.any(Number), status: 'ok', op: 'middleware.nestjs', - origin: 'auto.middleware.nestjs', + origin: 'auto.middleware.nestjs.interceptor', }, ]), }), @@ -661,7 +661,7 @@ test('API route transaction includes exactly one nest async interceptor span aft trace_id: expect.stringMatching(/[a-f0-9]{32}/), data: { 'sentry.op': 'middleware.nestjs', - 'sentry.origin': 'auto.middleware.nestjs', + 'sentry.origin': 'auto.middleware.nestjs.interceptor', }, description: 'Interceptors - After Route', parent_span_id: expect.stringMatching(/[a-f0-9]{16}/), @@ -669,7 +669,7 @@ test('API route transaction includes exactly one nest async 
interceptor span aft timestamp: expect.any(Number), status: 'ok', op: 'middleware.nestjs', - origin: 'auto.middleware.nestjs', + origin: 'auto.middleware.nestjs.interceptor', }, ]), }), diff --git a/dev-packages/e2e-tests/test-applications/nestjs-basic/src/app.controller.ts b/dev-packages/e2e-tests/test-applications/nestjs-basic/src/app.controller.ts index 035106a14b21..6186c26cc65c 100644 --- a/dev-packages/e2e-tests/test-applications/nestjs-basic/src/app.controller.ts +++ b/dev-packages/e2e-tests/test-applications/nestjs-basic/src/app.controller.ts @@ -2,6 +2,7 @@ import { Controller, Get, Param, ParseIntPipe, UseFilters, UseGuards, UseInterce import { flush } from '@sentry/nestjs'; import { AppService } from './app.service'; import { AsyncInterceptor } from './async-example.interceptor'; +import { ScheduleService } from './schedule.service'; import { ExampleInterceptor1 } from './example-1.interceptor'; import { ExampleInterceptor2 } from './example-2.interceptor'; import { ExampleExceptionGlobalFilter } from './example-global-filter.exception'; @@ -12,7 +13,10 @@ import { ExampleGuard } from './example.guard'; @Controller() @UseFilters(ExampleLocalFilter) export class AppController { - constructor(private readonly appService: AppService) {} + constructor( + private readonly appService: AppService, + private readonly scheduleService: ScheduleService, + ) {} @Get('test-transaction') testTransaction() { @@ -87,6 +91,30 @@ export class AppController { this.appService.killTestCron(job); } + @Get('kill-test-schedule-cron/:name') + killTestScheduleCron(@Param('name') name: string) { + this.scheduleService.killCron(name); + } + + @Get('kill-test-schedule-interval/:name') + killTestScheduleInterval(@Param('name') name: string) { + this.scheduleService.killInterval(name); + } + + @Get('test-schedule-isolation') + testScheduleIsolation() { + return { message: 'ok' }; + } + + @Get('trigger-schedule-timeout-error') + async triggerScheduleTimeoutError() { + // Manually calls 
the @Timeout-decorated method to test instrumentation + // without relying on NestJS scheduler timing. + // Without this, it's hard to get the timing right for the test. + await this.scheduleService.handleTimeoutError(); + return { message: 'triggered' }; + } + @Get('flush') async flush() { await flush(); diff --git a/dev-packages/e2e-tests/test-applications/nestjs-basic/src/app.module.ts b/dev-packages/e2e-tests/test-applications/nestjs-basic/src/app.module.ts index 3de3c82dc925..7393e9b438c2 100644 --- a/dev-packages/e2e-tests/test-applications/nestjs-basic/src/app.module.ts +++ b/dev-packages/e2e-tests/test-applications/nestjs-basic/src/app.module.ts @@ -6,12 +6,14 @@ import { AppController } from './app.controller'; import { AppService } from './app.service'; import { ExampleGlobalFilter } from './example-global.filter'; import { ExampleMiddleware } from './example.middleware'; +import { ScheduleService } from './schedule.service'; @Module({ imports: [SentryModule.forRoot(), ScheduleModule.forRoot()], controllers: [AppController], providers: [ AppService, + ScheduleService, { provide: APP_FILTER, useClass: SentryGlobalFilter, diff --git a/dev-packages/e2e-tests/test-applications/nestjs-basic/src/schedule.service.ts b/dev-packages/e2e-tests/test-applications/nestjs-basic/src/schedule.service.ts new file mode 100644 index 000000000000..38b56136ab20 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/nestjs-basic/src/schedule.service.ts @@ -0,0 +1,45 @@ +import { Injectable } from '@nestjs/common'; +import { Cron, Interval, SchedulerRegistry, Timeout } from '@nestjs/schedule'; +import * as Sentry from '@sentry/nestjs'; + +@Injectable() +export class ScheduleService { + constructor(private schedulerRegistry: SchedulerRegistry) {} + + // @Cron error test (auto-instrumentation, no @SentryCron) + @Cron('*/5 * * * * *', { name: 'test-schedule-cron-error' }) + handleCronError() { + throw new Error('Test error from schedule cron'); + } + + // @Interval error 
test + @Interval('test-schedule-interval-error', 2000) + async handleIntervalError() { + throw new Error('Test error from schedule interval'); + } + + // @Timeout error test + // Use a very long delay so this doesn't fire on its own during tests. + // The test triggers the method via an HTTP endpoint instead. + @Timeout('test-schedule-timeout-error', 60000) + async handleTimeoutError() { + throw new Error('Test error from schedule timeout'); + } + + // Isolation scope test: adds breadcrumb that should NOT leak to HTTP requests + @Interval('test-schedule-isolation', 2000) + handleIsolationBreadcrumb() { + Sentry.addBreadcrumb({ + message: 'leaked-breadcrumb-from-schedule', + level: 'info', + }); + } + + killCron(name: string) { + this.schedulerRegistry.deleteCronJob(name); + } + + killInterval(name: string) { + this.schedulerRegistry.deleteInterval(name); + } +} diff --git a/dev-packages/e2e-tests/test-applications/nestjs-basic/tests/cron-decorator.test.ts b/dev-packages/e2e-tests/test-applications/nestjs-basic/tests/cron-decorator.test.ts index e0610f36c676..6aeeae723a64 100644 --- a/dev-packages/e2e-tests/test-applications/nestjs-basic/tests/cron-decorator.test.ts +++ b/dev-packages/e2e-tests/test-applications/nestjs-basic/tests/cron-decorator.test.ts @@ -64,7 +64,11 @@ test('Cron job triggers send of in_progress envelope', async ({ baseURL }) => { test('Sends exceptions to Sentry on error in async cron job', async ({ baseURL }) => { const errorEventPromise = waitForError('nestjs-basic', event => { - return !event.type && event.exception?.values?.[0]?.value === 'Test error from cron async job'; + return ( + !event.type && + event.exception?.values?.[0]?.value === 'Test error from cron async job' && + event.exception?.values?.[0]?.mechanism?.type === 'auto.function.nestjs.cron' + ); }); const errorEvent = await errorEventPromise; @@ -77,7 +81,7 @@ test('Sends exceptions to Sentry on error in async cron job', async ({ baseURL } 
expect(errorEvent.exception?.values?.[0]?.mechanism).toEqual({ handled: false, - type: 'auto.cron.nestjs.async', + type: 'auto.function.nestjs.cron', }); // kill cron so tests don't get stuck @@ -86,7 +90,11 @@ test('Sends exceptions to Sentry on error in async cron job', async ({ baseURL } test('Sends exceptions to Sentry on error in sync cron job', async ({ baseURL }) => { const errorEventPromise = waitForError('nestjs-basic', event => { - return !event.type && event.exception?.values?.[0]?.value === 'Test error from cron sync job'; + return ( + !event.type && + event.exception?.values?.[0]?.value === 'Test error from cron sync job' && + event.exception?.values?.[0]?.mechanism?.type === 'auto.function.nestjs.cron' + ); }); const errorEvent = await errorEventPromise; @@ -99,7 +107,7 @@ test('Sends exceptions to Sentry on error in sync cron job', async ({ baseURL }) expect(errorEvent.exception?.values?.[0]?.mechanism).toEqual({ handled: false, - type: 'auto.cron.nestjs', + type: 'auto.function.nestjs.cron', }); // kill cron so tests don't get stuck diff --git a/dev-packages/e2e-tests/test-applications/nestjs-basic/tests/schedule-instrumentation.test.ts b/dev-packages/e2e-tests/test-applications/nestjs-basic/tests/schedule-instrumentation.test.ts new file mode 100644 index 000000000000..cffca36be97e --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/nestjs-basic/tests/schedule-instrumentation.test.ts @@ -0,0 +1,93 @@ +import { expect, test } from '@playwright/test'; +import { waitForError, waitForTransaction } from '@sentry-internal/test-utils'; + +test('Sends exceptions to Sentry on error in @Cron decorated method', async ({ baseURL }) => { + const errorEventPromise = waitForError('nestjs-basic', event => { + return ( + !event.type && + event.exception?.values?.[0]?.value === 'Test error from schedule cron' && + event.exception?.values?.[0]?.mechanism?.type === 'auto.function.nestjs.cron' + ); + }); + + const errorEvent = await errorEventPromise; + + 
expect(errorEvent.exception?.values).toHaveLength(1); + expect(errorEvent.exception?.values?.[0]?.mechanism).toEqual({ + handled: false, + type: 'auto.function.nestjs.cron', + }); + + // kill cron so tests don't get stuck + await fetch(`${baseURL}/kill-test-schedule-cron/test-schedule-cron-error`); +}); + +test('Sends exceptions to Sentry on error in @Interval decorated method', async ({ baseURL }) => { + const errorEventPromise = waitForError('nestjs-basic', event => { + return ( + !event.type && + event.exception?.values?.[0]?.value === 'Test error from schedule interval' && + event.exception?.values?.[0]?.mechanism?.type === 'auto.function.nestjs.interval' + ); + }); + + const errorEvent = await errorEventPromise; + + expect(errorEvent.exception?.values).toHaveLength(1); + expect(errorEvent.exception?.values?.[0]?.mechanism).toEqual({ + handled: false, + type: 'auto.function.nestjs.interval', + }); + + // kill interval so tests don't get stuck + await fetch(`${baseURL}/kill-test-schedule-interval/test-schedule-interval-error`); +}); + +test('Sends exceptions to Sentry on error in @Timeout decorated method', async ({ baseURL }) => { + const errorEventPromise = waitForError('nestjs-basic', event => { + return ( + !event.type && + event.exception?.values?.[0]?.value === 'Test error from schedule timeout' && + event.exception?.values?.[0]?.mechanism?.type === 'auto.function.nestjs.timeout' + ); + }); + + // Trigger the @Timeout-decorated method via HTTP endpoint since @Timeout + // fires once and timing is unreliable across test runs. 
+ await fetch(`${baseURL}/trigger-schedule-timeout-error`).catch(() => { + // Expected to fail since the handler throws + }); + + const errorEvent = await errorEventPromise; + + expect(errorEvent.exception?.values).toHaveLength(1); + expect(errorEvent.exception?.values?.[0]?.mechanism).toEqual({ + handled: false, + type: 'auto.function.nestjs.timeout', + }); +}); + +test('Scheduled task breadcrumbs do not leak into subsequent HTTP requests', async ({ baseURL }) => { + // The app runs @Interval('test-schedule-isolation', 2000) which adds a breadcrumb. + // Without isolation scope forking, this breadcrumb leaks into the default isolation scope + // and gets cloned into subsequent HTTP requests. + + // Wait for at least one interval tick to fire + await new Promise(resolve => setTimeout(resolve, 3000)); + + const transactionPromise = waitForTransaction('nestjs-basic', transactionEvent => { + return transactionEvent.transaction === 'GET /test-schedule-isolation'; + }); + + await fetch(`${baseURL}/test-schedule-isolation`); + + const transaction = await transactionPromise; + + const leakedBreadcrumb = (transaction.breadcrumbs || []).find( + (b: any) => b.message === 'leaked-breadcrumb-from-schedule', + ); + expect(leakedBreadcrumb).toBeUndefined(); + + // kill interval so tests don't get stuck + await fetch(`${baseURL}/kill-test-schedule-interval/test-schedule-isolation`); +}); diff --git a/dev-packages/e2e-tests/test-applications/nestjs-basic/tests/transactions.test.ts b/dev-packages/e2e-tests/test-applications/nestjs-basic/tests/transactions.test.ts index 508c1e670946..440a1391556a 100644 --- a/dev-packages/e2e-tests/test-applications/nestjs-basic/tests/transactions.test.ts +++ b/dev-packages/e2e-tests/test-applications/nestjs-basic/tests/transactions.test.ts @@ -232,7 +232,7 @@ test('API route transaction includes nest guard span and span started in guard i trace_id: expect.stringMatching(/[a-f0-9]{32}/), data: { 'sentry.op': 'middleware.nestjs', - 'sentry.origin': 
'auto.middleware.nestjs', + 'sentry.origin': 'auto.middleware.nestjs.guard', }, description: 'ExampleGuard', parent_span_id: expect.stringMatching(/[a-f0-9]{16}/), @@ -240,7 +240,7 @@ test('API route transaction includes nest guard span and span started in guard i timestamp: expect.any(Number), status: 'ok', op: 'middleware.nestjs', - origin: 'auto.middleware.nestjs', + origin: 'auto.middleware.nestjs.guard', }, ]), }), @@ -296,7 +296,7 @@ test('API route transaction includes nest pipe span for valid request', async ({ trace_id: expect.stringMatching(/[a-f0-9]{32}/), data: { 'sentry.op': 'middleware.nestjs', - 'sentry.origin': 'auto.middleware.nestjs', + 'sentry.origin': 'auto.middleware.nestjs.pipe', }, description: 'ParseIntPipe', parent_span_id: expect.stringMatching(/[a-f0-9]{16}/), @@ -304,7 +304,7 @@ test('API route transaction includes nest pipe span for valid request', async ({ timestamp: expect.any(Number), status: 'ok', op: 'middleware.nestjs', - origin: 'auto.middleware.nestjs', + origin: 'auto.middleware.nestjs.pipe', }, ]), }), @@ -333,7 +333,7 @@ test('API route transaction includes nest pipe span for invalid request', async trace_id: expect.stringMatching(/[a-f0-9]{32}/), data: { 'sentry.op': 'middleware.nestjs', - 'sentry.origin': 'auto.middleware.nestjs', + 'sentry.origin': 'auto.middleware.nestjs.pipe', }, description: 'ParseIntPipe', parent_span_id: expect.stringMatching(/[a-f0-9]{16}/), @@ -341,7 +341,7 @@ test('API route transaction includes nest pipe span for invalid request', async timestamp: expect.any(Number), status: 'internal_error', op: 'middleware.nestjs', - origin: 'auto.middleware.nestjs', + origin: 'auto.middleware.nestjs.pipe', }, ]), }), @@ -372,7 +372,7 @@ test('API route transaction includes nest interceptor spans before route executi trace_id: expect.stringMatching(/[a-f0-9]{32}/), data: { 'sentry.op': 'middleware.nestjs', - 'sentry.origin': 'auto.middleware.nestjs', + 'sentry.origin': 'auto.middleware.nestjs.interceptor', }, 
description: 'ExampleInterceptor1', parent_span_id: expect.stringMatching(/[a-f0-9]{16}/), @@ -380,14 +380,14 @@ test('API route transaction includes nest interceptor spans before route executi timestamp: expect.any(Number), status: 'ok', op: 'middleware.nestjs', - origin: 'auto.middleware.nestjs', + origin: 'auto.middleware.nestjs.interceptor', }, { span_id: expect.stringMatching(/[a-f0-9]{16}/), trace_id: expect.stringMatching(/[a-f0-9]{32}/), data: { 'sentry.op': 'middleware.nestjs', - 'sentry.origin': 'auto.middleware.nestjs', + 'sentry.origin': 'auto.middleware.nestjs.interceptor', }, description: 'ExampleInterceptor2', parent_span_id: expect.stringMatching(/[a-f0-9]{16}/), @@ -395,7 +395,7 @@ test('API route transaction includes nest interceptor spans before route executi timestamp: expect.any(Number), status: 'ok', op: 'middleware.nestjs', - origin: 'auto.middleware.nestjs', + origin: 'auto.middleware.nestjs.interceptor', }, ]), }), @@ -490,7 +490,7 @@ test('API route transaction includes exactly one nest interceptor span after rou trace_id: expect.stringMatching(/[a-f0-9]{32}/), data: { 'sentry.op': 'middleware.nestjs', - 'sentry.origin': 'auto.middleware.nestjs', + 'sentry.origin': 'auto.middleware.nestjs.interceptor', }, description: 'Interceptors - After Route', parent_span_id: expect.stringMatching(/[a-f0-9]{16}/), @@ -498,7 +498,7 @@ test('API route transaction includes exactly one nest interceptor span after rou timestamp: expect.any(Number), status: 'ok', op: 'middleware.nestjs', - origin: 'auto.middleware.nestjs', + origin: 'auto.middleware.nestjs.interceptor', }, ]), }), @@ -572,7 +572,7 @@ test('API route transaction includes nest async interceptor spans before route e trace_id: expect.stringMatching(/[a-f0-9]{32}/), data: { 'sentry.op': 'middleware.nestjs', - 'sentry.origin': 'auto.middleware.nestjs', + 'sentry.origin': 'auto.middleware.nestjs.interceptor', }, description: 'AsyncInterceptor', parent_span_id: expect.stringMatching(/[a-f0-9]{16}/), 
@@ -580,7 +580,7 @@ test('API route transaction includes nest async interceptor spans before route e timestamp: expect.any(Number), status: 'ok', op: 'middleware.nestjs', - origin: 'auto.middleware.nestjs', + origin: 'auto.middleware.nestjs.interceptor', }, ]), }), @@ -657,7 +657,7 @@ test('API route transaction includes exactly one nest async interceptor span aft trace_id: expect.stringMatching(/[a-f0-9]{32}/), data: { 'sentry.op': 'middleware.nestjs', - 'sentry.origin': 'auto.middleware.nestjs', + 'sentry.origin': 'auto.middleware.nestjs.interceptor', }, description: 'Interceptors - After Route', parent_span_id: expect.stringMatching(/[a-f0-9]{16}/), @@ -665,7 +665,7 @@ test('API route transaction includes exactly one nest async interceptor span aft timestamp: expect.any(Number), status: 'ok', op: 'middleware.nestjs', - origin: 'auto.middleware.nestjs', + origin: 'auto.middleware.nestjs.interceptor', }, ]), }), diff --git a/dev-packages/e2e-tests/test-applications/nestjs-bullmq/.npmrc b/dev-packages/e2e-tests/test-applications/nestjs-bullmq/.npmrc new file mode 100644 index 000000000000..070f80f05092 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/nestjs-bullmq/.npmrc @@ -0,0 +1,2 @@ +@sentry:registry=http://127.0.0.1:4873 +@sentry-internal:registry=http://127.0.0.1:4873 diff --git a/dev-packages/e2e-tests/test-applications/nestjs-bullmq/docker-compose.yml b/dev-packages/e2e-tests/test-applications/nestjs-bullmq/docker-compose.yml new file mode 100644 index 000000000000..53518dbe5195 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/nestjs-bullmq/docker-compose.yml @@ -0,0 +1,7 @@ +services: + redis: + image: redis:8 + restart: always + container_name: e2e-tests-nestjs-bullmq-redis + ports: + - '6379:6379' diff --git a/dev-packages/e2e-tests/test-applications/nestjs-bullmq/global-setup.mjs b/dev-packages/e2e-tests/test-applications/nestjs-bullmq/global-setup.mjs new file mode 100644 index 000000000000..438b88b61794 --- /dev/null +++ 
b/dev-packages/e2e-tests/test-applications/nestjs-bullmq/global-setup.mjs @@ -0,0 +1,13 @@ +import { execSync } from 'child_process'; +import { dirname } from 'path'; +import { fileURLToPath } from 'url'; + +const __dirname = dirname(fileURLToPath(import.meta.url)); + +export default async function globalSetup() { + // Start Redis via Docker Compose + execSync('docker compose up -d --wait', { + cwd: __dirname, + stdio: 'inherit', + }); +} diff --git a/dev-packages/e2e-tests/test-applications/nestjs-bullmq/global-teardown.mjs b/dev-packages/e2e-tests/test-applications/nestjs-bullmq/global-teardown.mjs new file mode 100644 index 000000000000..35ce41179193 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/nestjs-bullmq/global-teardown.mjs @@ -0,0 +1,13 @@ +import { execSync } from 'child_process'; +import { dirname } from 'path'; +import { fileURLToPath } from 'url'; + +const __dirname = dirname(fileURLToPath(import.meta.url)); + +export default async function globalTeardown() { + // Stop Redis and remove containers + execSync('docker compose down --volumes', { + cwd: __dirname, + stdio: 'inherit', + }); +} diff --git a/dev-packages/e2e-tests/test-applications/nestjs-bullmq/nest-cli.json b/dev-packages/e2e-tests/test-applications/nestjs-bullmq/nest-cli.json new file mode 100644 index 000000000000..f9aa683b1ad5 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/nestjs-bullmq/nest-cli.json @@ -0,0 +1,8 @@ +{ + "$schema": "https://json.schemastore.org/nest-cli", + "collection": "@nestjs/schematics", + "sourceRoot": "src", + "compilerOptions": { + "deleteOutDir": true + } +} diff --git a/dev-packages/e2e-tests/test-applications/nestjs-bullmq/package.json b/dev-packages/e2e-tests/test-applications/nestjs-bullmq/package.json new file mode 100644 index 000000000000..77d8c024e021 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/nestjs-bullmq/package.json @@ -0,0 +1,36 @@ +{ + "name": "nestjs-bullmq", + "version": "0.0.1", + "private": true, + 
"scripts": { + "build": "nest build", + "start": "nest start", + "start:dev": "nest start --watch", + "start:prod": "node dist/main", + "clean": "npx rimraf node_modules pnpm-lock.yaml", + "test": "playwright test", + "test:build": "pnpm install && pnpm build", + "test:assert": "pnpm test" + }, + "dependencies": { + "@nestjs/common": "^11.0.0", + "@nestjs/core": "^11.0.0", + "@nestjs/platform-express": "^11.0.0", + "@nestjs/bullmq": "^11.0.0", + "bullmq": "^5.0.0", + "@sentry/nestjs": "latest || *", + "reflect-metadata": "^0.2.0", + "rxjs": "^7.8.1" + }, + "devDependencies": { + "@playwright/test": "~1.56.0", + "@sentry-internal/test-utils": "link:../../../test-utils", + "@nestjs/cli": "^11.0.0", + "@nestjs/schematics": "^11.0.0", + "@types/node": "^18.19.1", + "typescript": "~5.0.0" + }, + "volta": { + "extends": "../../package.json" + } +} diff --git a/dev-packages/e2e-tests/test-applications/nestjs-bullmq/playwright.config.mjs b/dev-packages/e2e-tests/test-applications/nestjs-bullmq/playwright.config.mjs new file mode 100644 index 000000000000..d5fd0b394f15 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/nestjs-bullmq/playwright.config.mjs @@ -0,0 +1,11 @@ +import { getPlaywrightConfig } from '@sentry-internal/test-utils'; + +const config = getPlaywrightConfig({ + startCommand: `pnpm start`, +}); + +export default { + ...config, + globalSetup: './global-setup.mjs', + globalTeardown: './global-teardown.mjs', +}; diff --git a/dev-packages/e2e-tests/test-applications/nestjs-bullmq/src/app.controller.ts b/dev-packages/e2e-tests/test-applications/nestjs-bullmq/src/app.controller.ts new file mode 100644 index 000000000000..e8c865e17bcc --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/nestjs-bullmq/src/app.controller.ts @@ -0,0 +1,21 @@ +import { Controller, Get, Param } from '@nestjs/common'; +import { InjectQueue } from '@nestjs/bullmq'; +import { Queue } from 'bullmq'; + +@Controller() +export class AppController { + 
constructor(@InjectQueue('test-queue') private readonly queue: Queue) {} + + @Get('enqueue/:name') + async enqueue(@Param('name') name: string) { + await this.queue.add(name, { timestamp: Date.now() }); + return { queued: true }; + } + + @Get('check-isolation') + checkIsolation() { + // This endpoint is called after the processor adds a breadcrumb. + // The test verifies that breadcrumbs from the processor do NOT leak here. + return { message: 'ok' }; + } +} diff --git a/dev-packages/e2e-tests/test-applications/nestjs-bullmq/src/app.module.ts b/dev-packages/e2e-tests/test-applications/nestjs-bullmq/src/app.module.ts new file mode 100644 index 000000000000..be5fd107e4cb --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/nestjs-bullmq/src/app.module.ts @@ -0,0 +1,25 @@ +import { Module } from '@nestjs/common'; +import { APP_FILTER } from '@nestjs/core'; +import { BullModule } from '@nestjs/bullmq'; +import { SentryGlobalFilter, SentryModule } from '@sentry/nestjs/setup'; +import { AppController } from './app.controller'; +import { TestProcessor } from './jobs/test.processor'; + +@Module({ + imports: [ + SentryModule.forRoot(), + BullModule.forRoot({ + connection: { host: 'localhost', port: 6379 }, + }), + BullModule.registerQueue({ name: 'test-queue' }), + ], + controllers: [AppController], + providers: [ + TestProcessor, + { + provide: APP_FILTER, + useClass: SentryGlobalFilter, + }, + ], +}) +export class AppModule {} diff --git a/dev-packages/e2e-tests/test-applications/nestjs-bullmq/src/instrument.ts b/dev-packages/e2e-tests/test-applications/nestjs-bullmq/src/instrument.ts new file mode 100644 index 000000000000..4f16ebb36d11 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/nestjs-bullmq/src/instrument.ts @@ -0,0 +1,12 @@ +import * as Sentry from '@sentry/nestjs'; + +Sentry.init({ + environment: 'qa', // dynamic sampling bias to keep transactions + dsn: process.env.E2E_TEST_DSN, + tunnel: `http://localhost:3031/`, // proxy server + 
tracesSampleRate: 1, + transportOptions: { + // We expect the app to send a lot of events in a short time + bufferSize: 1000, + }, +}); diff --git a/dev-packages/e2e-tests/test-applications/nestjs-bullmq/src/jobs/test.processor.ts b/dev-packages/e2e-tests/test-applications/nestjs-bullmq/src/jobs/test.processor.ts new file mode 100644 index 000000000000..1c6cf8ef3052 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/nestjs-bullmq/src/jobs/test.processor.ts @@ -0,0 +1,71 @@ +import { OnWorkerEvent, Processor, WorkerHost } from '@nestjs/bullmq'; +import { Job } from 'bullmq'; +import * as Sentry from '@sentry/nestjs'; + +@Processor('test-queue') +export class TestProcessor extends WorkerHost { + async process(job: Job): Promise { + if (job.name === 'fail') { + throw new Error('Test error from BullMQ processor'); + } + + if (job.name === 'breadcrumb-test') { + Sentry.addBreadcrumb({ + message: 'leaked-breadcrumb-from-bullmq-processor', + level: 'info', + }); + return { processed: true }; + } + + if (job.name === 'lifecycle-failed-breadcrumb-test') { + throw new Error('Intentional error to trigger failed event'); + } + + if (job.name === 'lifecycle-progress-breadcrumb-test') { + await job.updateProgress(50); + return { processed: true }; + } + + return { processed: true }; + } + + @OnWorkerEvent('completed') + onCompleted(job: Job) { + if (job.name === 'lifecycle-breadcrumb-test') { + Sentry.addBreadcrumb({ + message: 'leaked-breadcrumb-from-lifecycle-event', + level: 'info', + }); + } + } + + @OnWorkerEvent('active') + onActive(job: Job) { + if (job.name === 'lifecycle-active-breadcrumb-test') { + Sentry.addBreadcrumb({ + message: 'leaked-breadcrumb-from-active-event', + level: 'info', + }); + } + } + + @OnWorkerEvent('failed') + onFailed(job: Job) { + if (job.name === 'lifecycle-failed-breadcrumb-test') { + Sentry.addBreadcrumb({ + message: 'leaked-breadcrumb-from-failed-event', + level: 'info', + }); + } + } + + @OnWorkerEvent('progress') + onProgress(job: 
Job) { + if (job.name === 'lifecycle-progress-breadcrumb-test') { + Sentry.addBreadcrumb({ + message: 'leaked-breadcrumb-from-progress-event', + level: 'info', + }); + } + } +} diff --git a/dev-packages/e2e-tests/test-applications/nestjs-bullmq/src/main.ts b/dev-packages/e2e-tests/test-applications/nestjs-bullmq/src/main.ts new file mode 100644 index 000000000000..71ce685f4d61 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/nestjs-bullmq/src/main.ts @@ -0,0 +1,15 @@ +// Import this first +import './instrument'; + +// Import other modules +import { NestFactory } from '@nestjs/core'; +import { AppModule } from './app.module'; + +const PORT = 3030; + +async function bootstrap() { + const app = await NestFactory.create(AppModule); + await app.listen(PORT); +} + +bootstrap(); diff --git a/dev-packages/e2e-tests/test-applications/nestjs-bullmq/start-event-proxy.mjs b/dev-packages/e2e-tests/test-applications/nestjs-bullmq/start-event-proxy.mjs new file mode 100644 index 000000000000..fe8225afa969 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/nestjs-bullmq/start-event-proxy.mjs @@ -0,0 +1,6 @@ +import { startEventProxyServer } from '@sentry-internal/test-utils'; + +startEventProxyServer({ + port: 3031, + proxyServerName: 'nestjs-bullmq', +}); diff --git a/dev-packages/e2e-tests/test-applications/nestjs-bullmq/tests/bullmq.test.ts b/dev-packages/e2e-tests/test-applications/nestjs-bullmq/tests/bullmq.test.ts new file mode 100644 index 000000000000..e49ebd80488c --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/nestjs-bullmq/tests/bullmq.test.ts @@ -0,0 +1,176 @@ +import { expect, test } from '@playwright/test'; +import { waitForError, waitForTransaction } from '@sentry-internal/test-utils'; + +test('Sends exception to Sentry on error in @Processor process method', async ({ baseURL }) => { + const errorEventPromise = waitForError('nestjs-bullmq', event => { + return ( + !event.type && + event.exception?.values?.[0]?.value === 'Test error 
from BullMQ processor' && + event.exception?.values?.[0]?.mechanism?.type === 'auto.queue.nestjs.bullmq' + ); + }); + + // Enqueue a job that will fail + await fetch(`${baseURL}/enqueue/fail`); + + const errorEvent = await errorEventPromise; + + expect(errorEvent.exception?.values).toHaveLength(1); + expect(errorEvent.exception?.values?.[0]?.mechanism).toEqual({ + handled: false, + type: 'auto.queue.nestjs.bullmq', + }); +}); + +test('Creates a transaction for successful job processing', async ({ baseURL }) => { + const transactionPromise = waitForTransaction('nestjs-bullmq', transactionEvent => { + return transactionEvent.contexts?.trace?.op === 'queue.process'; + }); + + // Enqueue a job that will succeed + await fetch(`${baseURL}/enqueue/success`); + + const transaction = await transactionPromise; + + expect(transaction.transaction).toBe('test-queue process'); + expect(transaction.contexts?.trace?.op).toBe('queue.process'); + expect(transaction.contexts?.trace?.origin).toBe('auto.queue.nestjs.bullmq'); +}); + +test('BullMQ processor breadcrumbs do not leak into subsequent HTTP requests', async ({ baseURL }) => { + const processTransactionPromise = waitForTransaction('nestjs-bullmq', transactionEvent => { + return transactionEvent.contexts?.trace?.op === 'queue.process'; + }); + + // Enqueue a job that adds a breadcrumb during processing + await fetch(`${baseURL}/enqueue/breadcrumb-test`); + + await processTransactionPromise; + + const transactionPromise = waitForTransaction('nestjs-bullmq', transactionEvent => { + return transactionEvent.transaction === 'GET /check-isolation'; + }); + + await fetch(`${baseURL}/check-isolation`); + + const transaction = await transactionPromise; + + const leakedBreadcrumb = (transaction.breadcrumbs || []).find( + (b: any) => b.message === 'leaked-breadcrumb-from-bullmq-processor', + ); + expect(leakedBreadcrumb).toBeUndefined(); +}); + +// TODO: @OnWorkerEvent('completed') handlers run outside the isolation scope created by 
process(). +// They are registered via worker.on() (EventEmitter), so breadcrumbs/tags set there +// leak into the default isolation scope and appear on subsequent HTTP requests. +test('BullMQ @OnWorkerEvent completed lifecycle breadcrumbs currently leak into subsequent HTTP requests', async ({ + baseURL, +}) => { + const processTransactionPromise = waitForTransaction('nestjs-bullmq', transactionEvent => { + return transactionEvent.contexts?.trace?.op === 'queue.process'; + }); + + // Enqueue a job (the completed event fires right after the job is processed) + await fetch(`${baseURL}/enqueue/lifecycle-breadcrumb-test`); + + await processTransactionPromise; + + const transactionPromise = waitForTransaction('nestjs-bullmq', transactionEvent => { + return transactionEvent.transaction === 'GET /check-isolation'; + }); + + await fetch(`${baseURL}/check-isolation`); + + const transaction = await transactionPromise; + + const leakedBreadcrumb = (transaction.breadcrumbs || []).find( + (b: any) => b.message === 'leaked-breadcrumb-from-lifecycle-event', + ); + // This SHOULD be toBeUndefined() once lifecycle event isolation is implemented. + expect(leakedBreadcrumb).toBeDefined(); +}); + +// TODO: @OnWorkerEvent('active') handlers run outside the isolation scope created by process(). +// Breadcrumbs set there leak into the default isolation scope and appear on subsequent HTTP requests. 
+test('BullMQ @OnWorkerEvent active lifecycle breadcrumbs currently leak into subsequent HTTP requests', async ({ + baseURL, +}) => { + const processTransactionPromise = waitForTransaction('nestjs-bullmq', transactionEvent => { + return transactionEvent.contexts?.trace?.op === 'queue.process'; + }); + + await fetch(`${baseURL}/enqueue/lifecycle-active-breadcrumb-test`); + + await processTransactionPromise; + + const transactionPromise = waitForTransaction('nestjs-bullmq', transactionEvent => { + return transactionEvent.transaction === 'GET /check-isolation'; + }); + + await fetch(`${baseURL}/check-isolation`); + + const transaction = await transactionPromise; + + const leakedBreadcrumb = (transaction.breadcrumbs || []).find( + (b: any) => b.message === 'leaked-breadcrumb-from-active-event', + ); + // This SHOULD be toBeUndefined() once lifecycle event isolation is implemented. + expect(leakedBreadcrumb).toBeDefined(); +}); + +// TODO: @OnWorkerEvent('failed') handlers run outside the isolation scope created by process(). +// Breadcrumbs set there leak into the default isolation scope and appear on subsequent HTTP requests. 
+test('BullMQ @OnWorkerEvent failed lifecycle breadcrumbs currently leak into subsequent HTTP requests', async ({ + baseURL, +}) => { + const processTransactionPromise = waitForTransaction('nestjs-bullmq', transactionEvent => { + return transactionEvent.contexts?.trace?.op === 'queue.process'; + }); + + await fetch(`${baseURL}/enqueue/lifecycle-failed-breadcrumb-test`); + + await processTransactionPromise; + + const transactionPromise = waitForTransaction('nestjs-bullmq', transactionEvent => { + return transactionEvent.transaction === 'GET /check-isolation'; + }); + + await fetch(`${baseURL}/check-isolation`); + + const transaction = await transactionPromise; + + const leakedBreadcrumb = (transaction.breadcrumbs || []).find( + (b: any) => b.message === 'leaked-breadcrumb-from-failed-event', + ); + // This SHOULD be toBeUndefined() once lifecycle event isolation is implemented. + expect(leakedBreadcrumb).toBeDefined(); +}); + +// The 'progress' event does NOT leak breadcrumbs — unlike 'active', 'completed', and 'failed', +// BullMQ emits it inside the process() call (via job.updateProgress()), so it runs within +// the isolation scope already established by the instrumentation. 
+test('BullMQ @OnWorkerEvent progress lifecycle breadcrumbs do not leak into subsequent HTTP requests', async ({ + baseURL, +}) => { + const processTransactionPromise = waitForTransaction('nestjs-bullmq', transactionEvent => { + return transactionEvent.contexts?.trace?.op === 'queue.process'; + }); + + await fetch(`${baseURL}/enqueue/lifecycle-progress-breadcrumb-test`); + + await processTransactionPromise; + + const transactionPromise = waitForTransaction('nestjs-bullmq', transactionEvent => { + return transactionEvent.transaction === 'GET /check-isolation'; + }); + + await fetch(`${baseURL}/check-isolation`); + + const transaction = await transactionPromise; + + const leakedBreadcrumb = (transaction.breadcrumbs || []).find( + (b: any) => b.message === 'leaked-breadcrumb-from-progress-event', + ); + expect(leakedBreadcrumb).toBeUndefined(); +}); diff --git a/dev-packages/e2e-tests/test-applications/nestjs-bullmq/tsconfig.build.json b/dev-packages/e2e-tests/test-applications/nestjs-bullmq/tsconfig.build.json new file mode 100644 index 000000000000..26c30d4eddf2 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/nestjs-bullmq/tsconfig.build.json @@ -0,0 +1,4 @@ +{ + "extends": "./tsconfig.json", + "exclude": ["node_modules", "test", "dist"] +} diff --git a/dev-packages/e2e-tests/test-applications/nestjs-bullmq/tsconfig.json b/dev-packages/e2e-tests/test-applications/nestjs-bullmq/tsconfig.json new file mode 100644 index 000000000000..cf79f029c781 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/nestjs-bullmq/tsconfig.json @@ -0,0 +1,22 @@ +{ + "compilerOptions": { + "module": "commonjs", + "declaration": true, + "removeComments": true, + "emitDecoratorMetadata": true, + "experimentalDecorators": true, + "allowSyntheticDefaultImports": true, + "target": "ES2021", + "sourceMap": true, + "outDir": "./dist", + "baseUrl": "./", + "incremental": true, + "skipLibCheck": true, + "strictNullChecks": false, + "noImplicitAny": false, + 
"strictBindCallApply": false, + "forceConsistentCasingInFileNames": false, + "noFallthroughCasesInSwitch": false, + "moduleResolution": "Node16" + } +} diff --git a/dev-packages/e2e-tests/test-applications/nestjs-distributed-tracing/src/events.controller.ts b/dev-packages/e2e-tests/test-applications/nestjs-distributed-tracing/src/events.controller.ts index 5c4c92ac5f7d..581ee0b49b09 100644 --- a/dev-packages/e2e-tests/test-applications/nestjs-distributed-tracing/src/events.controller.ts +++ b/dev-packages/e2e-tests/test-applications/nestjs-distributed-tracing/src/events.controller.ts @@ -18,4 +18,9 @@ export class EventsController { return { message: 'Events emitted' }; } + + @Get('test-isolation') + testIsolation() { + return { message: 'ok' }; + } } diff --git a/dev-packages/e2e-tests/test-applications/nestjs-distributed-tracing/src/events.service.ts b/dev-packages/e2e-tests/test-applications/nestjs-distributed-tracing/src/events.service.ts index ad119106ef08..9ff85ae949d1 100644 --- a/dev-packages/e2e-tests/test-applications/nestjs-distributed-tracing/src/events.service.ts +++ b/dev-packages/e2e-tests/test-applications/nestjs-distributed-tracing/src/events.service.ts @@ -3,7 +3,15 @@ import { EventEmitter2 } from '@nestjs/event-emitter'; @Injectable() export class EventsService { - constructor(private readonly eventEmitter: EventEmitter2) {} + constructor(private readonly eventEmitter: EventEmitter2) { + // Emit event periodically outside of HTTP context to test isolation scope behavior. + // setInterval runs in the default async context (no HTTP request), so without proper + // isolation scope forking, the breadcrumb set by the handler leaks into the default + // isolation scope and gets cloned into subsequent HTTP requests. 
+ setInterval(() => { + this.eventEmitter.emit('test-isolation.breadcrumb'); + }, 2000); + } async emitEvents() { await this.eventEmitter.emit('myEvent.pass', { data: 'test' }); diff --git a/dev-packages/e2e-tests/test-applications/nestjs-distributed-tracing/src/listeners/test-event.listener.ts b/dev-packages/e2e-tests/test-applications/nestjs-distributed-tracing/src/listeners/test-event.listener.ts index 26d934ba384c..ddbe3dd13261 100644 --- a/dev-packages/e2e-tests/test-applications/nestjs-distributed-tracing/src/listeners/test-event.listener.ts +++ b/dev-packages/e2e-tests/test-applications/nestjs-distributed-tracing/src/listeners/test-event.listener.ts @@ -15,6 +15,14 @@ export class TestEventListener { throw new Error('Test error from event handler'); } + @OnEvent('test-isolation.breadcrumb') + handleIsolationBreadcrumbEvent(): void { + Sentry.addBreadcrumb({ + message: 'leaked-breadcrumb-from-event-handler', + level: 'info', + }); + } + @OnEvent('multiple.first') @OnEvent('multiple.second') async handleMultipleEvents(payload: any): Promise { diff --git a/dev-packages/e2e-tests/test-applications/nestjs-distributed-tracing/tests/events.test.ts b/dev-packages/e2e-tests/test-applications/nestjs-distributed-tracing/tests/events.test.ts index 60c1ad6590af..24e93b6cbd86 100644 --- a/dev-packages/e2e-tests/test-applications/nestjs-distributed-tracing/tests/events.test.ts +++ b/dev-packages/e2e-tests/test-applications/nestjs-distributed-tracing/tests/events.test.ts @@ -44,6 +44,28 @@ test('Event emitter', async () => { }); }); +test('Event handler breadcrumbs do not leak into subsequent HTTP requests', async () => { + // The app emits 'test-isolation.breadcrumb' every 2s via setInterval (outside HTTP context). + // The handler adds a breadcrumb. Without isolation scope forking, this breadcrumb leaks + // into the default isolation scope and gets cloned into subsequent HTTP requests. 
+ + // Wait for at least one setInterval tick to fire and add the breadcrumb + await new Promise(resolve => setTimeout(resolve, 3000)); + + const transactionPromise = waitForTransaction('nestjs-distributed-tracing', transactionEvent => { + return transactionEvent.transaction === 'GET /events/test-isolation'; + }); + + await fetch('http://localhost:3050/events/test-isolation'); + + const transaction = await transactionPromise; + + const leakedBreadcrumb = (transaction.breadcrumbs || []).find( + (b: any) => b.message === 'leaked-breadcrumb-from-event-handler', + ); + expect(leakedBreadcrumb).toBeUndefined(); +}); + test('Multiple OnEvent decorators', async () => { const firstTxPromise = waitForTransaction('nestjs-distributed-tracing', transactionEvent => { return transactionEvent.transaction === 'event multiple.first|multiple.second'; @@ -64,6 +86,10 @@ test('Multiple OnEvent decorators', async () => { expect(firstTx).toBeDefined(); expect(secondTx).toBeDefined(); - // assert that the correct payloads were added - expect(rootTx.tags).toMatchObject({ 'test-first': true, 'test-second': true }); + + // Tags should be on the event handler transactions, not the root HTTP transaction + expect(firstTx.tags?.['test-first'] || firstTx.tags?.['test-second']).toBe(true); + expect(secondTx.tags?.['test-first'] || secondTx.tags?.['test-second']).toBe(true); + expect(rootTx.tags?.['test-first']).toBeUndefined(); + expect(rootTx.tags?.['test-second']).toBeUndefined(); }); diff --git a/dev-packages/e2e-tests/test-applications/nestjs-fastify/tests/cron-decorator.test.ts b/dev-packages/e2e-tests/test-applications/nestjs-fastify/tests/cron-decorator.test.ts index 1e9d62c2c96a..f00beed5c9b2 100644 --- a/dev-packages/e2e-tests/test-applications/nestjs-fastify/tests/cron-decorator.test.ts +++ b/dev-packages/e2e-tests/test-applications/nestjs-fastify/tests/cron-decorator.test.ts @@ -64,7 +64,11 @@ test('Cron job triggers send of in_progress envelope', async ({ baseURL }) => { test('Sends 
exceptions to Sentry on error in cron job', async ({ baseURL }) => { const errorEventPromise = waitForError('nestjs-fastify', event => { - return !event.type && event.exception?.values?.[0]?.value === 'Test error from cron job'; + return ( + !event.type && + event.exception?.values?.[0]?.value === 'Test error from cron job' && + event.exception?.values?.[0]?.mechanism?.type === 'auto.function.nestjs.cron' + ); }); const errorEvent = await errorEventPromise; @@ -73,7 +77,7 @@ test('Sends exceptions to Sentry on error in cron job', async ({ baseURL }) => { expect(errorEvent.exception?.values?.[0]?.value).toBe('Test error from cron job'); expect(errorEvent.exception?.values?.[0]?.mechanism).toEqual({ handled: false, - type: 'auto.cron.nestjs.async', + type: 'auto.function.nestjs.cron', }); expect(errorEvent.contexts?.trace).toEqual({ trace_id: expect.stringMatching(/[a-f0-9]{32}/), diff --git a/dev-packages/e2e-tests/test-applications/nestjs-fastify/tests/transactions.test.ts b/dev-packages/e2e-tests/test-applications/nestjs-fastify/tests/transactions.test.ts index 093375e11e06..43d3afc63468 100644 --- a/dev-packages/e2e-tests/test-applications/nestjs-fastify/tests/transactions.test.ts +++ b/dev-packages/e2e-tests/test-applications/nestjs-fastify/tests/transactions.test.ts @@ -276,7 +276,7 @@ test('API route transaction includes nest guard span and span started in guard i trace_id: expect.stringMatching(/[a-f0-9]{32}/), data: { 'sentry.op': 'middleware.nestjs', - 'sentry.origin': 'auto.middleware.nestjs', + 'sentry.origin': 'auto.middleware.nestjs.guard', }, description: 'ExampleGuard', parent_span_id: expect.stringMatching(/[a-f0-9]{16}/), @@ -284,7 +284,7 @@ test('API route transaction includes nest guard span and span started in guard i timestamp: expect.any(Number), status: 'ok', op: 'middleware.nestjs', - origin: 'auto.middleware.nestjs', + origin: 'auto.middleware.nestjs.guard', }, ]), }), @@ -340,7 +340,7 @@ test('API route transaction includes nest pipe span 
for valid request', async ({ trace_id: expect.stringMatching(/[a-f0-9]{32}/), data: { 'sentry.op': 'middleware.nestjs', - 'sentry.origin': 'auto.middleware.nestjs', + 'sentry.origin': 'auto.middleware.nestjs.pipe', }, description: 'ParseIntPipe', parent_span_id: expect.stringMatching(/[a-f0-9]{16}/), @@ -348,7 +348,7 @@ test('API route transaction includes nest pipe span for valid request', async ({ timestamp: expect.any(Number), status: 'ok', op: 'middleware.nestjs', - origin: 'auto.middleware.nestjs', + origin: 'auto.middleware.nestjs.pipe', }, ]), }), @@ -377,7 +377,7 @@ test('API route transaction includes nest pipe span for invalid request', async trace_id: expect.stringMatching(/[a-f0-9]{32}/), data: { 'sentry.op': 'middleware.nestjs', - 'sentry.origin': 'auto.middleware.nestjs', + 'sentry.origin': 'auto.middleware.nestjs.pipe', }, description: 'ParseIntPipe', parent_span_id: expect.stringMatching(/[a-f0-9]{16}/), @@ -385,7 +385,7 @@ test('API route transaction includes nest pipe span for invalid request', async timestamp: expect.any(Number), status: 'internal_error', op: 'middleware.nestjs', - origin: 'auto.middleware.nestjs', + origin: 'auto.middleware.nestjs.pipe', }, ]), }), @@ -416,7 +416,7 @@ test('API route transaction includes nest interceptor spans before route executi trace_id: expect.stringMatching(/[a-f0-9]{32}/), data: { 'sentry.op': 'middleware.nestjs', - 'sentry.origin': 'auto.middleware.nestjs', + 'sentry.origin': 'auto.middleware.nestjs.interceptor', }, description: 'ExampleInterceptor1', parent_span_id: expect.stringMatching(/[a-f0-9]{16}/), @@ -424,14 +424,14 @@ test('API route transaction includes nest interceptor spans before route executi timestamp: expect.any(Number), status: 'ok', op: 'middleware.nestjs', - origin: 'auto.middleware.nestjs', + origin: 'auto.middleware.nestjs.interceptor', }, { span_id: expect.stringMatching(/[a-f0-9]{16}/), trace_id: expect.stringMatching(/[a-f0-9]{32}/), data: { 'sentry.op': 'middleware.nestjs', - 
'sentry.origin': 'auto.middleware.nestjs', + 'sentry.origin': 'auto.middleware.nestjs.interceptor', }, description: 'ExampleInterceptor2', parent_span_id: expect.stringMatching(/[a-f0-9]{16}/), @@ -439,7 +439,7 @@ test('API route transaction includes nest interceptor spans before route executi timestamp: expect.any(Number), status: 'ok', op: 'middleware.nestjs', - origin: 'auto.middleware.nestjs', + origin: 'auto.middleware.nestjs.interceptor', }, ]), }), @@ -534,7 +534,7 @@ test('API route transaction includes exactly one nest interceptor span after rou trace_id: expect.stringMatching(/[a-f0-9]{32}/), data: { 'sentry.op': 'middleware.nestjs', - 'sentry.origin': 'auto.middleware.nestjs', + 'sentry.origin': 'auto.middleware.nestjs.interceptor', }, description: 'Interceptors - After Route', parent_span_id: expect.stringMatching(/[a-f0-9]{16}/), @@ -542,7 +542,7 @@ test('API route transaction includes exactly one nest interceptor span after rou timestamp: expect.any(Number), status: 'ok', op: 'middleware.nestjs', - origin: 'auto.middleware.nestjs', + origin: 'auto.middleware.nestjs.interceptor', }, ]), }), @@ -616,7 +616,7 @@ test('API route transaction includes nest async interceptor spans before route e trace_id: expect.stringMatching(/[a-f0-9]{32}/), data: { 'sentry.op': 'middleware.nestjs', - 'sentry.origin': 'auto.middleware.nestjs', + 'sentry.origin': 'auto.middleware.nestjs.interceptor', }, description: 'AsyncInterceptor', parent_span_id: expect.stringMatching(/[a-f0-9]{16}/), @@ -624,7 +624,7 @@ test('API route transaction includes nest async interceptor spans before route e timestamp: expect.any(Number), status: 'ok', op: 'middleware.nestjs', - origin: 'auto.middleware.nestjs', + origin: 'auto.middleware.nestjs.interceptor', }, ]), }), @@ -701,7 +701,7 @@ test('API route transaction includes exactly one nest async interceptor span aft trace_id: expect.stringMatching(/[a-f0-9]{32}/), data: { 'sentry.op': 'middleware.nestjs', - 'sentry.origin': 
'auto.middleware.nestjs', + 'sentry.origin': 'auto.middleware.nestjs.interceptor', }, description: 'Interceptors - After Route', parent_span_id: expect.stringMatching(/[a-f0-9]{16}/), @@ -709,7 +709,7 @@ test('API route transaction includes exactly one nest async interceptor span aft timestamp: expect.any(Number), status: 'ok', op: 'middleware.nestjs', - origin: 'auto.middleware.nestjs', + origin: 'auto.middleware.nestjs.interceptor', }, ]), }), diff --git a/dev-packages/e2e-tests/test-applications/nestjs-with-submodules-decorator/tests/transactions.test.ts b/dev-packages/e2e-tests/test-applications/nestjs-with-submodules-decorator/tests/transactions.test.ts index 77cb616450f9..380e5fdc018e 100644 --- a/dev-packages/e2e-tests/test-applications/nestjs-with-submodules-decorator/tests/transactions.test.ts +++ b/dev-packages/e2e-tests/test-applications/nestjs-with-submodules-decorator/tests/transactions.test.ts @@ -153,7 +153,7 @@ test('API route transaction includes exception filter span for global filter in trace_id: expect.stringMatching(/[a-f0-9]{32}/), data: { 'sentry.op': 'middleware.nestjs', - 'sentry.origin': 'auto.middleware.nestjs', + 'sentry.origin': 'auto.middleware.nestjs.exception_filter', }, description: 'ExampleExceptionFilter', parent_span_id: expect.stringMatching(/[a-f0-9]{16}/), @@ -161,7 +161,7 @@ test('API route transaction includes exception filter span for global filter in timestamp: expect.any(Number), status: 'ok', op: 'middleware.nestjs', - origin: 'auto.middleware.nestjs', + origin: 'auto.middleware.nestjs.exception_filter', }, ]), }), @@ -192,7 +192,7 @@ test('API route transaction includes exception filter span for local filter in m trace_id: expect.stringMatching(/[a-f0-9]{32}/), data: { 'sentry.op': 'middleware.nestjs', - 'sentry.origin': 'auto.middleware.nestjs', + 'sentry.origin': 'auto.middleware.nestjs.exception_filter', }, description: 'LocalExampleExceptionFilter', parent_span_id: expect.stringMatching(/[a-f0-9]{16}/), @@ -200,7 
+200,7 @@ test('API route transaction includes exception filter span for local filter in m timestamp: expect.any(Number), status: 'ok', op: 'middleware.nestjs', - origin: 'auto.middleware.nestjs', + origin: 'auto.middleware.nestjs.exception_filter', }, ]), }), @@ -231,7 +231,7 @@ test('API route transaction includes exception filter span for global filter in trace_id: expect.stringMatching(/[a-f0-9]{32}/), data: { 'sentry.op': 'middleware.nestjs', - 'sentry.origin': 'auto.middleware.nestjs', + 'sentry.origin': 'auto.middleware.nestjs.exception_filter', }, description: 'ExampleExceptionFilterRegisteredFirst', parent_span_id: expect.stringMatching(/[a-f0-9]{16}/), @@ -239,7 +239,7 @@ test('API route transaction includes exception filter span for global filter in timestamp: expect.any(Number), status: 'ok', op: 'middleware.nestjs', - origin: 'auto.middleware.nestjs', + origin: 'auto.middleware.nestjs.exception_filter', }, ]), }), diff --git a/dev-packages/e2e-tests/test-applications/nestjs-with-submodules/tests/transactions.test.ts b/dev-packages/e2e-tests/test-applications/nestjs-with-submodules/tests/transactions.test.ts index 63976a559898..416ff72e946b 100644 --- a/dev-packages/e2e-tests/test-applications/nestjs-with-submodules/tests/transactions.test.ts +++ b/dev-packages/e2e-tests/test-applications/nestjs-with-submodules/tests/transactions.test.ts @@ -153,7 +153,7 @@ test('API route transaction includes exception filter span for global filter in trace_id: expect.stringMatching(/[a-f0-9]{32}/), data: { 'sentry.op': 'middleware.nestjs', - 'sentry.origin': 'auto.middleware.nestjs', + 'sentry.origin': 'auto.middleware.nestjs.exception_filter', }, description: 'ExampleExceptionFilter', parent_span_id: expect.stringMatching(/[a-f0-9]{16}/), @@ -161,7 +161,7 @@ test('API route transaction includes exception filter span for global filter in timestamp: expect.any(Number), status: 'ok', op: 'middleware.nestjs', - origin: 'auto.middleware.nestjs', + origin: 
'auto.middleware.nestjs.exception_filter', }, ]), }), @@ -192,7 +192,7 @@ test('API route transaction includes exception filter span for local filter in m trace_id: expect.stringMatching(/[a-f0-9]{32}/), data: { 'sentry.op': 'middleware.nestjs', - 'sentry.origin': 'auto.middleware.nestjs', + 'sentry.origin': 'auto.middleware.nestjs.exception_filter', }, description: 'LocalExampleExceptionFilter', parent_span_id: expect.stringMatching(/[a-f0-9]{16}/), @@ -200,7 +200,7 @@ test('API route transaction includes exception filter span for local filter in m timestamp: expect.any(Number), status: 'ok', op: 'middleware.nestjs', - origin: 'auto.middleware.nestjs', + origin: 'auto.middleware.nestjs.exception_filter', }, ]), }), @@ -231,7 +231,7 @@ test('API route transaction includes exception filter span for global filter in trace_id: expect.stringMatching(/[a-f0-9]{32}/), data: { 'sentry.op': 'middleware.nestjs', - 'sentry.origin': 'auto.middleware.nestjs', + 'sentry.origin': 'auto.middleware.nestjs.exception_filter', }, description: 'ExampleExceptionFilterRegisteredFirst', parent_span_id: expect.stringMatching(/[a-f0-9]{16}/), @@ -239,7 +239,7 @@ test('API route transaction includes exception filter span for global filter in timestamp: expect.any(Number), status: 'ok', op: 'middleware.nestjs', - origin: 'auto.middleware.nestjs', + origin: 'auto.middleware.nestjs.exception_filter', }, ]), }), diff --git a/dev-packages/e2e-tests/test-applications/nextjs-15/tests/ai-test.test.ts b/dev-packages/e2e-tests/test-applications/nextjs-15/tests/ai-test.test.ts index 9fd05f83c5f9..a53f8986512a 100644 --- a/dev-packages/e2e-tests/test-applications/nextjs-15/tests/ai-test.test.ts +++ b/dev-packages/e2e-tests/test-applications/nextjs-15/tests/ai-test.test.ts @@ -34,14 +34,14 @@ test('should create AI spans with correct attributes', async ({ page }) => { expect(firstPipelineSpan?.data?.['vercel.ai.model.id']).toBe('mock-model-id'); 
expect(firstPipelineSpan?.data?.['vercel.ai.model.provider']).toBe('mock-provider'); expect(firstPipelineSpan?.data?.['vercel.ai.prompt']).toContain('Where is the first span?'); - expect(firstPipelineSpan?.data?.['gen_ai.response.text']).toBe('First span here!'); + expect(firstPipelineSpan?.data?.['gen_ai.output.messages']).toContain('First span here!'); expect(firstPipelineSpan?.data?.['gen_ai.usage.input_tokens']).toBe(10); expect(firstPipelineSpan?.data?.['gen_ai.usage.output_tokens']).toBe(20); */ // Second AI call - explicitly enabled telemetry const secondPipelineSpan = aiPipelineSpans[0]; expect(secondPipelineSpan?.data?.['vercel.ai.prompt']).toContain('Where is the second span?'); - expect(secondPipelineSpan?.data?.['gen_ai.response.text']).toContain('Second span here!'); + expect(secondPipelineSpan?.data?.['gen_ai.output.messages']).toContain('Second span here!'); // Third AI call - with tool calls /* const thirdPipelineSpan = aiPipelineSpans[2]; diff --git a/dev-packages/e2e-tests/test-applications/nextjs-16/app/api/queue-send/route.ts b/dev-packages/e2e-tests/test-applications/nextjs-16/app/api/queue-send/route.ts new file mode 100644 index 000000000000..fe49a990921b --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/nextjs-16/app/api/queue-send/route.ts @@ -0,0 +1,14 @@ +import { NextResponse } from 'next/server'; +import { send } from '../../../lib/queue'; + +export const dynamic = 'force-dynamic'; + +export async function POST(request: Request) { + const body = await request.json(); + const topic = body.topic ?? 'orders'; + const payload = body.payload ?? 
body; + + const { messageId } = await send(topic, payload); + + return NextResponse.json({ messageId }); +} diff --git a/dev-packages/e2e-tests/test-applications/nextjs-16/app/api/queues/process-order/route.ts b/dev-packages/e2e-tests/test-applications/nextjs-16/app/api/queues/process-order/route.ts new file mode 100644 index 000000000000..41cec36d5d8a --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/nextjs-16/app/api/queues/process-order/route.ts @@ -0,0 +1,10 @@ +import { handleCallback } from '../../../../lib/queue'; + +export const dynamic = 'force-dynamic'; + +// The @vercel/queue handleCallback return type (CallbackRequestInput) doesn't match +// Next.js's strict route handler type check with webpack builds, so we cast it. +export const POST = handleCallback(async (message, _metadata) => { + // Simulate some async work + await new Promise(resolve => setTimeout(resolve, 50)); +}) as unknown as (req: Request) => Promise; diff --git a/dev-packages/e2e-tests/test-applications/nextjs-16/app/api/v3/topic/[...params]/route.ts b/dev-packages/e2e-tests/test-applications/nextjs-16/app/api/v3/topic/[...params]/route.ts new file mode 100644 index 000000000000..51dfa2e656db --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/nextjs-16/app/api/v3/topic/[...params]/route.ts @@ -0,0 +1,112 @@ +import { NextResponse } from 'next/server'; + +/** + * Mock Vercel Queues API server. + * + * This route handler simulates the Vercel Queues HTTP API so that the real + * @vercel/queue SDK can be used in E2E tests without Vercel infrastructure. 
+ * + * Handled endpoints: + * POST /api/v3/topic/{topic} → SendMessage + * POST /api/v3/topic/{topic}/consumer/{consumer}/id/{messageId} → ReceiveMessageById + * DELETE /api/v3/topic/{topic}/consumer/{consumer}/lease/{handle} → AcknowledgeMessage + * PATCH /api/v3/topic/{topic}/consumer/{consumer}/lease/{handle} → ExtendLease + */ + +export const dynamic = 'force-dynamic'; + +let messageCounter = 0; + +function generateMessageId(): string { + return `msg_test_${++messageCounter}_${Date.now()}`; +} + +function generateReceiptHandle(): string { + return `rh_test_${Date.now()}_${Math.random().toString(36).slice(2)}`; +} + +// Encode a file path into a consumer-group name, matching the SDK's algorithm. +function filePathToConsumerGroup(filePath: string): string { + let result = ''; + for (const char of filePath) { + if (char === '_') result += '__'; + else if (char === '/') result += '_S'; + else if (char === '.') result += '_D'; + else if (/[A-Za-z0-9-]/.test(char)) result += char; + else result += '_' + char.charCodeAt(0).toString(16).toUpperCase().padStart(2, '0'); + } + return result; +} + +// Topic → consumer route path (mirrors vercel.json experimentalTriggers). +const TOPIC_ROUTES: Record = { + orders: '/api/queues/process-order', +}; + +// The file path key used in vercel.json for each consumer route. 
+const ROUTE_FILE_PATHS: Record = { + '/api/queues/process-order': 'app/api/queues/process-order/route.ts', +}; + +export async function POST(request: Request, { params }: { params: Promise<{ params: string[] }> }) { + const { params: segments } = await params; + + // POST /api/v3/topic/{topic} → SendMessage + if (segments.length === 1) { + const topic = segments[0]; + const body = await request.arrayBuffer(); + const messageId = generateMessageId(); + const receiptHandle = generateReceiptHandle(); + const now = new Date(); + const createdAt = now.toISOString(); + const expiresAt = new Date(now.getTime() + 86_400_000).toISOString(); + const visibilityDeadline = new Date(now.getTime() + 300_000).toISOString(); + + const consumerRoute = TOPIC_ROUTES[topic]; + if (consumerRoute) { + const filePath = ROUTE_FILE_PATHS[consumerRoute] ?? consumerRoute; + const consumerGroup = filePathToConsumerGroup(filePath); + const port = process.env.PORT || 3030; + + // Simulate Vercel infrastructure pushing the message to the consumer. + // Fire-and-forget so the SendMessage response returns immediately. 
+ void fetch(`http://localhost:${port}${consumerRoute}`, { + method: 'POST', + headers: { + 'ce-type': 'com.vercel.queue.v2beta', + 'ce-vqsqueuename': topic, + 'ce-vqsconsumergroup': consumerGroup, + 'ce-vqsmessageid': messageId, + 'ce-vqsreceipthandle': receiptHandle, + 'ce-vqsdeliverycount': '1', + 'ce-vqscreatedat': createdAt, + 'ce-vqsexpiresat': expiresAt, + 'ce-vqsregion': 'test1', + 'ce-vqsvisibilitydeadline': visibilityDeadline, + 'content-type': request.headers.get('content-type') || 'application/json', + }, + body: Buffer.from(body), + }).catch(err => console.error('[mock-queue] Failed to push to consumer:', err)); + } + + return NextResponse.json({ messageId }, { status: 201, headers: { 'Vqs-Message-Id': messageId } }); + } + + // POST /api/v3/topic/{topic}/consumer/{consumer}/id/{messageId} → ReceiveMessageById + // Not used in binary-mode push flow, but handled for completeness. + if (segments.length >= 4 && segments[1] === 'consumer') { + return new Response(null, { status: 204 }); + } + + return NextResponse.json({ error: 'Unknown endpoint' }, { status: 404 }); +} + +// DELETE /api/v3/topic/{topic}/consumer/{consumer}/lease/{receiptHandle} → AcknowledgeMessage +export async function DELETE() { + return new Response(null, { status: 204 }); +} + +// PATCH /api/v3/topic/{topic}/consumer/{consumer}/lease/{receiptHandle} → ExtendLease +export async function PATCH() { + return NextResponse.json({ success: true }); +} diff --git a/dev-packages/e2e-tests/test-applications/nextjs-16/lib/queue.ts b/dev-packages/e2e-tests/test-applications/nextjs-16/lib/queue.ts new file mode 100644 index 000000000000..8dc8ce0ad5ed --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/nextjs-16/lib/queue.ts @@ -0,0 +1,12 @@ +import { QueueClient } from '@vercel/queue'; + +// For E2E testing, point the SDK at a local mock server running within Next.js. 
+// The mock API lives at app/api/v3/topic/[...params]/route.ts +const queue = new QueueClient({ + region: 'test1', + resolveBaseUrl: () => new URL(`http://localhost:${process.env.PORT || 3030}`), + token: 'mock-token', + deploymentId: null, +}); + +export const { send, handleCallback } = queue; diff --git a/dev-packages/e2e-tests/test-applications/nextjs-16/package.json b/dev-packages/e2e-tests/test-applications/nextjs-16/package.json index 262a3ed00c79..fc5613a1b44e 100644 --- a/dev-packages/e2e-tests/test-applications/nextjs-16/package.json +++ b/dev-packages/e2e-tests/test-applications/nextjs-16/package.json @@ -25,6 +25,7 @@ "dependencies": { "@sentry/nextjs": "latest || *", "@sentry/core": "latest || *", + "@vercel/queue": "^0.1.3", "ai": "^3.0.0", "import-in-the-middle": "^2", "next": "16.1.5", diff --git a/dev-packages/e2e-tests/test-applications/nextjs-16/tests/ai-test.test.ts b/dev-packages/e2e-tests/test-applications/nextjs-16/tests/ai-test.test.ts index f7dc95e7d00d..5c519cb89a03 100644 --- a/dev-packages/e2e-tests/test-applications/nextjs-16/tests/ai-test.test.ts +++ b/dev-packages/e2e-tests/test-applications/nextjs-16/tests/ai-test.test.ts @@ -34,14 +34,14 @@ test('should create AI spans with correct attributes', async ({ page }) => { expect(firstPipelineSpan?.data?.['vercel.ai.model.id']).toBe('mock-model-id'); expect(firstPipelineSpan?.data?.['vercel.ai.model.provider']).toBe('mock-provider'); expect(firstPipelineSpan?.data?.['vercel.ai.prompt']).toContain('Where is the first span?'); - expect(firstPipelineSpan?.data?.['gen_ai.response.text']).toBe('First span here!'); + expect(firstPipelineSpan?.data?.['gen_ai.output.messages']).toContain('First span here!'); expect(firstPipelineSpan?.data?.['gen_ai.usage.input_tokens']).toBe(10); expect(firstPipelineSpan?.data?.['gen_ai.usage.output_tokens']).toBe(20); */ // Second AI call - explicitly enabled telemetry const secondPipelineSpan = aiPipelineSpans[0]; 
expect(secondPipelineSpan?.data?.['vercel.ai.prompt']).toContain('Where is the second span?'); - expect(secondPipelineSpan?.data?.['gen_ai.response.text']).toContain('Second span here!'); + expect(secondPipelineSpan?.data?.['gen_ai.output.messages']).toContain('Second span here!'); // Third AI call - with tool calls /* const thirdPipelineSpan = aiPipelineSpans[2]; diff --git a/dev-packages/e2e-tests/test-applications/nextjs-16/tests/vercel-queue.test.ts b/dev-packages/e2e-tests/test-applications/nextjs-16/tests/vercel-queue.test.ts new file mode 100644 index 000000000000..eb4635bc2f5a --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/nextjs-16/tests/vercel-queue.test.ts @@ -0,0 +1,51 @@ +import test, { expect } from '@playwright/test'; +import { waitForTransaction } from '@sentry-internal/test-utils'; + +// The queue E2E test only runs in production mode. +// In development mode the @vercel/queue SDK uses an in-memory dispatch that +// bypasses our mock HTTP server, causing duplicate handler invocations. +const isProduction = process.env.TEST_ENV === 'production'; + +test('Should create transactions for queue producer and consumer', async ({ request }) => { + test.skip(!isProduction, 'Vercel Queue test only runs in production mode'); + + // 1. Set up waiters for both the producer and consumer transactions. + const producerTransactionPromise = waitForTransaction('nextjs-16', transactionEvent => { + return transactionEvent?.transaction === 'POST /api/queue-send'; + }); + + const consumerTransactionPromise = waitForTransaction('nextjs-16', transactionEvent => { + return transactionEvent?.transaction === 'POST /api/queues/process-order'; + }); + + // 2. Hit the producer route to enqueue a message. 
+ const response = await request.post('/api/queue-send', { + data: { topic: 'orders', payload: { orderId: 'e2e-test-123', action: 'fulfill' } }, + headers: { 'Content-Type': 'application/json' }, + }); + + const responseBody = await response.json(); + expect(response.status()).toBe(200); + expect(responseBody.messageId).toBeTruthy(); + + // 3. Wait for the producer transaction. + const producerTransaction = await producerTransactionPromise; + expect(producerTransaction).toBeDefined(); + expect(producerTransaction.contexts?.trace?.op).toBe('http.server'); + expect(producerTransaction.contexts?.trace?.status).toBe('ok'); + + // 4. Wait for the consumer transaction (the mock server pushes the message + // to the consumer route via CloudEvent POST). + const consumerTransaction = await consumerTransactionPromise; + expect(consumerTransaction).toBeDefined(); + expect(consumerTransaction.contexts?.trace?.op).toBe('http.server'); + expect(consumerTransaction.contexts?.trace?.status).toBe('ok'); + + // 5. Verify the consumer span has messaging.* attributes from queue instrumentation. 
+ const consumerSpanData = consumerTransaction.contexts?.trace?.data; + expect(consumerSpanData?.['messaging.system']).toBe('vercel.queue'); + expect(consumerSpanData?.['messaging.operation.name']).toBe('process'); + expect(consumerSpanData?.['messaging.destination.name']).toBe('orders'); + expect(consumerSpanData?.['messaging.message.id']).toBeTruthy(); + expect(consumerSpanData?.['messaging.consumer.group.name']).toBeTruthy(); +}); diff --git a/dev-packages/e2e-tests/test-applications/nextjs-16/vercel.json b/dev-packages/e2e-tests/test-applications/nextjs-16/vercel.json index b65f0e84701b..58730a0978fb 100644 --- a/dev-packages/e2e-tests/test-applications/nextjs-16/vercel.json +++ b/dev-packages/e2e-tests/test-applications/nextjs-16/vercel.json @@ -8,5 +8,10 @@ "path": "/api/cron-test-error", "schedule": "30 * * * *" } - ] + ], + "functions": { + "app/api/queues/process-order/route.ts": { + "experimentalTriggers": [{ "type": "queue/v2beta", "topic": "orders" }] + } + } } diff --git a/dev-packages/e2e-tests/test-applications/nextjs-pages-dir/pages/underscore-error/test-error-page-no-server.tsx b/dev-packages/e2e-tests/test-applications/nextjs-pages-dir/pages/underscore-error/test-error-page-no-server.tsx new file mode 100644 index 000000000000..b662bce040b8 --- /dev/null +++ b/dev-packages/e2e-tests/test-applications/nextjs-pages-dir/pages/underscore-error/test-error-page-no-server.tsx @@ -0,0 +1,9 @@ +export default function TestRenderErrorPage() { + throw new Error('Test render error to trigger _error.tsx page'); +} + +// IMPORTANT: Specifically test without `getServerSideProps` +// Opt out of static pre-rendering (otherwise, we get build-time errors) +TestRenderErrorPage.getInitialProps = async () => { + return {}; +}; diff --git a/dev-packages/e2e-tests/test-applications/nextjs-pages-dir/tests/error-page-lasteventid.test.ts b/dev-packages/e2e-tests/test-applications/nextjs-pages-dir/tests/error-page-lasteventid.test.ts index 399c5700e8f2..224fbc075488 100644 
--- a/dev-packages/e2e-tests/test-applications/nextjs-pages-dir/tests/error-page-lasteventid.test.ts +++ b/dev-packages/e2e-tests/test-applications/nextjs-pages-dir/tests/error-page-lasteventid.test.ts @@ -36,3 +36,32 @@ test('lastEventId() should return the event ID after captureUnderscoreErrorExcep expect(errorEvent.event_id).toBe(returnedEventId); expect(errorEvent.event_id).toBe(lastEventId); }); + +test('lastEventId() should return the event ID for component render errors', async ({ page }) => { + test.skip(isDevMode, 'should be skipped for non-dev mode'); + test.skip(isNext13, 'should be skipped for Next.js 13'); + + const errorEventPromise = waitForError('nextjs-pages-dir', errorEvent => { + return errorEvent?.exception?.values?.[0]?.value === 'Test render error to trigger _error.tsx page'; + }); + + await page.goto('/underscore-error/test-error-page-no-server'); + const errorEvent = await errorEventPromise; + + expect(errorEvent.exception?.values?.[0]?.mechanism?.type).toBe('auto.function.nextjs.page_function'); + expect(errorEvent.exception?.values?.[0]?.mechanism?.handled).toBe(false); + + const eventIdFromReturn = await page.locator('[data-testid="event-id"]').textContent(); + const returnedEventId = eventIdFromReturn?.replace('Event ID from return: ', ''); + + const lastEventIdFromFunction = await page.locator('[data-testid="last-event-id"]').textContent(); + const lastEventId = lastEventIdFromFunction?.replace('Event ID from lastEventId(): ', ''); + + expect(returnedEventId).toBeDefined(); + expect(returnedEventId).not.toBe('No event ID'); + expect(lastEventId).toBeDefined(); + expect(lastEventId).not.toBe('No event ID'); + + expect(returnedEventId).toBe(errorEvent.event_id); + expect(lastEventId).toBe(returnedEventId); +}); diff --git a/dev-packages/e2e-tests/test-applications/node-core-express-otel-v2-custom-sampler/package.json b/dev-packages/e2e-tests/test-applications/node-core-express-otel-v2-custom-sampler/package.json index 
972a016966e0..c78cc950074b 100644 --- a/dev-packages/e2e-tests/test-applications/node-core-express-otel-v2-custom-sampler/package.json +++ b/dev-packages/e2e-tests/test-applications/node-core-express-otel-v2-custom-sampler/package.json @@ -12,13 +12,13 @@ }, "dependencies": { "@opentelemetry/api": "^1.9.0", - "@opentelemetry/context-async-hooks": "^2.5.1", - "@opentelemetry/core": "^2.5.1", - "@opentelemetry/instrumentation": "^0.211.0", - "@opentelemetry/instrumentation-http": "^0.211.0", - "@opentelemetry/resources": "^2.5.1", - "@opentelemetry/sdk-trace-node": "^2.5.1", - "@opentelemetry/semantic-conventions": "^1.39.0", + "@opentelemetry/context-async-hooks": "^2.6.0", + "@opentelemetry/core": "^2.6.0", + "@opentelemetry/instrumentation": "^0.213.0", + "@opentelemetry/instrumentation-http": "^0.213.0", + "@opentelemetry/resources": "^2.6.0", + "@opentelemetry/sdk-trace-node": "^2.6.0", + "@opentelemetry/semantic-conventions": "^1.40.0", "@sentry/node-core": "latest || *", "@sentry/opentelemetry": "latest || *", "@types/express": "4.17.17", diff --git a/dev-packages/e2e-tests/test-applications/node-core-express-otel-v2-sdk-node/package.json b/dev-packages/e2e-tests/test-applications/node-core-express-otel-v2-sdk-node/package.json index 16b87251e743..749c25696505 100644 --- a/dev-packages/e2e-tests/test-applications/node-core-express-otel-v2-sdk-node/package.json +++ b/dev-packages/e2e-tests/test-applications/node-core-express-otel-v2-sdk-node/package.json @@ -12,15 +12,15 @@ }, "dependencies": { "@opentelemetry/api": "^1.9.0", - "@opentelemetry/context-async-hooks": "^2.5.1", - "@opentelemetry/core": "^2.5.1", - "@opentelemetry/instrumentation": "^0.211.0", - "@opentelemetry/instrumentation-http": "^0.211.0", - "@opentelemetry/resources": "^2.5.1", - "@opentelemetry/sdk-trace-node": "^2.5.1", - "@opentelemetry/semantic-conventions": "^1.39.0", - "@opentelemetry/sdk-node": "^0.211.0", - "@opentelemetry/exporter-trace-otlp-http": "^0.211.0", + 
"@opentelemetry/context-async-hooks": "^2.6.0", + "@opentelemetry/core": "^2.6.0", + "@opentelemetry/instrumentation": "^0.213.0", + "@opentelemetry/instrumentation-http": "^0.213.0", + "@opentelemetry/resources": "^2.6.0", + "@opentelemetry/sdk-trace-node": "^2.6.0", + "@opentelemetry/semantic-conventions": "^1.40.0", + "@opentelemetry/sdk-node": "^0.213.0", + "@opentelemetry/exporter-trace-otlp-http": "^0.213.0", "@sentry/node-core": "latest || *", "@sentry/opentelemetry": "latest || *", "@types/express": "4.17.17", diff --git a/dev-packages/e2e-tests/test-applications/node-core-express-otel-v2/package.json b/dev-packages/e2e-tests/test-applications/node-core-express-otel-v2/package.json index c6fe70e91773..4db7c3440bed 100644 --- a/dev-packages/e2e-tests/test-applications/node-core-express-otel-v2/package.json +++ b/dev-packages/e2e-tests/test-applications/node-core-express-otel-v2/package.json @@ -14,13 +14,13 @@ "@sentry/node-core": "latest || *", "@sentry/opentelemetry": "latest || *", "@opentelemetry/api": "^1.9.0", - "@opentelemetry/context-async-hooks": "^2.5.1", - "@opentelemetry/core": "^2.5.1", - "@opentelemetry/instrumentation": "^0.211.0", - "@opentelemetry/instrumentation-http": "^0.211.0", - "@opentelemetry/resources": "^2.5.1", - "@opentelemetry/sdk-trace-node": "^2.5.1", - "@opentelemetry/semantic-conventions": "^1.39.0", + "@opentelemetry/context-async-hooks": "^2.6.0", + "@opentelemetry/core": "^2.6.0", + "@opentelemetry/instrumentation": "^0.213.0", + "@opentelemetry/instrumentation-http": "^0.213.0", + "@opentelemetry/resources": "^2.6.0", + "@opentelemetry/sdk-trace-node": "^2.6.0", + "@opentelemetry/semantic-conventions": "^1.40.0", "@types/express": "^4.17.21", "@types/node": "^18.19.1", "express": "^4.21.2", diff --git a/dev-packages/e2e-tests/test-applications/node-express/src/app.ts b/dev-packages/e2e-tests/test-applications/node-express/src/app.ts index 2a7cccf238cc..dc755f95d062 100644 --- 
a/dev-packages/e2e-tests/test-applications/node-express/src/app.ts +++ b/dev-packages/e2e-tests/test-applications/node-express/src/app.ts @@ -14,6 +14,13 @@ Sentry.init({ tunnel: `http://localhost:3031/`, // proxy server tracesSampleRate: 1, enableLogs: true, + integrations: [ + Sentry.nativeNodeFetchIntegration({ + headersToSpanAttributes: { + responseHeaders: ['content-length'], + }, + }), + ], }); import { TRPCError, initTRPC } from '@trpc/server'; @@ -59,6 +66,12 @@ app.get('/test-transaction', function (_req, res) { res.send({ status: 'ok' }); }); + +app.get('/test-outgoing-fetch', async function (_req, res) { + const response = await fetch('http://localhost:3030/test-success'); + const data = await response.json(); + res.send(data); +}); app.get('/test-error', async function (req, res) { const exceptionId = Sentry.captureException(new Error('This is an error')); diff --git a/dev-packages/e2e-tests/test-applications/node-express/tests/transactions.test.ts b/dev-packages/e2e-tests/test-applications/node-express/tests/transactions.test.ts index ce809b6ccdee..7784d7fbe3fe 100644 --- a/dev-packages/e2e-tests/test-applications/node-express/tests/transactions.test.ts +++ b/dev-packages/e2e-tests/test-applications/node-express/tests/transactions.test.ts @@ -216,6 +216,35 @@ test('Sends an API route transaction for an errored route', async ({ baseURL }) }); }); +test('Outgoing fetch spans include response headers when headersToSpanAttributes is configured', async ({ + baseURL, +}) => { + const transactionEventPromise = waitForTransaction('node-express', transactionEvent => { + return ( + transactionEvent?.contexts?.trace?.op === 'http.server' && + transactionEvent?.transaction === 'GET /test-outgoing-fetch' + ); + }); + + await fetch(`${baseURL}/test-outgoing-fetch`); + + const transactionEvent = await transactionEventPromise; + + const spans = transactionEvent.spans || []; + + // Find the outgoing fetch span (http.client operation from undici instrumentation) + const 
fetchSpan = spans.find( + span => span.op === 'http.client' && span.description?.includes('localhost:3030/test-success'), + ); + + expect(fetchSpan).toBeDefined(); + expect(fetchSpan?.data).toEqual( + expect.objectContaining({ + 'http.response.header.content-length': [expect.any(String)], + }), + ); +}); + test('Extracts HTTP request headers as span attributes', async ({ baseURL }) => { const transactionEventPromise = waitForTransaction('node-express', transactionEvent => { return ( diff --git a/dev-packages/e2e-tests/test-applications/node-otel-custom-sampler/package.json b/dev-packages/e2e-tests/test-applications/node-otel-custom-sampler/package.json index d5db893eaa6d..76fcf398380d 100644 --- a/dev-packages/e2e-tests/test-applications/node-otel-custom-sampler/package.json +++ b/dev-packages/e2e-tests/test-applications/node-otel-custom-sampler/package.json @@ -12,7 +12,7 @@ }, "dependencies": { "@opentelemetry/api": "^1.9.0", - "@opentelemetry/sdk-trace-node": "^2.4.0", + "@opentelemetry/sdk-trace-node": "^2.6.0", "@sentry/node": "latest || *", "@sentry/opentelemetry": "latest || *", "@types/express": "4.17.17", diff --git a/dev-packages/e2e-tests/test-applications/node-otel-sdk-node/package.json b/dev-packages/e2e-tests/test-applications/node-otel-sdk-node/package.json index b08ce5b8894e..f695309f00d7 100644 --- a/dev-packages/e2e-tests/test-applications/node-otel-sdk-node/package.json +++ b/dev-packages/e2e-tests/test-applications/node-otel-sdk-node/package.json @@ -11,8 +11,8 @@ "test:assert": "pnpm test" }, "dependencies": { - "@opentelemetry/sdk-node": "0.210.0", - "@opentelemetry/exporter-trace-otlp-http": "0.210.0", + "@opentelemetry/sdk-node": "0.213.0", + "@opentelemetry/exporter-trace-otlp-http": "0.213.0", "@sentry/node": "latest || *", "@types/express": "4.17.17", "@types/node": "^18.19.1", diff --git a/dev-packages/e2e-tests/test-applications/node-otel-without-tracing/package.json 
b/dev-packages/e2e-tests/test-applications/node-otel-without-tracing/package.json index 9202de84a2b7..59da61d6d7da 100644 --- a/dev-packages/e2e-tests/test-applications/node-otel-without-tracing/package.json +++ b/dev-packages/e2e-tests/test-applications/node-otel-without-tracing/package.json @@ -12,11 +12,11 @@ }, "dependencies": { "@opentelemetry/api": "1.9.0", - "@opentelemetry/sdk-trace-node": "2.5.0", - "@opentelemetry/exporter-trace-otlp-http": "0.211.0", - "@opentelemetry/instrumentation-undici": "0.21.0", - "@opentelemetry/instrumentation-http": "0.211.0", - "@opentelemetry/instrumentation": "0.211.0", + "@opentelemetry/sdk-trace-node": "2.6.0", + "@opentelemetry/exporter-trace-otlp-http": "0.213.0", + "@opentelemetry/instrumentation-undici": "0.23.0", + "@opentelemetry/instrumentation-http": "0.213.0", + "@opentelemetry/instrumentation": "0.213.0", "@sentry/node": "latest || *", "@types/express": "4.17.17", "@types/node": "^18.19.1", diff --git a/dev-packages/e2e-tests/test-applications/node-otel-without-tracing/src/instrument.ts b/dev-packages/e2e-tests/test-applications/node-otel-without-tracing/src/instrument.ts index 96d2472be497..ea9b6ae57545 100644 --- a/dev-packages/e2e-tests/test-applications/node-otel-without-tracing/src/instrument.ts +++ b/dev-packages/e2e-tests/test-applications/node-otel-without-tracing/src/instrument.ts @@ -38,5 +38,12 @@ provider.register({ }); registerInstrumentations({ - instrumentations: [new UndiciInstrumentation(), new HttpInstrumentation()], + instrumentations: [ + new UndiciInstrumentation({ + headersToSpanAttributes: { + responseHeaders: ['content-length'], + }, + }), + new HttpInstrumentation(), + ], }); diff --git a/dev-packages/e2e-tests/test-applications/node-otel-without-tracing/tests/transactions.test.ts b/dev-packages/e2e-tests/test-applications/node-otel-without-tracing/tests/transactions.test.ts index 00ba1a079b78..9823d0dc6b12 100644 --- 
a/dev-packages/e2e-tests/test-applications/node-otel-without-tracing/tests/transactions.test.ts +++ b/dev-packages/e2e-tests/test-applications/node-otel-without-tracing/tests/transactions.test.ts @@ -75,7 +75,10 @@ test('Sends an API route transaction to OTLP', async ({ baseURL }) => { { key: 'network.peer.address', value: { stringValue: expect.any(String) } }, { key: 'network.peer.port', value: { intValue: 3030 } }, { key: 'http.response.status_code', value: { intValue: 200 } }, - { key: 'http.response.header.content-length', value: { intValue: 16 } }, + { + key: 'http.response.header.content-length', + value: { arrayValue: { values: [{ stringValue: expect.any(String) }] } }, + }, ]), droppedAttributesCount: 0, events: [], diff --git a/dev-packages/e2e-tests/test-applications/node-otel/package.json b/dev-packages/e2e-tests/test-applications/node-otel/package.json index 9b94da03bfdc..ef7b17112108 100644 --- a/dev-packages/e2e-tests/test-applications/node-otel/package.json +++ b/dev-packages/e2e-tests/test-applications/node-otel/package.json @@ -11,8 +11,8 @@ "test:assert": "pnpm test" }, "dependencies": { - "@opentelemetry/sdk-node": "0.210.0", - "@opentelemetry/exporter-trace-otlp-http": "0.210.0", + "@opentelemetry/sdk-node": "0.213.0", + "@opentelemetry/exporter-trace-otlp-http": "0.213.0", "@sentry/node": "latest || *", "@types/express": "4.17.17", "@types/node": "^18.19.1", diff --git a/dev-packages/e2e-tests/verdaccio-config/config.yaml b/dev-packages/e2e-tests/verdaccio-config/config.yaml index 6e57ee2ea812..beb758aca018 100644 --- a/dev-packages/e2e-tests/verdaccio-config/config.yaml +++ b/dev-packages/e2e-tests/verdaccio-config/config.yaml @@ -74,6 +74,12 @@ packages: unpublish: $all # proxy: npmjs # Don't proxy for E2E tests! + '@sentry/effect': + access: $all + publish: $all + unpublish: $all + # proxy: npmjs # Don't proxy for E2E tests! 
+ '@sentry/ember': access: $all publish: $all diff --git a/dev-packages/external-contributor-gh-action/package.json b/dev-packages/external-contributor-gh-action/package.json index 7358d8e9ed10..14fe3cd29ad9 100644 --- a/dev-packages/external-contributor-gh-action/package.json +++ b/dev-packages/external-contributor-gh-action/package.json @@ -10,8 +10,8 @@ "main": "index.mjs", "type": "module", "scripts": { - "lint": "oxlint .", - "fix": "oxlint . --fix" + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --type-aware", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --fix --type-aware" }, "dependencies": { "@actions/core": "1.10.1" diff --git a/dev-packages/node-core-integration-tests/package.json b/dev-packages/node-core-integration-tests/package.json index a68f29cd9f82..1cb321b0525c 100644 --- a/dev-packages/node-core-integration-tests/package.json +++ b/dev-packages/node-core-integration-tests/package.json @@ -16,8 +16,8 @@ "build:types": "tsc -p tsconfig.types.json", "clean": "rimraf -g **/node_modules && run-p clean:script", "clean:script": "node scripts/clean.js", - "lint": "oxlint .", - "fix": "oxlint . --fix", + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --type-aware", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . 
--fix --type-aware", "type-check": "tsc", "test": "vitest run", "test:watch": "yarn test --watch" @@ -27,13 +27,13 @@ "@nestjs/core": "^11", "@nestjs/platform-express": "^11", "@opentelemetry/api": "^1.9.0", - "@opentelemetry/context-async-hooks": "^2.5.1", - "@opentelemetry/core": "^2.5.1", - "@opentelemetry/instrumentation": "^0.211.0", - "@opentelemetry/instrumentation-http": "0.211.0", - "@opentelemetry/resources": "^2.5.1", - "@opentelemetry/sdk-trace-base": "^2.5.1", - "@opentelemetry/semantic-conventions": "^1.39.0", + "@opentelemetry/context-async-hooks": "^2.6.0", + "@opentelemetry/core": "^2.6.0", + "@opentelemetry/instrumentation": "^0.213.0", + "@opentelemetry/instrumentation-http": "0.213.0", + "@opentelemetry/resources": "^2.6.0", + "@opentelemetry/sdk-trace-base": "^2.6.0", + "@opentelemetry/semantic-conventions": "^1.40.0", "@sentry/core": "10.43.0", "@sentry/node-core": "10.43.0", "body-parser": "^2.2.2", diff --git a/dev-packages/node-core-integration-tests/suites/light-mode/logs/test.ts b/dev-packages/node-core-integration-tests/suites/light-mode/logs/test.ts index f1dfde5ecdf8..25096f1be7e5 100644 --- a/dev-packages/node-core-integration-tests/suites/light-mode/logs/test.ts +++ b/dev-packages/node-core-integration-tests/suites/light-mode/logs/test.ts @@ -18,6 +18,7 @@ describe('light mode logs', () => { 'sentry.release': { type: 'string', value: '1.0.0' }, 'sentry.sdk.name': { type: 'string', value: 'sentry.javascript.node-light' }, 'sentry.sdk.version': { type: 'string', value: expect.any(String) }, + 'sentry.timestamp.sequence': { type: 'integer', value: expect.any(Number) }, 'server.address': { type: 'string', value: expect.any(String) }, }, body: 'test info log', @@ -31,6 +32,7 @@ describe('light mode logs', () => { 'sentry.release': { type: 'string', value: '1.0.0' }, 'sentry.sdk.name': { type: 'string', value: 'sentry.javascript.node-light' }, 'sentry.sdk.version': { type: 'string', value: expect.any(String) }, + 
'sentry.timestamp.sequence': { type: 'integer', value: expect.any(Number) }, 'server.address': { type: 'string', value: expect.any(String) }, }, body: 'test error log', diff --git a/dev-packages/node-core-integration-tests/suites/public-api/logs/test.ts b/dev-packages/node-core-integration-tests/suites/public-api/logs/test.ts index 6f19a7152eae..53c80a6194c5 100644 --- a/dev-packages/node-core-integration-tests/suites/public-api/logs/test.ts +++ b/dev-packages/node-core-integration-tests/suites/public-api/logs/test.ts @@ -26,6 +26,10 @@ describe('logger public API', () => { type: 'string', value: expect.any(String), }, + 'sentry.timestamp.sequence': { + type: 'integer', + value: expect.any(Number), + }, 'server.address': { type: 'string', value: expect.any(String), @@ -63,6 +67,10 @@ describe('logger public API', () => { type: 'string', value: expect.any(String), }, + 'sentry.timestamp.sequence': { + type: 'integer', + value: expect.any(Number), + }, 'server.address': { type: 'string', value: expect.any(String), @@ -100,6 +108,10 @@ describe('logger public API', () => { type: 'string', value: expect.any(String), }, + 'sentry.timestamp.sequence': { + type: 'integer', + value: expect.any(Number), + }, 'server.address': { type: 'string', value: expect.any(String), diff --git a/dev-packages/node-integration-tests/package.json b/dev-packages/node-integration-tests/package.json index ae410b626942..c31e4d2ce5be 100644 --- a/dev-packages/node-integration-tests/package.json +++ b/dev-packages/node-integration-tests/package.json @@ -16,8 +16,8 @@ "build:types": "tsc -p tsconfig.types.json", "clean": "rimraf -g suites/**/node_modules suites/**/tmp_* && run-p clean:script", "clean:script": "node scripts/clean.js", - "lint": "oxlint .", - "fix": "oxlint . --fix", + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --type-aware", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . 
--fix --type-aware", "type-check": "tsc", "test": "vitest run", "test:watch": "yarn test --watch" @@ -40,6 +40,7 @@ "@prisma/client": "6.15.0", "@sentry/aws-serverless": "10.43.0", "@sentry/core": "10.43.0", + "@sentry/hono": "10.43.0", "@sentry/node": "10.43.0", "@types/mongodb": "^3.6.20", "@types/mysql": "^2.15.21", @@ -56,7 +57,7 @@ "generic-pool": "^3.9.0", "graphql": "^16.11.0", "graphql-tag": "^2.12.6", - "hono": "^4.12.5", + "hono": "^4.12.7", "http-terminator": "^3.2.0", "ioredis": "^5.4.1", "kafkajs": "2.2.4", @@ -66,7 +67,7 @@ "mongodb-memory-server-global": "^10.1.4", "mongoose": "^6.13.6", "mysql": "^2.18.1", - "mysql2": "^3.11.3", + "mysql2": "^3.19.1", "nock": "^13.5.5", "node-cron": "^3.0.3", "node-schedule": "^2.1.1", @@ -80,7 +81,7 @@ "redis-4": "npm:redis@^4.6.14", "reflect-metadata": "0.2.1", "rxjs": "^7.8.2", - "tedious": "^18.6.1", + "tedious": "^19.2.1", "winston": "^3.17.0", "yargs": "^16.2.0" }, @@ -90,7 +91,7 @@ "@types/node-cron": "^3.0.11", "@types/node-schedule": "^2.1.7", "eslint-plugin-regexp": "^1.15.0", - "file-type": "^20.4.1", + "file-type": "^21.3.1", "globby": "11", "react": "^18.3.1", "zod": "^3.24.1" diff --git a/dev-packages/node-integration-tests/suites/hono-sdk/instrument.mjs b/dev-packages/node-integration-tests/suites/hono-sdk/instrument.mjs new file mode 100644 index 000000000000..508cbe487e91 --- /dev/null +++ b/dev-packages/node-integration-tests/suites/hono-sdk/instrument.mjs @@ -0,0 +1 @@ +// Sentry is initialized by the @sentry/hono/node middleware in scenario.mjs diff --git a/dev-packages/node-integration-tests/suites/hono-sdk/scenario.mjs b/dev-packages/node-integration-tests/suites/hono-sdk/scenario.mjs new file mode 100644 index 000000000000..92a08fcb5bb5 --- /dev/null +++ b/dev-packages/node-integration-tests/suites/hono-sdk/scenario.mjs @@ -0,0 +1,31 @@ +import { serve } from '@hono/node-server'; +import { sentry } from '@sentry/hono/node'; +import { loggingTransport, sendPortToRunner } from 
'@sentry-internal/node-integration-tests'; +import { Hono } from 'hono'; + +const app = new Hono(); + +app.use( + sentry(app, { + dsn: 'https://public@dsn.ingest.sentry.io/1337', + tracesSampleRate: 1.0, + transport: loggingTransport, + }), +); + +app.get('/', c => { + return c.text('Hello from Hono on Node!'); +}); + +app.get('/hello/:name', c => { + const name = c.req.param('name'); + return c.text(`Hello, ${name}!`); +}); + +app.get('/error/:param', () => { + throw new Error('Test error from Hono app'); +}); + +serve({ fetch: app.fetch, port: 0 }, info => { + sendPortToRunner(info.port); +}); diff --git a/dev-packages/node-integration-tests/suites/hono-sdk/test.ts b/dev-packages/node-integration-tests/suites/hono-sdk/test.ts new file mode 100644 index 000000000000..97c8b3481dc5 --- /dev/null +++ b/dev-packages/node-integration-tests/suites/hono-sdk/test.ts @@ -0,0 +1,96 @@ +import { afterAll, describe, expect } from 'vitest'; +import { cleanupChildProcesses, createEsmAndCjsTests } from '../../utils/runner'; + +describe('hono-sdk (Node)', () => { + afterAll(() => { + cleanupChildProcesses(); + }); + + createEsmAndCjsTests(__dirname, 'scenario.mjs', 'instrument.mjs', (createRunner, test) => { + test('creates a transaction for a basic GET request', async () => { + const runner = createRunner() + .expect({ + transaction: { + transaction: 'GET /', + contexts: { + trace: { + op: 'http.server', + status: 'ok', + }, + }, + }, + }) + .start(); + runner.makeRequest('get', '/'); + await runner.completed(); + }); + + test('creates a transaction with a parametrized route name', async () => { + const runner = createRunner() + .expect({ + transaction: { + transaction: 'GET /hello/:name', + transaction_info: { + source: 'route', + }, + contexts: { + trace: { + op: 'http.server', + status: 'ok', + }, + }, + }, + }) + .start(); + runner.makeRequest('get', '/hello/world'); + await runner.completed(); + }); + + test('captures an error with the correct mechanism', async () => { + 
const runner = createRunner() + .ignore('transaction') + .expect({ + event: { + exception: { + values: [ + { + type: 'Error', + value: 'Test error from Hono app', + mechanism: { + type: 'auto.http.hono.context_error', + handled: false, + }, + }, + ], + }, + transaction: 'GET /error/:param', + }, + }) + .start(); + runner.makeRequest('get', '/error/param-123', { expectError: true }); + await runner.completed(); + }); + + test('creates a transaction with internal_error status when an error occurs', async () => { + const runner = createRunner() + .ignore('event') + .expect({ + transaction: { + transaction: 'GET /error/:param', + contexts: { + trace: { + op: 'http.server', + status: 'internal_error', + data: expect.objectContaining({ + 'http.response.status_code': 500, + }), + }, + }, + }, + }) + .start(); + runner.makeRequest('get', '/error/param-456', { expectError: true }); + await runner.completed(); + }); + }); +}); diff --git a/dev-packages/node-integration-tests/suites/public-api/logger/test.ts b/dev-packages/node-integration-tests/suites/public-api/logger/test.ts index f4be1cccc84b..6b9f43e738d2 100644 --- a/dev-packages/node-integration-tests/suites/public-api/logger/test.ts +++ b/dev-packages/node-integration-tests/suites/public-api/logger/test.ts @@ -19,6 +19,10 @@ const commonAttributes: SerializedLog['attributes'] = { type: 'string', value: expect.any(String), }, + 'sentry.timestamp.sequence': { + type: 'integer', + value: expect.any(Number), + }, 'server.address': { type: 'string', value: expect.any(String), diff --git a/dev-packages/node-integration-tests/suites/public-api/metrics/server-address/test.ts b/dev-packages/node-integration-tests/suites/public-api/metrics/server-address/test.ts index 1ee4eda2de3e..048513da3c19 100644 --- a/dev-packages/node-integration-tests/suites/public-api/metrics/server-address/test.ts +++ b/dev-packages/node-integration-tests/suites/public-api/metrics/server-address/test.ts @@ -24,6 +24,7 @@ describe('metrics 
server.address', () => { 'sentry.environment': { value: 'test', type: 'string' }, 'sentry.sdk.name': { value: 'sentry.javascript.node', type: 'string' }, 'sentry.sdk.version': { value: expect.any(String), type: 'string' }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, }, }, ], diff --git a/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts b/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts index 719333488051..2c72ec7daadd 100644 --- a/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts @@ -131,7 +131,6 @@ describe('Anthropic integration', () => { data: expect.objectContaining({ 'http.request.method': 'POST', 'http.request.method_original': 'POST', - 'http.response.header.content-length': 247, 'http.response.status_code': 200, 'otel.kind': 'CLIENT', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'http.client', @@ -164,7 +163,6 @@ describe('Anthropic integration', () => { data: expect.objectContaining({ 'http.request.method': 'POST', 'http.request.method_original': 'POST', - 'http.response.header.content-length': 15, 'http.response.status_code': 404, 'otel.kind': 'CLIENT', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'http.client', @@ -198,7 +196,6 @@ describe('Anthropic integration', () => { data: expect.objectContaining({ 'http.request.method': 'POST', 'http.request.method_original': 'POST', - 'http.response.header.content-length': 19, 'http.response.status_code': 200, 'otel.kind': 'CLIENT', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'http.client', @@ -233,7 +230,6 @@ describe('Anthropic integration', () => { data: expect.objectContaining({ 'http.request.method': 'GET', 'http.request.method_original': 'GET', - 'http.response.header.content-length': 123, 'http.response.status_code': 200, 'otel.kind': 'CLIENT', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'http.client', diff --git a/dev-packages/node-integration-tests/suites/tracing/google-genai/scenario.mjs 
b/dev-packages/node-integration-tests/suites/tracing/google-genai/scenario.mjs index 40f8af031f5a..2d7a09e6f638 100644 --- a/dev-packages/node-integration-tests/suites/tracing/google-genai/scenario.mjs +++ b/dev-packages/node-integration-tests/suites/tracing/google-genai/scenario.mjs @@ -102,7 +102,7 @@ async function run() { }, ], }); - } catch (error) { + } catch { // Expected error } }); diff --git a/dev-packages/node-integration-tests/suites/tracing/http-client-spans/fetch-strip-query/test.ts b/dev-packages/node-integration-tests/suites/tracing/http-client-spans/fetch-strip-query/test.ts index 8eea877dc72e..2020cfdd09b0 100644 --- a/dev-packages/node-integration-tests/suites/tracing/http-client-spans/fetch-strip-query/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/http-client-spans/fetch-strip-query/test.ts @@ -28,7 +28,6 @@ test('strips and handles query params in spans of outgoing fetch requests', asyn 'http.query': 'id=1', 'http.request.method': 'GET', 'http.request.method_original': 'GET', - 'http.response.header.content-length': 0, 'http.response.status_code': 200, 'network.peer.address': '::1', 'network.peer.port': expect.any(Number), diff --git a/dev-packages/node-integration-tests/suites/tracing/tedious/test.ts b/dev-packages/node-integration-tests/suites/tracing/tedious/test.ts index de78cdf978aa..4b64611ac8f2 100644 --- a/dev-packages/node-integration-tests/suites/tracing/tedious/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/tedious/test.ts @@ -1,7 +1,6 @@ import { afterAll, describe, expect, test } from 'vitest'; import { cleanupChildProcesses, createRunner } from '../../../utils/runner'; -// eslint-disable-next-line jest/no-disabled-tests describe.skip('tedious auto instrumentation', { timeout: 75_000 }, () => { afterAll(() => { cleanupChildProcesses(); diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/scenario-embeddings.mjs 
b/dev-packages/node-integration-tests/suites/tracing/vercelai/scenario-embeddings.mjs new file mode 100644 index 000000000000..23610937bb29 --- /dev/null +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/scenario-embeddings.mjs @@ -0,0 +1,35 @@ +import * as Sentry from '@sentry/node'; +import { embed, embedMany } from 'ai'; +import { MockEmbeddingModelV1 } from 'ai/test'; + +async function run() { + await Sentry.startSpan({ op: 'function', name: 'main' }, async () => { + // Single embedding + await embed({ + model: new MockEmbeddingModelV1({ + doEmbed: async () => ({ + embeddings: [[0.1, 0.2, 0.3]], + usage: { tokens: 10 }, + }), + }), + value: 'Embedding test!', + }); + + // Multiple embeddings + await embedMany({ + model: new MockEmbeddingModelV1({ + maxEmbeddingsPerCall: 5, + doEmbed: async () => ({ + embeddings: [ + [0.1, 0.2, 0.3], + [0.4, 0.5, 0.6], + ], + usage: { tokens: 20 }, + }), + }), + values: ['First input', 'Second input'], + }); + }); +} + +run(); diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/scenario.mjs b/dev-packages/node-integration-tests/suites/tracing/vercelai/scenario.mjs index 9bfdd4a9793a..b6abe6fdf673 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/scenario.mjs +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/scenario.mjs @@ -51,6 +51,7 @@ async function run() { }), tools: { getWeather: { + description: 'Get the current weather for a location', parameters: z.object({ location: z.string() }), execute: async args => { return `Weather in ${args.location}: Sunny, 72°F`; diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts index 2919815b8f0d..7907544a6d11 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts @@ -2,19 +2,20 @@ import { SEMANTIC_ATTRIBUTE_SENTRY_OP, 
SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from ' import type { Event } from '@sentry/node'; import { afterAll, describe, expect } from 'vitest'; import { + GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE, GEN_AI_INPUT_MESSAGES_ATTRIBUTE, GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE, GEN_AI_OPERATION_NAME_ATTRIBUTE, + GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE, GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE, GEN_AI_REQUEST_MODEL_ATTRIBUTE, GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE, GEN_AI_RESPONSE_ID_ATTRIBUTE, GEN_AI_RESPONSE_MODEL_ATTRIBUTE, - GEN_AI_RESPONSE_TEXT_ATTRIBUTE, - GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, GEN_AI_SYSTEM_ATTRIBUTE, GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE, GEN_AI_TOOL_CALL_ID_ATTRIBUTE, + GEN_AI_TOOL_DESCRIPTION_ATTRIBUTE, GEN_AI_TOOL_INPUT_ATTRIBUTE, GEN_AI_TOOL_NAME_ATTRIBUTE, GEN_AI_TOOL_OUTPUT_ATTRIBUTE, @@ -91,9 +92,10 @@ describe('Vercel AI integration', () => { data: { [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]', + [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + '[{"role":"assistant","parts":[{"type":"text","content":"Second span here!"}],"finish_reason":"stop"}]', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', - [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, @@ -119,11 +121,12 @@ describe('Vercel AI integration', () => { data: { [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, + [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + '[{"role":"assistant","parts":[{"type":"text","content":"Second span here!"}],"finish_reason":"stop"}]', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'], [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', - 
[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, @@ -201,6 +204,7 @@ describe('Vercel AI integration', () => { status: 'ok', }), // Seventh span - tool call execution span + // Note: gen_ai.tool.description is NOT present when sendDefaultPii: false because ai.prompt.tools is not recorded expect.objectContaining({ data: { [GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1', @@ -220,7 +224,7 @@ describe('Vercel AI integration', () => { }; const EXPECTED_AVAILABLE_TOOLS_JSON = - '[{"type":"function","name":"getWeather","parameters":{"type":"object","properties":{"location":{"type":"string"}},"required":["location"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}}]'; + '[{"type":"function","name":"getWeather","description":"Get the current weather for a location","parameters":{"type":"object","properties":{"location":{"type":"string"}},"required":["location"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}}]'; const EXPECTED_TRANSACTION_DEFAULT_PII_TRUE = { transaction: 'main', @@ -230,9 +234,10 @@ describe('Vercel AI integration', () => { data: { [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the first span?"}]', + [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + '[{"role":"assistant","parts":[{"type":"text","content":"First span here!"}],"finish_reason":"stop"}]', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', - [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'First span here!', [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, @@ -264,11 +269,12 @@ describe('Vercel AI integration', () => { [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: 
'[{"role":"user","content":[{"type":"text","text":"Where is the first span?"}]}]', + [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + '[{"role":"assistant","parts":[{"type":"text","content":"First span here!"}],"finish_reason":"stop"}]', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'], [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', - [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'First span here!', [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, @@ -302,9 +308,10 @@ describe('Vercel AI integration', () => { data: { [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]', + [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + '[{"role":"assistant","parts":[{"type":"text","content":"Second span here!"}],"finish_reason":"stop"}]', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', - [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, @@ -335,11 +342,12 @@ describe('Vercel AI integration', () => { data: { [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, + [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + '[{"role":"assistant","parts":[{"type":"text","content":"Second span here!"}],"finish_reason":"stop"}]', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'], [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', - [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, 
@@ -373,10 +381,10 @@ describe('Vercel AI integration', () => { data: { [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather in San Francisco?"}]', + [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + '[{"role":"assistant","parts":[{"type":"text","content":"Tool call completed!"},{"type":"tool_call","id":"call-1","name":"getWeather","arguments":"{ \\"location\\": \\"San Francisco\\" }"}],"finish_reason":"tool_call"}]', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', - [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Tool call completed!', - [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String), [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, @@ -408,12 +416,12 @@ describe('Vercel AI integration', () => { [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: EXPECTED_AVAILABLE_TOOLS_JSON, [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, + [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + '[{"role":"assistant","parts":[{"type":"text","content":"Tool call completed!"},{"type":"tool_call","id":"call-1","name":"getWeather","arguments":"{ \\"location\\": \\"San Francisco\\" }"}],"finish_reason":"tool_call"}]', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'], [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String), [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', - [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Tool call completed!', - [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String), [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider', [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, @@ -447,6 +455,7 @@ describe('Vercel AI integration', () => { expect.objectContaining({ data: { [GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1', + 
[GEN_AI_TOOL_DESCRIPTION_ATTRIBUTE]: 'Get the current weather for a location', [GEN_AI_TOOL_INPUT_ATTRIBUTE]: expect.any(String), [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather', [GEN_AI_TOOL_OUTPUT_ATTRIBUTE]: expect.any(String), @@ -809,7 +818,6 @@ describe('Vercel AI integration', () => { data: expect.objectContaining({ [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 3, [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.stringMatching(/^\[.*"(?:text|content)":"C+".*\]$/), - [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Response to truncated messages', }), }), // Second call: Last message is small and kept intact @@ -819,7 +827,6 @@ describe('Vercel AI integration', () => { [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.stringContaining( 'This is a small message that fits within the limit', ), - [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Response to small message', }), }), ]), @@ -830,4 +837,90 @@ describe('Vercel AI integration', () => { }); }, ); + + createEsmAndCjsTests(__dirname, 'scenario-embeddings.mjs', 'instrument.mjs', (createRunner, test) => { + test('creates embedding related spans with sendDefaultPii: false', async () => { + const expectedTransaction = { + transaction: 'main', + spans: expect.arrayContaining([ + // embed doEmbed span + expect.objectContaining({ + data: expect.objectContaining({ + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 10, + }), + description: 'embeddings mock-model-id', + op: 'gen_ai.embeddings', + origin: 'auto.vercelai.otel', + status: 'ok', + }), + // embedMany doEmbed span + expect.objectContaining({ + data: expect.objectContaining({ + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', + 
[GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20, + }), + description: 'embeddings mock-model-id', + op: 'gen_ai.embeddings', + origin: 'auto.vercelai.otel', + status: 'ok', + }), + ]), + }; + + await createRunner().expect({ transaction: expectedTransaction }).start().completed(); + }); + }); + + createEsmAndCjsTests(__dirname, 'scenario-embeddings.mjs', 'instrument-with-pii.mjs', (createRunner, test) => { + test('creates embedding related spans with sendDefaultPii: true', async () => { + const expectedTransaction = { + transaction: 'main', + spans: expect.arrayContaining([ + // embed doEmbed span with input + expect.objectContaining({ + data: expect.objectContaining({ + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE]: 'Embedding test!', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 10, + }), + description: 'embeddings mock-model-id', + op: 'gen_ai.embeddings', + origin: 'auto.vercelai.otel', + status: 'ok', + }), + // embedMany doEmbed span with input + expect.objectContaining({ + data: expect.objectContaining({ + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings', + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel', + [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id', + [GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE]: '["First input","Second input"]', + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 20, + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20, + }), + description: 'embeddings mock-model-id', + op: 'gen_ai.embeddings', + origin: 'auto.vercelai.otel', + status: 'ok', + }), + ]), + }; + + await createRunner().expect({ transaction: expectedTransaction }).start().completed(); + }); + }); }); 
diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/scenario.mjs b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/scenario.mjs index 9ef1b8000741..2c83234064ae 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/scenario.mjs +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/scenario.mjs @@ -47,6 +47,7 @@ async function run() { }), tools: { getWeather: tool({ + description: 'Get the current weather for a location', inputSchema: z.object({ location: z.string() }), execute: async ({ location }) => `Weather in ${location}: Sunny, 72°F`, }), diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts index 7d981a878363..a84b80e9abc5 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts @@ -5,15 +5,15 @@ import { GEN_AI_INPUT_MESSAGES_ATTRIBUTE, GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE, GEN_AI_OPERATION_NAME_ATTRIBUTE, + GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE, GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE, GEN_AI_REQUEST_MODEL_ATTRIBUTE, GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE, GEN_AI_RESPONSE_ID_ATTRIBUTE, GEN_AI_RESPONSE_MODEL_ATTRIBUTE, - GEN_AI_RESPONSE_TEXT_ATTRIBUTE, - GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, GEN_AI_SYSTEM_ATTRIBUTE, GEN_AI_TOOL_CALL_ID_ATTRIBUTE, + GEN_AI_TOOL_DESCRIPTION_ATTRIBUTE, GEN_AI_TOOL_INPUT_ATTRIBUTE, GEN_AI_TOOL_NAME_ATTRIBUTE, GEN_AI_TOOL_OUTPUT_ATTRIBUTE, @@ -93,7 +93,8 @@ describe('Vercel AI integration (V5)', () => { 'vercel.ai.pipeline.name': 'generateText', 'vercel.ai.prompt': '[{"role":"user","content":"Where is the second span?"}]', 'vercel.ai.response.finishReason': 'stop', - [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), + [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + '[{"role":"assistant","parts":[{"type":"text","content":"Second span 
here!"}],"finish_reason":"stop"}]', 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, @@ -127,7 +128,8 @@ describe('Vercel AI integration (V5)', () => { 'vercel.ai.response.finishReason': 'stop', 'vercel.ai.response.model': 'mock-model-id', 'vercel.ai.response.id': expect.any(String), - [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), + [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + '[{"role":"assistant","parts":[{"type":"text","content":"Second span here!"}],"finish_reason":"stop"}]', 'vercel.ai.response.timestamp': expect.any(String), [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), @@ -196,6 +198,7 @@ describe('Vercel AI integration (V5)', () => { status: 'ok', }), // Seventh span - tool call execution span + // Note: gen_ai.tool.description is NOT present when sendDefaultPii: false because ai.prompt.tools is not recorded expect.objectContaining({ data: { 'vercel.ai.operationId': 'ai.toolCall', @@ -215,7 +218,7 @@ describe('Vercel AI integration (V5)', () => { }; const EXPECTED_AVAILABLE_TOOLS_JSON = - '[{"type":"function","name":"getWeather","inputSchema":{"$schema":"http://json-schema.org/draft-07/schema#","type":"object","properties":{"location":{"type":"string"}},"required":["location"],"additionalProperties":false}}]'; + '[{"type":"function","name":"getWeather","description":"Get the current weather for a location","inputSchema":{"$schema":"http://json-schema.org/draft-07/schema#","type":"object","properties":{"location":{"type":"string"}},"required":["location"],"additionalProperties":false}}]'; const EXPECTED_TRANSACTION_DEFAULT_PII_TRUE = { transaction: 'main', @@ -230,8 +233,9 @@ describe('Vercel AI integration (V5)', () => { 'vercel.ai.prompt': '[{"role":"user","content":"Where is the first span?"}]', [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the 
first span?"}]', + [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + '[{"role":"assistant","parts":[{"type":"text","content":"First span here!"}],"finish_reason":"stop"}]', 'vercel.ai.response.finishReason': 'stop', - [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'First span here!', 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', @@ -257,10 +261,11 @@ describe('Vercel AI integration (V5)', () => { [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":[{"type":"text","text":"Where is the first span?"}]}]', + [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + '[{"role":"assistant","parts":[{"type":"text","content":"First span here!"}],"finish_reason":"stop"}]', 'vercel.ai.response.finishReason': 'stop', 'vercel.ai.response.id': expect.any(String), 'vercel.ai.response.model': 'mock-model-id', - [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'First span here!', 'vercel.ai.response.timestamp': expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, @@ -290,8 +295,9 @@ describe('Vercel AI integration (V5)', () => { 'vercel.ai.prompt': '[{"role":"user","content":"Where is the second span?"}]', [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]', + [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + '[{"role":"assistant","parts":[{"type":"text","content":"Second span here!"}],"finish_reason":"stop"}]', 'vercel.ai.response.finishReason': 'stop', - [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', @@ -323,7 +329,8 @@ describe('Vercel AI integration (V5)', () => { 'vercel.ai.response.finishReason': 'stop', 'vercel.ai.response.model': 'mock-model-id', 'vercel.ai.response.id': expect.any(String), - [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), + 
[GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + '[{"role":"assistant","parts":[{"type":"text","content":"Second span here!"}],"finish_reason":"stop"}]', 'vercel.ai.response.timestamp': expect.any(String), [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), @@ -349,8 +356,9 @@ describe('Vercel AI integration (V5)', () => { 'vercel.ai.prompt': '[{"role":"user","content":"What is the weather in San Francisco?"}]', [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather in San Francisco?"}]', + [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + '[{"role":"assistant","parts":[{"type":"tool_call","id":"call-1","name":"getWeather","arguments":"{\\"location\\":\\"San Francisco\\"}"}],"finish_reason":"tool_call"}]', 'vercel.ai.response.finishReason': 'tool-calls', - [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', @@ -375,14 +383,14 @@ describe('Vercel AI integration (V5)', () => { 'vercel.ai.pipeline.name': 'generateText.doGenerate', [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1, [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), + [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + '[{"role":"assistant","parts":[{"type":"tool_call","id":"call-1","name":"getWeather","arguments":"{\\"location\\":\\"San Francisco\\"}"}],"finish_reason":"tool_call"}]', 'vercel.ai.prompt.toolChoice': expect.any(String), [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: EXPECTED_AVAILABLE_TOOLS_JSON, 'vercel.ai.response.finishReason': 'tool-calls', 'vercel.ai.response.id': expect.any(String), 'vercel.ai.response.model': 'mock-model-id', - // 'gen_ai.response.text': 'Tool call completed!', // TODO: look into why this is not being set 'vercel.ai.response.timestamp': expect.any(String), - [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String), 
'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'], @@ -406,6 +414,7 @@ describe('Vercel AI integration (V5)', () => { data: expect.objectContaining({ 'vercel.ai.operationId': 'ai.toolCall', [GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1', + [GEN_AI_TOOL_DESCRIPTION_ATTRIBUTE]: 'Get the current weather for a location', [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather', [GEN_AI_TOOL_INPUT_ATTRIBUTE]: expect.any(String), [GEN_AI_TOOL_OUTPUT_ATTRIBUTE]: expect.any(String), diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/scenario.mjs b/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/scenario.mjs index 66233d1dabe5..ee2dc802cd9c 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/scenario.mjs +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/scenario.mjs @@ -62,6 +62,7 @@ async function run() { }), tools: { getWeather: tool({ + description: 'Get the current weather for a location', inputSchema: z.object({ location: z.string() }), execute: async ({ location }) => `Weather in ${location}: Sunny, 72°F`, }), diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts index 2a213f39410d..39ee00254373 100644 --- a/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts @@ -4,15 +4,15 @@ import { afterAll, describe, expect } from 'vitest'; import { GEN_AI_INPUT_MESSAGES_ATTRIBUTE, GEN_AI_OPERATION_NAME_ATTRIBUTE, + GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE, GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE, GEN_AI_REQUEST_MODEL_ATTRIBUTE, GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE, GEN_AI_RESPONSE_ID_ATTRIBUTE, GEN_AI_RESPONSE_MODEL_ATTRIBUTE, - GEN_AI_RESPONSE_TEXT_ATTRIBUTE, - GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, GEN_AI_SYSTEM_ATTRIBUTE, 
GEN_AI_TOOL_CALL_ID_ATTRIBUTE, + GEN_AI_TOOL_DESCRIPTION_ATTRIBUTE, GEN_AI_TOOL_INPUT_ATTRIBUTE, GEN_AI_TOOL_NAME_ATTRIBUTE, GEN_AI_TOOL_OUTPUT_ATTRIBUTE, @@ -95,10 +95,11 @@ describe('Vercel AI integration (V6)', () => { 'vercel.ai.prompt': '[{"role":"user","content":"Where is the second span?"}]', 'vercel.ai.request.headers.user-agent': expect.any(String), 'vercel.ai.response.finishReason': 'stop', - [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]', + [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + '[{"role":"assistant","parts":[{"type":"text","content":"Second span here!"}],"finish_reason":"stop"}]', [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, @@ -129,9 +130,10 @@ describe('Vercel AI integration (V6)', () => { 'vercel.ai.response.finishReason': 'stop', 'vercel.ai.response.model': 'mock-model-id', 'vercel.ai.response.id': expect.any(String), - [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), 'vercel.ai.response.timestamp': expect.any(String), [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), + [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + '[{"role":"assistant","parts":[{"type":"text","content":"Second span here!"}],"finish_reason":"stop"}]', [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'], [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, @@ -199,6 +201,7 @@ describe('Vercel AI integration (V6)', () => { status: 'ok', }), // Seventh span - tool call execution span + // Note: gen_ai.tool.description is NOT present when sendDefaultPii: false because ai.prompt.tools is not recorded expect.objectContaining({ data: expect.objectContaining({ 'vercel.ai.operationId': 'ai.toolCall', @@ -218,7 +221,7 @@ describe('Vercel AI integration (V6)', () => { }; const 
EXPECTED_AVAILABLE_TOOLS_JSON = - '[{"type":"function","name":"getWeather","inputSchema":{"$schema":"http://json-schema.org/draft-07/schema#","type":"object","properties":{"location":{"type":"string"}},"required":["location"],"additionalProperties":false}}]'; + '[{"type":"function","name":"getWeather","description":"Get the current weather for a location","inputSchema":{"$schema":"http://json-schema.org/draft-07/schema#","type":"object","properties":{"location":{"type":"string"}},"required":["location"],"additionalProperties":false}}]'; const EXPECTED_TRANSACTION_DEFAULT_PII_TRUE = { transaction: 'main', @@ -233,8 +236,9 @@ describe('Vercel AI integration (V6)', () => { 'vercel.ai.prompt': '[{"role":"user","content":"Where is the first span?"}]', 'vercel.ai.request.headers.user-agent': expect.any(String), [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the first span?"}]', + [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + '[{"role":"assistant","parts":[{"type":"text","content":"First span here!"}],"finish_reason":"stop"}]', 'vercel.ai.response.finishReason': 'stop', - [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'First span here!', 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', @@ -260,10 +264,11 @@ describe('Vercel AI integration (V6)', () => { 'vercel.ai.request.headers.user-agent': expect.any(String), [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":[{"type":"text","text":"Where is the first span?"}]}]', + [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + '[{"role":"assistant","parts":[{"type":"text","content":"First span here!"}],"finish_reason":"stop"}]', 'vercel.ai.response.finishReason': 'stop', 'vercel.ai.response.id': expect.any(String), 'vercel.ai.response.model': 'mock-model-id', - [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'First span here!', 'vercel.ai.response.timestamp': expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, @@ -293,8 +298,9 @@ 
describe('Vercel AI integration (V6)', () => { 'vercel.ai.prompt': '[{"role":"user","content":"Where is the second span?"}]', 'vercel.ai.request.headers.user-agent': expect.any(String), [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]', + [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + '[{"role":"assistant","parts":[{"type":"text","content":"Second span here!"}],"finish_reason":"stop"}]', 'vercel.ai.response.finishReason': 'stop', - [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', @@ -327,9 +333,10 @@ describe('Vercel AI integration (V6)', () => { 'vercel.ai.response.finishReason': 'stop', 'vercel.ai.response.model': 'mock-model-id', 'vercel.ai.response.id': expect.any(String), - [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), 'vercel.ai.response.timestamp': expect.any(String), [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), + [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + '[{"role":"assistant","parts":[{"type":"text","content":"Second span here!"}],"finish_reason":"stop"}]', [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'], [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20, @@ -352,8 +359,9 @@ describe('Vercel AI integration (V6)', () => { 'vercel.ai.prompt': '[{"role":"user","content":"What is the weather in San Francisco?"}]', 'vercel.ai.request.headers.user-agent': expect.any(String), [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather in San Francisco?"}]', + [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + '[{"role":"assistant","parts":[{"type":"tool_call","id":"call-1","name":"getWeather","arguments":"{\\"location\\":\\"San Francisco\\"}"}],"finish_reason":"tool_call"}]', 'vercel.ai.response.finishReason': 'tool-calls', - [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': 
false, [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id', @@ -378,14 +386,14 @@ describe('Vercel AI integration (V6)', () => { 'vercel.ai.pipeline.name': 'generateText.doGenerate', 'vercel.ai.request.headers.user-agent': expect.any(String), [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), + [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]: + '[{"role":"assistant","parts":[{"type":"tool_call","id":"call-1","name":"getWeather","arguments":"{\\"location\\":\\"San Francisco\\"}"}],"finish_reason":"tool_call"}]', 'vercel.ai.prompt.toolChoice': expect.any(String), [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: EXPECTED_AVAILABLE_TOOLS_JSON, 'vercel.ai.response.finishReason': 'tool-calls', 'vercel.ai.response.id': expect.any(String), 'vercel.ai.response.model': 'mock-model-id', - // 'gen_ai.response.text': 'Tool call completed!', // TODO: look into why this is not being set 'vercel.ai.response.timestamp': expect.any(String), - [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String), 'vercel.ai.settings.maxRetries': 2, 'vercel.ai.streaming': false, [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'], @@ -409,6 +417,7 @@ describe('Vercel AI integration (V6)', () => { data: expect.objectContaining({ 'vercel.ai.operationId': 'ai.toolCall', [GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1', + [GEN_AI_TOOL_DESCRIPTION_ATTRIBUTE]: 'Get the current weather for a location', [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather', [GEN_AI_TOOL_INPUT_ATTRIBUTE]: expect.any(String), [GEN_AI_TOOL_OUTPUT_ATTRIBUTE]: expect.any(String), diff --git a/dev-packages/node-overhead-gh-action/index.mjs b/dev-packages/node-overhead-gh-action/index.mjs index 0a0f02e41b9a..8c3e2c56873b 100644 --- a/dev-packages/node-overhead-gh-action/index.mjs +++ b/dev-packages/node-overhead-gh-action/index.mjs @@ -157,7 +157,7 @@ async function run() { body, }); } - } catch (error) { + } catch { core.error( "Error updating comment. 
This can happen for PR's originating from a fork without write permissions.", ); diff --git a/dev-packages/node-overhead-gh-action/package.json b/dev-packages/node-overhead-gh-action/package.json index 58fe3396c498..9cc29491db47 100644 --- a/dev-packages/node-overhead-gh-action/package.json +++ b/dev-packages/node-overhead-gh-action/package.json @@ -19,13 +19,13 @@ "clean": "rimraf -g **/node_modules", "db:up": "docker compose up", "db:down": "docker compose down --volumes", - "lint": "oxlint .", - "fix": "oxlint . --fix" + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --type-aware", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --fix --type-aware" }, "dependencies": { "@sentry/node": "10.43.0", "express": "^4.21.2", - "mysql2": "^3.14.4" + "mysql2": "^3.19.1" }, "devDependencies": { "@actions/artifact": "5.0.3", diff --git a/dev-packages/rollup-utils/npmHelpers.mjs b/dev-packages/rollup-utils/npmHelpers.mjs index d5f7428b992d..6f399c1c3f59 100644 --- a/dev-packages/rollup-utils/npmHelpers.mjs +++ b/dev-packages/rollup-utils/npmHelpers.mjs @@ -27,7 +27,7 @@ const __dirname = path.dirname(fileURLToPath(import.meta.url)); const packageDotJSON = JSON.parse(fs.readFileSync(path.resolve(process.cwd(), './package.json'), { encoding: 'utf8' })); -const ignoreSideEffects = /[\\\/]debug-build\.ts$/; +const ignoreSideEffects = /[\\/]debug-build\.ts$/; export function makeBaseNPMConfig(options = {}) { const { diff --git a/dev-packages/rollup-utils/plugins/npmPlugins.mjs b/dev-packages/rollup-utils/plugins/npmPlugins.mjs index 3cb9ca7d50f9..221a4a34f8c4 100644 --- a/dev-packages/rollup-utils/plugins/npmPlugins.mjs +++ b/dev-packages/rollup-utils/plugins/npmPlugins.mjs @@ -7,9 +7,6 @@ * Sucrase plugin docs: https://github.com/rollup/plugins/tree/master/packages/sucrase */ -import * as fs from 'fs'; -import * as path from 'path'; - import json from '@rollup/plugin-json'; import replace from 
'@rollup/plugin-replace'; import cleanup from 'rollup-plugin-cleanup'; diff --git a/dev-packages/size-limit-gh-action/index.mjs b/dev-packages/size-limit-gh-action/index.mjs index 3dac81a3f080..86cbcd21a793 100644 --- a/dev-packages/size-limit-gh-action/index.mjs +++ b/dev-packages/size-limit-gh-action/index.mjs @@ -171,7 +171,7 @@ async function run() { body, }); } - } catch (error) { + } catch { core.error( "Error updating comment. This can happen for PR's originating from a fork without write permissions.", ); diff --git a/dev-packages/size-limit-gh-action/package.json b/dev-packages/size-limit-gh-action/package.json index 2cf193270bda..c7b8b16540ec 100644 --- a/dev-packages/size-limit-gh-action/package.json +++ b/dev-packages/size-limit-gh-action/package.json @@ -10,8 +10,8 @@ "main": "index.mjs", "type": "module", "scripts": { - "lint": "oxlint .", - "fix": "oxlint . --fix" + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --type-aware", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --fix --type-aware" }, "dependencies": { "@actions/artifact": "^6.1.0", diff --git a/dev-packages/test-utils/package.json b/dev-packages/test-utils/package.json index 7f7ac3780821..2207c0cce346 100644 --- a/dev-packages/test-utils/package.json +++ b/dev-packages/test-utils/package.json @@ -31,8 +31,8 @@ "node": ">=18" }, "scripts": { - "fix": "oxlint . --fix", - "lint": "oxlint .", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --fix --type-aware", + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . 
--type-aware", "build": "run-s build:transpile build:types", "build:tarball": "run-s build:transpile build:types", "build:dev": "yarn build", diff --git a/docs/new-sdk-release-checklist.md b/docs/new-sdk-release-checklist.md index 1a4b4635dfd2..d95ea2bf7b0f 100644 --- a/docs/new-sdk-release-checklist.md +++ b/docs/new-sdk-release-checklist.md @@ -73,34 +73,31 @@ order**. Note that you can prepare the PRs at any time but the **merging oder** - [ ] 1. If not yet done, be sure to remove the `private: true` property from your SDK’s `package.json`. Additionally, ensure that `"publishConfig": {"access": "public"}` is set. -- [ ] 2. Make sure that the new SDK is **not added** - in`[craft.yml](https://github.com/getsentry/sentry-javascript/blob/develop/.craft.yml)` as a target for the - **Sentry release registry**\ - _Once this is added, craft will try to publish an entry in the next release which does not work and caused failed release - runs in the past_ -- [ ] 3. Add an `npm` target in `craft.yml` for the new package. Make sure to insert it in the right place, after all +- [ ] 2. Add an `npm` target in `craft.yml` for the new package. Make sure to insert it in the right place, after all the Sentry dependencies of your package but before packages that depend on your new package (if applicable). ```yml - name: npm id: '@sentry/[yourPackage]' includeNames: /^sentry-[yourPackage]-\d.*\.tgz$/ ``` -- [ ] 4. Cut a new release (as usual, see - [Publishing Release](https://github.com/getsentry/sentry-javascript/blob/develop/docs/publishing-a-release.md)) +- [ ] 3. Add a `registry` target in `craft.yml` for the new package. + For new packages, Craft will automatically create the required directory structure and initial manifest in the Sentry Release Registry ([Craft Docs](https://craft.sentry.dev/targets/registry/#creating-new-packages)). 
+ ```yml + name: 'Sentry [Package] SDK' + sdkName: 'sentry.javascript.[package]' + packageUrl: 'https://www.npmjs.com/package/@sentry/[package]' + mainDocsUrl: 'https://docs.sentry.io/platforms/javascript/guides/[package]/' + onlyIfPresent: /^sentry-[package]-\d.*\.tgz$/ + ``` +- [ ] 4. Cut a new release (as usual, see + [Publishing Release](https://github.com/getsentry/sentry-javascript/blob/develop/docs/publishing-a-release.md)) ### After the Release -- [ ] 4. Check that the package was in fact published to NPM -- [ ] 5. Add the new SDK to the [Sentry Release Registry](https://github.com/getsentry/sentry-release-registry) \ - Instructions on how to do this can be found [here](https://github.com/getsentry/sentry-release-registry#adding-new-sdks) - \ - You have to fork this repo and PR the files from your fork to the main repo \ - [Example PR](https://github.com/getsentry/sentry-release-registry/pull/80) from the Svelte SDK - -- [ ] 2. Add an entry to [craft.yml](https://github.com/getsentry/sentry-javascript/blob/develop/.craft.yml) to add - releases of your SDK to the Sentry release registry \ - [Example PR](https://github.com/getsentry/sentry-javascript/pull/5547) from the Svelte SDK \ - _Subsequent releases will now be added automatically to the registry_ +- [ ] 1. Check that the package was in fact published to NPM +- [ ] 2. Check that the SDK is added to the Sentry Release Registry [npm packages](https://github.com/getsentry/sentry-release-registry/tree/master/packages/npm/%40sentry) and [SDK symlinks](https://github.com/getsentry/sentry-release-registry/tree/master/sdks) +- [ ] 3. In case the package is missing anywhere, add the missing content. Instructions on how to do this can be found [here](https://github.com/getsentry/sentry-release-registry#adding-new-sdks) + [Example PR](https://github.com/getsentry/sentry-release-registry/pull/80) from the Svelte SDK. 
## Follow-up Tasks diff --git a/package.json b/package.json index e0970e428220..42edbcaf8879 100644 --- a/package.json +++ b/package.json @@ -21,14 +21,12 @@ "clean:tarballs": "rimraf {packages,dev-packages}/*/*.tgz", "clean:watchman": "watchman watch-del \".\"", "clean:all": "run-s clean:build clean:tarballs clean:caches clean:deps clean:watchman", - "fix": "run-s fix:oxfmt fix:oxlint", - "fix:oxlint": "oxlint . --fix", - "fix:oxfmt": "oxfmt . --write", - "format:check": "oxfmt . --check", "format": "oxfmt . --write", - "lint": "run-s lint:oxfmt lint:oxlint", - "lint:oxfmt": "oxfmt . --check", - "lint:oxlint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --type-aware", + "format:check": "oxfmt . --check", + "verify": "run-s format:check lint", + "fix": "run-s format lint:fix", + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --type-aware", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --fix --type-aware", "lint:es-compatibility": "nx run-many -t lint:es-compatibility", "dedupe-deps:check": "yarn-deduplicate yarn.lock --list --fail", "dedupe-deps:fix": "yarn-deduplicate yarn.lock", @@ -61,6 +59,7 @@ "packages/core", "packages/cloudflare", "packages/deno", + "packages/effect", "packages/ember", "packages/eslint-config-sdk", "packages/eslint-plugin-sdk", @@ -131,8 +130,8 @@ "nodemon": "^3.1.10", "npm-run-all2": "^6.2.0", "nx": "22.5.0", - "oxfmt": "^0.32.0", - "oxlint": "^1.50.0", + "oxfmt": "^0.38.0", + "oxlint": "^1.53.0", "oxlint-tsgolint": "^0.16.0", "rimraf": "^5.0.10", "rollup": "^4.59.0", diff --git a/packages/angular/package.json b/packages/angular/package.json index 513e05ab9612..ad7bb59816bf 100644 --- a/packages/angular/package.json +++ b/packages/angular/package.json @@ -51,8 +51,8 @@ "build:tarball": "npm pack ./build", "circularDepCheck": "madge --circular src/index.ts", "clean": "rimraf build coverage sentry-angular-*.tgz", - "fix": "oxlint . 
--fix", - "lint": "oxlint .", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --fix --type-aware", + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --type-aware", "lint:es-compatibility": "es-check es2020 ./build/{esm2020,fesm2015,fesm2020}/*.mjs --module", "test": "yarn test:unit", "test:unit": "vitest run", diff --git a/packages/angular/src/errorhandler.ts b/packages/angular/src/errorhandler.ts index ceb05c0b9e9f..1309ede53775 100644 --- a/packages/angular/src/errorhandler.ts +++ b/packages/angular/src/errorhandler.ts @@ -26,7 +26,7 @@ export interface ErrorHandlerOptions { function tryToUnwrapZonejsError(error: unknown): unknown | Error { // TODO: once Angular14 is the minimum requirement ERROR_ORIGINAL_ERROR and // getOriginalError from error.ts can be used directly. - return error && (error as { ngOriginalError: Error }).ngOriginalError + return (error as { ngOriginalError?: Error })?.ngOriginalError ? (error as { ngOriginalError: Error }).ngOriginalError : error; } @@ -39,6 +39,7 @@ function extractHttpModuleError(error: HttpErrorResponse): string | Error { // ... or an`ErrorEvent`, which can provide us with the message but no stack... 
// guarding `ErrorEvent` against `undefined` as it's not defined in Node environments + // oxlint-disable-next-line typescript/prefer-optional-chain if (typeof ErrorEvent !== 'undefined' && error.error instanceof ErrorEvent && error.error.message) { return error.error.message; } diff --git a/packages/astro/package.json b/packages/astro/package.json index 4fadeb04e04d..4d45bbffd429 100644 --- a/packages/astro/package.json +++ b/packages/astro/package.json @@ -53,7 +53,7 @@ "access": "public" }, "peerDependencies": { - "astro": ">=3.x || >=4.0.0-beta || >=5.x" + "astro": ">=3.x || >=4.0.0-beta" }, "dependencies": { "@sentry/browser": "10.43.0", @@ -76,8 +76,8 @@ "build:tarball": "npm pack", "circularDepCheck": "madge --circular src/index.client.ts && madge --circular src/index.server.ts && madge --circular src/index.types.ts", "clean": "rimraf build coverage sentry-astro-*.tgz", - "fix": "oxlint . --fix", - "lint": "oxlint .", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --fix --type-aware", + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . 
--type-aware", "lint:es-compatibility": "es-check es2020 ./build/cjs/*.js && es-check es2020 ./build/esm/*.js --module", "test": "yarn test:unit", "test:unit": "vitest run", diff --git a/packages/astro/src/server/middleware.ts b/packages/astro/src/server/middleware.ts index a12c25ff6045..2556ebf0894b 100644 --- a/packages/astro/src/server/middleware.ts +++ b/packages/astro/src/server/middleware.ts @@ -419,17 +419,22 @@ function getParametrizedRoute( const contextWithRoutePattern = ctx; const rawRoutePattern = contextWithRoutePattern.routePattern; - // @ts-expect-error Implicit any on Symbol.for (This is available in Astro 5) - // eslint-disable-next-line @typescript-eslint/no-unsafe-member-access - const routesFromManifest = ctx?.[Symbol.for('context.routes')]?.manifest?.routes; - - // eslint-disable-next-line @typescript-eslint/no-unsafe-member-access + const routesFromManifest = + // @ts-expect-error Implicit any on Symbol.for (This is available in Astro 5) + // eslint-disable-next-line @typescript-eslint/no-unsafe-member-access + ctx?.[Symbol.for('context.routes')]?.manifest?.routes ?? 
+ // @ts-expect-error Implicit any on Symbol.for (This is available in Astro 6) + // eslint-disable-next-line @typescript-eslint/no-unsafe-member-access + ctx?.[Symbol.for('astro.pipeline')]?.manifest?.routes; + + // oxlint-disable-next-line typescript/no-unsafe-member-access const matchedRouteSegmentsFromManifest = routesFromManifest?.find( (route: { routeData?: { route?: string } }) => route?.routeData?.route === rawRoutePattern, + // oxlint-disable-next-line typescript/no-unsafe-member-access )?.routeData?.segments; return ( - // Astro v5 - Joining the segments to get the correct casing of the parametrized route + // Astro v5+ - Joining the segments to get the correct casing of the parametrized route (matchedRouteSegmentsFromManifest && joinRouteSegments(matchedRouteSegmentsFromManifest)) || // Fallback (Astro v4 and earlier) interpolateRouteFromUrlAndParams(ctx.url.pathname, ctx.params) diff --git a/packages/aws-serverless/package.json b/packages/aws-serverless/package.json index b8b9721d6ca2..6e5b71f2167d 100644 --- a/packages/aws-serverless/package.json +++ b/packages/aws-serverless/package.json @@ -66,9 +66,9 @@ }, "dependencies": { "@opentelemetry/api": "^1.9.0", - "@opentelemetry/instrumentation": "^0.211.0", - "@opentelemetry/instrumentation-aws-sdk": "0.66.0", - "@opentelemetry/semantic-conventions": "^1.39.0", + "@opentelemetry/instrumentation": "^0.213.0", + "@opentelemetry/instrumentation-aws-sdk": "0.68.0", + "@opentelemetry/semantic-conventions": "^1.40.0", "@sentry/core": "10.43.0", "@sentry/node": "10.43.0", "@sentry/node-core": "10.43.0", @@ -92,8 +92,8 @@ "build:tarball": "npm pack", "circularDepCheck": "madge --circular src/index.ts", "clean": "rimraf build dist-awslambda-layer coverage sentry-serverless-*.tgz", - "fix": "oxlint . --fix", - "lint": "oxlint .", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . 
--fix --type-aware", + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --type-aware", "lint:es-compatibility": "es-check es2022 ./build/npm/cjs/*.js && es-check es2022 ./build/npm/esm/*.js --module", "test": "vitest run", "test:watch": "vitest --watch", diff --git a/packages/browser-utils/package.json b/packages/browser-utils/package.json index 797f186cf307..6b1263a942ee 100644 --- a/packages/browser-utils/package.json +++ b/packages/browser-utils/package.json @@ -53,8 +53,8 @@ "build:transpile:watch": "rollup -c rollup.npm.config.mjs --watch", "build:tarball": "npm pack", "clean": "rimraf build coverage sentry-internal-browser-utils-*.tgz", - "fix": "oxlint . --fix", - "lint": "oxlint .", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --fix --type-aware", + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --type-aware", "lint:es-compatibility": "es-check es2020 ./build/cjs/*.js && es-check es2020 ./build/esm/*.js --module", "test:unit": "vitest run", "test": "vitest run", diff --git a/packages/browser-utils/src/metrics/browserMetrics.ts b/packages/browser-utils/src/metrics/browserMetrics.ts index 3c3dee074cb5..28d1f2bfaec8 100644 --- a/packages/browser-utils/src/metrics/browserMetrics.ts +++ b/packages/browser-utils/src/metrics/browserMetrics.ts @@ -573,7 +573,6 @@ type StartEventName = | 'loadEvent'; type EndEventName = - | 'connectEnd' | 'domainLookupStart' | 'domainLookupEnd' | 'unloadEventEnd' diff --git a/packages/browser-utils/src/metrics/web-vitals/lib/initUnique.ts b/packages/browser-utils/src/metrics/web-vitals/lib/initUnique.ts index ef3e721dc09e..5043dcaa62b6 100644 --- a/packages/browser-utils/src/metrics/web-vitals/lib/initUnique.ts +++ b/packages/browser-utils/src/metrics/web-vitals/lib/initUnique.ts @@ -27,7 +27,7 @@ export function initUnique(identityObj: object, ClassObj: new () => T): T { instanceMap.set(identityObj, new ClassObj()); } return 
instanceMap.get(identityObj)! as T; - } catch (e) { + } catch (_e) { // --- START Sentry-custom code (try/catch wrapping) --- // Fix for cases where identityObj is not a valid key for WeakMap (sometimes a problem in Safari) // Just return a new instance without caching it in instanceMap diff --git a/packages/browser/package.json b/packages/browser/package.json index 992cc3765bd6..fad2c4f9428c 100644 --- a/packages/browser/package.json +++ b/packages/browser/package.json @@ -69,8 +69,8 @@ "build:tarball": "npm pack", "circularDepCheck": "madge --circular src/index.ts", "clean": "rimraf build coverage .rpt2_cache sentry-browser-*.tgz", - "fix": "oxlint . --fix", - "lint": "oxlint .", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --fix --type-aware", + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --type-aware", "lint:es-compatibility": "es-check es2020 ./build/{bundles,npm/cjs/prod}/*.js && es-check es2020 ./build/npm/esm/prod/*.js --module", "size:check": "cat build/bundles/bundle.min.js | gzip -9 | wc -c | awk '{$1=$1/1024; print \"ES2017: \",$1,\"kB\";}'", "test": "vitest run", diff --git a/packages/browser/src/eventbuilder.ts b/packages/browser/src/eventbuilder.ts index 9823d596a502..798a068b5adf 100644 --- a/packages/browser/src/eventbuilder.ts +++ b/packages/browser/src/eventbuilder.ts @@ -165,6 +165,7 @@ function getPopFirstTopFrames(ex: Error & { framesToPop?: unknown }): number { function isWebAssemblyException(exception: unknown): exception is WebAssembly.Exception { // Check for support // @ts-expect-error - WebAssembly.Exception is a valid class + // oxlint-disable-next-line typescript/prefer-optional-chain if (typeof WebAssembly !== 'undefined' && typeof WebAssembly.Exception !== 'undefined') { // @ts-expect-error - WebAssembly.Exception is a valid class return exception instanceof WebAssembly.Exception; diff --git a/packages/browser/src/exports.ts b/packages/browser/src/exports.ts 
index bf0de4783c5a..e77465a87b39 100644 --- a/packages/browser/src/exports.ts +++ b/packages/browser/src/exports.ts @@ -54,6 +54,7 @@ export { setTag, setTags, setUser, + setConversationId, withScope, withIsolationScope, functionToStringIntegration, diff --git a/packages/browser/src/profiling/utils.ts b/packages/browser/src/profiling/utils.ts index dceb5e45a691..f0d067c841d8 100644 --- a/packages/browser/src/profiling/utils.ts +++ b/packages/browser/src/profiling/utils.ts @@ -614,7 +614,7 @@ export function startJSSelfProfile(): JSSelfProfiler | undefined { // as we risk breaking the user's application, so just disable profiling and log an error. try { return new JSProfilerConstructor({ sampleInterval: samplingIntervalMS, maxBufferSize: maxSamples }); - } catch (e) { + } catch (_e) { if (DEBUG_BUILD) { debug.log( "[Profiling] Failed to initialize the Profiling constructor, this is likely due to a missing 'Document-Policy': 'js-profiling' header.", diff --git a/packages/browser/src/stack-parsers.ts b/packages/browser/src/stack-parsers.ts index 02c3a1f66af3..cb74bc1e6ce6 100644 --- a/packages/browser/src/stack-parsers.ts +++ b/packages/browser/src/stack-parsers.ts @@ -88,7 +88,7 @@ const chromeStackParserFn: StackLineParserFn = line => { const parts = chromeRegex.exec(line) as null | [string, string, string, string, string]; if (parts) { - const isEval = parts[2] && parts[2].indexOf('eval') === 0; // start of line + const isEval = parts[2]?.indexOf('eval') === 0; // start of line if (isEval) { const subMatch = chromeEvalRegex.exec(parts[2]) as null | [string, string, string, string]; diff --git a/packages/browser/src/tracing/browserTracingIntegration.ts b/packages/browser/src/tracing/browserTracingIntegration.ts index c71acf106258..b6dc8b2e92b8 100644 --- a/packages/browser/src/tracing/browserTracingIntegration.ts +++ b/packages/browser/src/tracing/browserTracingIntegration.ts @@ -54,6 +54,22 @@ import { defaultRequestInstrumentationOptions, 
instrumentOutgoingRequests } from export const BROWSER_TRACING_INTEGRATION_ID = 'BrowserTracing'; +/** + * We don't want to start a bunch of idle timers and PerformanceObservers + * for web crawlers, as they may prevent the page from being seen as "idle" + * by the crawler's rendering engine (e.g. Googlebot's headless Chromium). + */ +const BOT_USER_AGENT_RE = + /Googlebot|Google-InspectionTool|Storebot-Google|Bingbot|Slurp|DuckDuckBot|Baiduspider|YandexBot|Facebot|facebookexternalhit|LinkedInBot|Twitterbot|Applebot/i; + +function _isBotUserAgent(): boolean { + const nav = WINDOW.navigator as Navigator | undefined; + if (!nav?.userAgent) { + return false; + } + return BOT_USER_AGENT_RE.test(nav.userAgent); +} + interface RouteInfo { name: string | undefined; source: TransactionSource | undefined; @@ -384,6 +400,8 @@ export const browserTracingIntegration = ((options: Partial void); let lastInteractionTimestamp: number | undefined; @@ -484,6 +502,11 @@ export const browserTracingIntegration = ((options: Partial { try { - const serialized = await serializeEnvelope(env); + const serialized = serializeEnvelope(env); await push(getStore(), serialized, options.maxQueueSize || 30); } catch { // @@ -135,7 +135,7 @@ function createIndexedDbStore(options: BrowserOfflineTransportOptions): OfflineS }, unshift: async (env: Envelope) => { try { - const serialized = await serializeEnvelope(env); + const serialized = serializeEnvelope(env); await unshift(getStore(), serialized, options.maxQueueSize || 30); } catch { // diff --git a/packages/browser/test/tracing/browserTracingIntegration.test.ts b/packages/browser/test/tracing/browserTracingIntegration.test.ts index 991fcc1393a4..58294ea31fa2 100644 --- a/packages/browser/test/tracing/browserTracingIntegration.test.ts +++ b/packages/browser/test/tracing/browserTracingIntegration.test.ts @@ -86,6 +86,68 @@ describe('browserTracingIntegration', () => { Object.defineProperty(WINDOW, 'history', { value: originalGlobalHistory }); }); + 
describe('bot user agent detection', () => { + let originalNavigator: Navigator; + + beforeEach(() => { + originalNavigator = WINDOW.navigator; + }); + + afterEach(() => { + Object.defineProperty(WINDOW, 'navigator', { value: originalNavigator, writable: true, configurable: true }); + }); + + function setUserAgent(ua: string): void { + Object.defineProperty(WINDOW, 'navigator', { + value: { userAgent: ua }, + writable: true, + configurable: true, + }); + } + + it.each([ + 'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)', + 'Mozilla/5.0 (Linux; Android 6.0.1; Nexus 5X Build/MMB29P) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/W.X.Y.Z Mobile Safari/537.36 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)', + 'Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko; compatible; Bingbot/2.0; +http://www.bing.com/bingbot.htm) Chrome/W.X.Y.Z Safari/537.36', + 'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)', + 'facebookexternalhit/1.1 (+http://www.facebook.com/externalhit_uatext.php)', + 'LinkedInBot/1.0 (compatible; Mozilla/5.0)', + 'Twitterbot/1.0', + 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.1.1 Safari/605.1.15 (Applebot/0.1)', + 'Mozilla/5.0 (compatible; Google-InspectionTool/1.0)', + ])('skips tracing setup for bot user agent: %s', ua => { + setUserAgent(ua); + + const client = new BrowserClient( + getDefaultBrowserClientOptions({ + tracesSampleRate: 1, + integrations: [browserTracingIntegration()], + }), + ); + setCurrentClient(client); + client.init(); + + expect(getActiveSpan()).toBeUndefined(); + }); + + it('does not skip tracing setup for normal user agents', () => { + setUserAgent( + 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36', + ); + + const client = new BrowserClient( + getDefaultBrowserClientOptions({ + tracesSampleRate: 1, + integrations: [browserTracingIntegration()], + }), 
+ ); + setCurrentClient(client); + client.init(); + + expect(getActiveSpan()).toBeDefined(); + }); + }); + it('works with tracing enabled', () => { const client = new BrowserClient( getDefaultBrowserClientOptions({ diff --git a/packages/bun/package.json b/packages/bun/package.json index 8ae45b723bb0..b90af8efe83f 100644 --- a/packages/bun/package.json +++ b/packages/bun/package.json @@ -58,8 +58,8 @@ "build:tarball": "npm pack", "circularDepCheck": "madge --circular src/index.ts", "clean": "rimraf build coverage sentry-bun-*.tgz", - "fix": "oxlint . --fix", - "lint": "oxlint .", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --fix --type-aware", + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --type-aware", "lint:es-compatibility": "es-check es2022 ./build/cjs/*.js && es-check es2022 ./build/esm/*.js --module", "install:bun": "node ./scripts/install-bun.js", "test": "run-s install:bun test:bun", diff --git a/packages/bun/scripts/install-bun.js b/packages/bun/scripts/install-bun.js index e2221e549d3e..2c14afa5d273 100644 --- a/packages/bun/scripts/install-bun.js +++ b/packages/bun/scripts/install-bun.js @@ -10,7 +10,7 @@ const https = require('https'); const installScriptUrl = 'https://bun.sh/install'; // Check if bun is installed -exec('bun --version', (error, version) => { +exec('bun --version', (error, _version) => { if (error) { console.error('bun is not installed. Installing...'); installLatestBun(); diff --git a/packages/bun/src/integrations/bunserver.ts b/packages/bun/src/integrations/bunserver.ts index 83e7f5ff4967..11c12da37218 100644 --- a/packages/bun/src/integrations/bunserver.ts +++ b/packages/bun/src/integrations/bunserver.ts @@ -207,10 +207,9 @@ function wrapRequestHandler( routeName = route; } - Object.assign( - attributes, - httpHeadersToSpanAttributes(request.headers.toJSON(), getClient()?.getOptions().sendDefaultPii ?? 
false), - ); + const sendDefaultPii = getClient()?.getOptions().sendDefaultPii ?? false; + + Object.assign(attributes, httpHeadersToSpanAttributes(request.headers.toJSON(), sendDefaultPii)); isolationScope.setSDKProcessingMetadata({ normalizedRequest: { @@ -238,10 +237,12 @@ function wrapRequestHandler( const response = (await target.apply(thisArg, args)) as Response | undefined; if (response?.status) { setHttpStatus(span, response.status); + isolationScope.setContext('response', { - headers: response.headers.toJSON(), status_code: response.status, }); + + span.setAttributes(httpHeadersToSpanAttributes(response.headers.toJSON(), sendDefaultPii, 'response')); } return response; } catch (e) { diff --git a/packages/bun/test/integrations/bunserver.test.ts b/packages/bun/test/integrations/bunserver.test.ts index 9792c59c2691..1605d7c0be90 100644 --- a/packages/bun/test/integrations/bunserver.test.ts +++ b/packages/bun/test/integrations/bunserver.test.ts @@ -1,10 +1,15 @@ import * as SentryCore from '@sentry/core'; import { afterEach, beforeAll, beforeEach, describe, expect, spyOn, test } from 'bun:test'; import { instrumentBunServe } from '../../src/integrations/bunserver'; +import type { Span } from '@sentry/core'; describe('Bun Serve Integration', () => { + const mockSpan = SentryCore.startInactiveSpan({ name: 'test span' }); + const setAttributesSpy = spyOn(mockSpan, 'setAttributes'); const continueTraceSpy = spyOn(SentryCore, 'continueTrace'); - const startSpanSpy = spyOn(SentryCore, 'startSpan'); + const startSpanSpy = spyOn(SentryCore, 'startSpan').mockImplementation((_opts, cb) => { + return cb(mockSpan as unknown as Span); + }); beforeAll(() => { instrumentBunServe(); @@ -13,6 +18,7 @@ describe('Bun Serve Integration', () => { beforeEach(() => { startSpanSpy.mockClear(); continueTraceSpy.mockClear(); + setAttributesSpy.mockClear(); }); // Fun fact: Bun = 2 21 14 :) @@ -27,7 +33,7 @@ describe('Bun Serve Integration', () => { test('generates a transaction around a 
request', async () => { const server = Bun.serve({ async fetch(_req) { - return new Response('Bun!'); + return new Response('Bun!', { headers: new Headers({ 'x-custom': 'value' }) }); }, port, }); @@ -58,6 +64,10 @@ describe('Bun Serve Integration', () => { }, expect.any(Function), ); + + expect(setAttributesSpy).toHaveBeenCalledWith({ + 'http.response.header.x_custom': 'value', + }); }); test('generates a post transaction', async () => { diff --git a/packages/cloudflare/package.json b/packages/cloudflare/package.json index dbe27fc0be87..0c24fe36f18d 100644 --- a/packages/cloudflare/package.json +++ b/packages/cloudflare/package.json @@ -78,8 +78,8 @@ "build:tarball": "npm pack", "circularDepCheck": "madge --circular src/index.ts", "clean": "rimraf build coverage sentry-cloudflare-*.tgz", - "fix": "oxlint . --fix", - "lint": "oxlint .", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --fix --type-aware", + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . 
--type-aware", "lint:es-compatibility": "es-check es2022 ./build/cjs/*.js && es-check es2022 ./build/esm/*.js --module", "test": "yarn test:unit", "test:unit": "vitest run", diff --git a/packages/cloudflare/src/handler.ts b/packages/cloudflare/src/handler.ts deleted file mode 100644 index 29b32225e433..000000000000 --- a/packages/cloudflare/src/handler.ts +++ /dev/null @@ -1,261 +0,0 @@ -import { - captureException, - SEMANTIC_ATTRIBUTE_SENTRY_OP, - SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN, - SEMANTIC_ATTRIBUTE_SENTRY_SOURCE, - startSpan, - withIsolationScope, -} from '@sentry/core'; -import { setAsyncLocalStorageAsyncContextStrategy } from './async'; -import type { CloudflareOptions } from './client'; -import { flushAndDispose } from './flush'; -import { isInstrumented, markAsInstrumented } from './instrument'; -import { getHonoIntegration } from './integrations/hono'; -import { getFinalOptions } from './options'; -import { wrapRequestHandler } from './request'; -import { addCloudResourceContext } from './scope-utils'; -import { init } from './sdk'; -import { instrumentContext } from './utils/instrumentContext'; - -/** - * Wrapper for Cloudflare handlers. - * - * Initializes the SDK and wraps the handler with Sentry instrumentation. - * - * Automatically instruments the `fetch` method of the handler. - * - * @param optionsCallback Function that returns the options for the SDK initialization. - * @param handler {ExportedHandler} The handler to wrap. - * @returns The wrapped handler. 
- */ -// eslint-disable-next-line complexity -export function withSentry< - Env = unknown, - QueueHandlerMessage = unknown, - CfHostMetadata = unknown, - T extends ExportedHandler = ExportedHandler< - Env, - QueueHandlerMessage, - CfHostMetadata - >, ->(optionsCallback: (env: Env) => CloudflareOptions | undefined, handler: T): T { - setAsyncLocalStorageAsyncContextStrategy(); - - try { - if ('fetch' in handler && typeof handler.fetch === 'function' && !isInstrumented(handler.fetch)) { - handler.fetch = new Proxy(handler.fetch, { - apply(target, thisArg, args: Parameters>) { - const [request, env, ctx] = args; - const context = instrumentContext(ctx); - args[2] = context; - - const options = getFinalOptions(optionsCallback(env), env); - - return wrapRequestHandler({ options, request, context }, () => target.apply(thisArg, args)); - }, - }); - - markAsInstrumented(handler.fetch); - } - - /* Hono does not reach the catch block of the fetch handler and captureException needs to be called in the hono errorHandler */ - if ( - 'onError' in handler && - 'errorHandler' in handler && - typeof handler.errorHandler === 'function' && - !isInstrumented(handler.errorHandler) - ) { - handler.errorHandler = new Proxy(handler.errorHandler, { - apply(target, thisArg, args) { - const [err, context] = args; - - getHonoIntegration()?.handleHonoException(err, context); - - return Reflect.apply(target, thisArg, args); - }, - }); - - markAsInstrumented(handler.errorHandler); - } - - if ('scheduled' in handler && typeof handler.scheduled === 'function' && !isInstrumented(handler.scheduled)) { - handler.scheduled = new Proxy(handler.scheduled, { - apply(target, thisArg, args: Parameters>) { - const [event, env, ctx] = args; - const context = instrumentContext(ctx); - args[2] = context; - - return withIsolationScope(isolationScope => { - const options = getFinalOptions(optionsCallback(env), env); - const waitUntil = context.waitUntil.bind(context); - - const client = init({ ...options, ctx: 
context }); - isolationScope.setClient(client); - - addCloudResourceContext(isolationScope); - - return startSpan( - { - op: 'faas.cron', - name: `Scheduled Cron ${event.cron}`, - attributes: { - 'faas.cron': event.cron, - 'faas.time': new Date(event.scheduledTime).toISOString(), - 'faas.trigger': 'timer', - [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.faas.cloudflare.scheduled', - [SEMANTIC_ATTRIBUTE_SENTRY_SOURCE]: 'task', - }, - }, - async () => { - try { - return await (target.apply(thisArg, args) as ReturnType); - } catch (e) { - captureException(e, { mechanism: { handled: false, type: 'auto.faas.cloudflare.scheduled' } }); - throw e; - } finally { - waitUntil(flushAndDispose(client)); - } - }, - ); - }); - }, - }); - - markAsInstrumented(handler.scheduled); - } - - if ('email' in handler && typeof handler.email === 'function' && !isInstrumented(handler.email)) { - handler.email = new Proxy(handler.email, { - apply(target, thisArg, args: Parameters>) { - const [emailMessage, env, ctx] = args; - const context = instrumentContext(ctx); - args[2] = context; - - return withIsolationScope(isolationScope => { - const options = getFinalOptions(optionsCallback(env), env); - const waitUntil = context.waitUntil.bind(context); - - const client = init({ ...options, ctx: context }); - isolationScope.setClient(client); - - addCloudResourceContext(isolationScope); - - return startSpan( - { - op: 'faas.email', - name: `Handle Email ${emailMessage.to}`, - attributes: { - 'faas.trigger': 'email', - [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.faas.cloudflare.email', - [SEMANTIC_ATTRIBUTE_SENTRY_SOURCE]: 'task', - }, - }, - async () => { - try { - return await (target.apply(thisArg, args) as ReturnType); - } catch (e) { - captureException(e, { mechanism: { handled: false, type: 'auto.faas.cloudflare.email' } }); - throw e; - } finally { - waitUntil(flushAndDispose(client)); - } - }, - ); - }); - }, - }); - - markAsInstrumented(handler.email); - } - - if ('queue' in handler && typeof 
handler.queue === 'function' && !isInstrumented(handler.queue)) { - handler.queue = new Proxy(handler.queue, { - apply(target, thisArg, args: Parameters>) { - const [batch, env, ctx] = args; - const context = instrumentContext(ctx); - args[2] = context; - - return withIsolationScope(isolationScope => { - const options = getFinalOptions(optionsCallback(env), env); - const waitUntil = context.waitUntil.bind(context); - - const client = init({ ...options, ctx: context }); - isolationScope.setClient(client); - - addCloudResourceContext(isolationScope); - - return startSpan( - { - op: 'faas.queue', - name: `process ${batch.queue}`, - attributes: { - 'faas.trigger': 'pubsub', - 'messaging.destination.name': batch.queue, - 'messaging.system': 'cloudflare', - 'messaging.batch.message_count': batch.messages.length, - 'messaging.message.retry.count': batch.messages.reduce( - (acc, message) => acc + message.attempts - 1, - 0, - ), - [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'queue.process', - [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.faas.cloudflare.queue', - [SEMANTIC_ATTRIBUTE_SENTRY_SOURCE]: 'task', - }, - }, - async () => { - try { - return await (target.apply(thisArg, args) as ReturnType); - } catch (e) { - captureException(e, { mechanism: { handled: false, type: 'auto.faas.cloudflare.queue' } }); - throw e; - } finally { - waitUntil(flushAndDispose(client)); - } - }, - ); - }); - }, - }); - - markAsInstrumented(handler.queue); - } - - if ('tail' in handler && typeof handler.tail === 'function' && !isInstrumented(handler.tail)) { - handler.tail = new Proxy(handler.tail, { - apply(target, thisArg, args: Parameters>) { - const [, env, ctx] = args; - const context = instrumentContext(ctx); - args[2] = context; - - return withIsolationScope(async isolationScope => { - const options = getFinalOptions(optionsCallback(env), env); - - const waitUntil = context.waitUntil.bind(context); - - const client = init({ ...options, ctx: context }); - isolationScope.setClient(client); - - 
addCloudResourceContext(isolationScope); - - try { - return await (target.apply(thisArg, args) as ReturnType); - } catch (e) { - captureException(e, { mechanism: { handled: false, type: 'auto.faas.cloudflare.tail' } }); - throw e; - } finally { - waitUntil(flushAndDispose(client)); - } - }); - }, - }); - - markAsInstrumented(handler.tail); - } - - // This is here because Miniflare sometimes cannot get instrumented - } catch { - // Do not console anything here, we don't want to spam the console with errors - } - - return handler; -} diff --git a/packages/cloudflare/src/index.ts b/packages/cloudflare/src/index.ts index 62263627aa24..affb4a4f0b45 100644 --- a/packages/cloudflare/src/index.ts +++ b/packages/cloudflare/src/index.ts @@ -50,6 +50,7 @@ export { setTag, setTags, setUser, + setConversationId, getSpanStatusFromHttpCode, setHttpStatus, withScope, @@ -108,7 +109,7 @@ export { instrumentLangGraph, } from '@sentry/core'; -export { withSentry } from './handler'; +export { withSentry } from './withSentry'; export { instrumentDurableObjectWithSentry } from './durableobject'; export { sentryPagesPlugin } from './pages-plugin'; diff --git a/packages/cloudflare/src/instrumentations/worker/instrumentEmail.ts b/packages/cloudflare/src/instrumentations/worker/instrumentEmail.ts new file mode 100644 index 000000000000..8c91bf2cb3d2 --- /dev/null +++ b/packages/cloudflare/src/instrumentations/worker/instrumentEmail.ts @@ -0,0 +1,83 @@ +import type { EmailMessage, ExportedHandler } from '@cloudflare/workers-types'; +import { + captureException, + SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN, + SEMANTIC_ATTRIBUTE_SENTRY_SOURCE, + startSpan, + withIsolationScope, +} from '@sentry/core'; +import type { CloudflareOptions } from '../../client'; +import { flushAndDispose } from '../../flush'; +import { isInstrumented, markAsInstrumented } from '../../instrument'; +import { getFinalOptions } from '../../options'; +import { addCloudResourceContext } from '../../scope-utils'; +import { init } 
from '../../sdk'; +import { instrumentContext } from '../../utils/instrumentContext'; + +/** + * Core email handler logic - wraps execution with Sentry instrumentation. + */ +function wrapEmailHandler( + emailMessage: EmailMessage, + options: CloudflareOptions, + context: ExecutionContext, + fn: () => unknown, +): unknown { + return withIsolationScope(isolationScope => { + const waitUntil = context.waitUntil.bind(context); + + const client = init({ ...options, ctx: context }); + isolationScope.setClient(client); + + addCloudResourceContext(isolationScope); + + return startSpan( + { + op: 'faas.email', + name: `Handle Email ${emailMessage.to}`, + attributes: { + 'faas.trigger': 'email', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.faas.cloudflare.email', + [SEMANTIC_ATTRIBUTE_SENTRY_SOURCE]: 'task', + }, + }, + async () => { + try { + return await fn(); + } catch (e) { + captureException(e, { mechanism: { handled: false, type: 'auto.faas.cloudflare.email' } }); + throw e; + } finally { + waitUntil(flushAndDispose(client)); + } + }, + ); + }); +} + +/** + * Instruments an email handler for ExportedHandler (env/ctx come from args). 
+ */ +// eslint-disable-next-line @typescript-eslint/no-explicit-any +export function instrumentExportedHandlerEmail>( + handler: T, + optionsCallback: (env: Parameters>[1]) => CloudflareOptions | undefined, +): void { + if (!('email' in handler) || typeof handler.email !== 'function' || isInstrumented(handler.email)) { + return; + } + + handler.email = new Proxy(handler.email, { + apply(target, thisArg, args: Parameters>) { + const [emailMessage, env, ctx] = args; + const context = instrumentContext(ctx); + args[2] = context; + + const options = getFinalOptions(optionsCallback(env), env); + + return wrapEmailHandler(emailMessage, options, context, () => target.apply(thisArg, args)); + }, + }); + + markAsInstrumented(handler.email); +} diff --git a/packages/cloudflare/src/instrumentations/worker/instrumentFetch.ts b/packages/cloudflare/src/instrumentations/worker/instrumentFetch.ts new file mode 100644 index 000000000000..be58fa07e18f --- /dev/null +++ b/packages/cloudflare/src/instrumentations/worker/instrumentFetch.ts @@ -0,0 +1,33 @@ +import type { ExportedHandler } from '@cloudflare/workers-types'; +import type { CloudflareOptions } from '../../client'; +import { isInstrumented, markAsInstrumented } from '../../instrument'; +import { getFinalOptions } from '../../options'; +import { wrapRequestHandler } from '../../request'; +import { instrumentContext } from '../../utils/instrumentContext'; + +/** + * Instruments a fetch handler for ExportedHandler (env/ctx come from args). 
+ */ +// eslint-disable-next-line @typescript-eslint/no-explicit-any +export function instrumentExportedHandlerFetch>( + handler: T, + optionsCallback: (env: Parameters>[1]) => CloudflareOptions | undefined, +): void { + if (!('fetch' in handler) || typeof handler.fetch !== 'function' || isInstrumented(handler.fetch)) { + return; + } + + handler.fetch = new Proxy(handler.fetch, { + apply(target, thisArg, args: Parameters>) { + const [request, env, ctx] = args; + const context = instrumentContext(ctx); + args[2] = context; + + const options = getFinalOptions(optionsCallback(env), env); + + return wrapRequestHandler({ options, request, context }, () => target.apply(thisArg, args)); + }, + }); + + markAsInstrumented(handler.fetch); +} diff --git a/packages/cloudflare/src/instrumentations/worker/instrumentQueue.ts b/packages/cloudflare/src/instrumentations/worker/instrumentQueue.ts new file mode 100644 index 000000000000..366fb7e98f51 --- /dev/null +++ b/packages/cloudflare/src/instrumentations/worker/instrumentQueue.ts @@ -0,0 +1,89 @@ +import type { ExportedHandler, MessageBatch } from '@cloudflare/workers-types'; +import { + captureException, + SEMANTIC_ATTRIBUTE_SENTRY_OP, + SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN, + SEMANTIC_ATTRIBUTE_SENTRY_SOURCE, + startSpan, + withIsolationScope, +} from '@sentry/core'; +import type { CloudflareOptions } from '../../client'; +import { flushAndDispose } from '../../flush'; +import { isInstrumented, markAsInstrumented } from '../../instrument'; +import { getFinalOptions } from '../../options'; +import { addCloudResourceContext } from '../../scope-utils'; +import { init } from '../../sdk'; +import { instrumentContext } from '../../utils/instrumentContext'; + +/** + * Core queue handler logic - wraps execution with Sentry instrumentation. 
+ */ +function wrapQueueHandler( + batch: MessageBatch, + options: CloudflareOptions, + context: ExecutionContext, + fn: () => unknown, +): unknown { + return withIsolationScope(isolationScope => { + const waitUntil = context.waitUntil.bind(context); + + const client = init({ ...options, ctx: context }); + isolationScope.setClient(client); + + addCloudResourceContext(isolationScope); + + return startSpan( + { + op: 'faas.queue', + name: `process ${batch.queue}`, + attributes: { + 'faas.trigger': 'pubsub', + 'messaging.destination.name': batch.queue, + 'messaging.system': 'cloudflare', + 'messaging.batch.message_count': batch.messages.length, + 'messaging.message.retry.count': batch.messages.reduce((acc, message) => acc + message.attempts - 1, 0), + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'queue.process', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.faas.cloudflare.queue', + [SEMANTIC_ATTRIBUTE_SENTRY_SOURCE]: 'task', + }, + }, + async () => { + try { + return await fn(); + } catch (e) { + captureException(e, { mechanism: { handled: false, type: 'auto.faas.cloudflare.queue' } }); + throw e; + } finally { + waitUntil(flushAndDispose(client)); + } + }, + ); + }); +} + +/** + * Instruments a queue handler for ExportedHandler (env/ctx come from args). 
+ */ +// eslint-disable-next-line @typescript-eslint/no-explicit-any +export function instrumentExportedHandlerQueue>( + handler: T, + optionsCallback: (env: Parameters>[1]) => CloudflareOptions | undefined, +): void { + if (!('queue' in handler) || typeof handler.queue !== 'function' || isInstrumented(handler.queue)) { + return; + } + + handler.queue = new Proxy(handler.queue, { + apply(target, thisArg, args: Parameters>) { + const [batch, env, ctx] = args; + const context = instrumentContext(ctx); + args[2] = context; + + const options = getFinalOptions(optionsCallback(env), env); + + return wrapQueueHandler(batch, options, context, () => target.apply(thisArg, args)); + }, + }); + + markAsInstrumented(handler.queue); +} diff --git a/packages/cloudflare/src/instrumentations/worker/instrumentScheduled.ts b/packages/cloudflare/src/instrumentations/worker/instrumentScheduled.ts new file mode 100644 index 000000000000..2ef682829bcb --- /dev/null +++ b/packages/cloudflare/src/instrumentations/worker/instrumentScheduled.ts @@ -0,0 +1,85 @@ +import type { ExportedHandler, ScheduledController } from '@cloudflare/workers-types'; +import { + captureException, + SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN, + SEMANTIC_ATTRIBUTE_SENTRY_SOURCE, + startSpan, + withIsolationScope, +} from '@sentry/core'; +import type { CloudflareOptions } from '../../client'; +import { flushAndDispose } from '../../flush'; +import { isInstrumented, markAsInstrumented } from '../../instrument'; +import { getFinalOptions } from '../../options'; +import { addCloudResourceContext } from '../../scope-utils'; +import { init } from '../../sdk'; +import { instrumentContext } from '../../utils/instrumentContext'; + +/** + * Core scheduled handler logic - wraps execution with Sentry instrumentation. 
+ */ +function wrapScheduledHandler( + controller: ScheduledController, + options: CloudflareOptions, + context: ExecutionContext, + fn: () => unknown, +): unknown { + return withIsolationScope(isolationScope => { + const waitUntil = context.waitUntil.bind(context); + + const client = init({ ...options, ctx: context }); + isolationScope.setClient(client); + + addCloudResourceContext(isolationScope); + + return startSpan( + { + op: 'faas.cron', + name: `Scheduled Cron ${controller.cron}`, + attributes: { + 'faas.cron': controller.cron, + 'faas.time': new Date(controller.scheduledTime).toISOString(), + 'faas.trigger': 'timer', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.faas.cloudflare.scheduled', + [SEMANTIC_ATTRIBUTE_SENTRY_SOURCE]: 'task', + }, + }, + async () => { + try { + return await fn(); + } catch (e) { + captureException(e, { mechanism: { handled: false, type: 'auto.faas.cloudflare.scheduled' } }); + throw e; + } finally { + waitUntil(flushAndDispose(client)); + } + }, + ); + }); +} + +/** + * Instruments a scheduled handler for ExportedHandler (env/ctx come from args). 
+ */ +// eslint-disable-next-line @typescript-eslint/no-explicit-any +export function instrumentExportedHandlerScheduled>( + handler: T, + optionsCallback: (env: Parameters>[1]) => CloudflareOptions | undefined, +): void { + if (!('scheduled' in handler) || typeof handler.scheduled !== 'function' || isInstrumented(handler.scheduled)) { + return; + } + + handler.scheduled = new Proxy(handler.scheduled, { + apply(target, thisArg, args: Parameters>) { + const [controller, env, ctx] = args; + const context = instrumentContext(ctx); + args[2] = context; + + const options = getFinalOptions(optionsCallback(env), env); + + return wrapScheduledHandler(controller, options, context, () => target.apply(thisArg, args)); + }, + }); + + markAsInstrumented(handler.scheduled); +} diff --git a/packages/cloudflare/src/instrumentations/worker/instrumentTail.ts b/packages/cloudflare/src/instrumentations/worker/instrumentTail.ts new file mode 100644 index 000000000000..f6b2e4492106 --- /dev/null +++ b/packages/cloudflare/src/instrumentations/worker/instrumentTail.ts @@ -0,0 +1,60 @@ +import type { ExportedHandler } from '@cloudflare/workers-types'; +import { captureException, withIsolationScope } from '@sentry/core'; +import type { CloudflareOptions } from '../../client'; +import { flushAndDispose } from '../../flush'; +import { isInstrumented, markAsInstrumented } from '../../instrument'; +import { getFinalOptions } from '../../options'; +import { addCloudResourceContext } from '../../scope-utils'; +import { init } from '../../sdk'; +import { instrumentContext } from '../../utils/instrumentContext'; + +/** + * Core tail handler logic - wraps execution with Sentry instrumentation. + * Note: tail handlers don't create spans, just error capture. 
+ */ +function wrapTailHandler(options: CloudflareOptions, context: ExecutionContext, fn: () => unknown): unknown { + return withIsolationScope(async isolationScope => { + const waitUntil = context.waitUntil.bind(context); + + const client = init({ ...options, ctx: context }); + isolationScope.setClient(client); + + addCloudResourceContext(isolationScope); + + try { + return await fn(); + } catch (e) { + captureException(e, { mechanism: { handled: false, type: 'auto.faas.cloudflare.tail' } }); + throw e; + } finally { + waitUntil(flushAndDispose(client)); + } + }); +} + +/** + * Instruments a tail handler for ExportedHandler (env/ctx come from args). + */ +// eslint-disable-next-line @typescript-eslint/no-explicit-any +export function instrumentExportedHandlerTail>( + handler: T, + optionsCallback: (env: Parameters>[1]) => CloudflareOptions | undefined, +): void { + if (!('tail' in handler) || typeof handler.tail !== 'function' || isInstrumented(handler.tail)) { + return; + } + + handler.tail = new Proxy(handler.tail, { + apply(target, thisArg, args: Parameters>) { + const [, env, ctx] = args; + const context = instrumentContext(ctx); + args[2] = context; + + const options = getFinalOptions(optionsCallback(env), env); + + return wrapTailHandler(options, context, () => target.apply(thisArg, args)); + }, + }); + + markAsInstrumented(handler.tail); +} diff --git a/packages/cloudflare/src/request.ts b/packages/cloudflare/src/request.ts index 5ad215aab428..9d8d63eab8c1 100644 --- a/packages/cloudflare/src/request.ts +++ b/packages/cloudflare/src/request.ts @@ -162,7 +162,7 @@ export function wrapRequestHandler( statusText: res.statusText, headers: res.headers, }); - } catch (e) { + } catch (_e) { // tee() failed (e.g stream already locked) - fall back to non-streaming handling span.end(); waitUntil?.(flushAndDispose(client)); diff --git a/packages/cloudflare/src/withSentry.ts b/packages/cloudflare/src/withSentry.ts new file mode 100644 index 000000000000..addc82429b85 
--- /dev/null +++ b/packages/cloudflare/src/withSentry.ts @@ -0,0 +1,69 @@ +import { setAsyncLocalStorageAsyncContextStrategy } from './async'; +import type { CloudflareOptions } from './client'; +import { isInstrumented, markAsInstrumented } from './instrument'; +import { getHonoIntegration } from './integrations/hono'; +import { instrumentExportedHandlerEmail } from './instrumentations/worker/instrumentEmail'; +import { instrumentExportedHandlerFetch } from './instrumentations/worker/instrumentFetch'; +import { instrumentExportedHandlerQueue } from './instrumentations/worker/instrumentQueue'; +import { instrumentExportedHandlerScheduled } from './instrumentations/worker/instrumentScheduled'; +import { instrumentExportedHandlerTail } from './instrumentations/worker/instrumentTail'; + +/** + * Wrapper for Cloudflare handlers. + * + * Initializes the SDK and wraps the handler with Sentry instrumentation. + * + * Automatically instruments the `fetch` method of the handler. + * + * @param optionsCallback Function that returns the options for the SDK initialization. + * @param handler {ExportedHandler} The handler to wrap. + * @returns The wrapped handler. 
+ */ +export function withSentry< + Env = unknown, + QueueHandlerMessage = unknown, + CfHostMetadata = unknown, + T extends ExportedHandler = ExportedHandler< + Env, + QueueHandlerMessage, + CfHostMetadata + >, +>(optionsCallback: (env: Env) => CloudflareOptions | undefined, handler: T): T { + setAsyncLocalStorageAsyncContextStrategy(); + + try { + instrumentExportedHandlerFetch(handler, optionsCallback); + instrumentHonoErrorHandler(handler); + instrumentExportedHandlerScheduled(handler, optionsCallback); + instrumentExportedHandlerEmail(handler, optionsCallback); + instrumentExportedHandlerQueue(handler, optionsCallback); + instrumentExportedHandlerTail(handler, optionsCallback); + // This is here because Miniflare sometimes cannot get instrumented + } catch { + // Do not console anything here, we don't want to spam the console with errors + } + + return handler; +} + +// eslint-disable-next-line @typescript-eslint/no-explicit-any +function instrumentHonoErrorHandler>(handler: T): void { + if ( + 'onError' in handler && + 'errorHandler' in handler && + typeof handler.errorHandler === 'function' && + !isInstrumented(handler.errorHandler) + ) { + handler.errorHandler = new Proxy(handler.errorHandler, { + apply(target, thisArg, args) { + const [err, context] = args; + + getHonoIntegration()?.handleHonoException(err, context); + + return Reflect.apply(target, thisArg, args); + }, + }); + + markAsInstrumented(handler.errorHandler); + } +} diff --git a/packages/cloudflare/src/wrapMethodWithSentry.ts b/packages/cloudflare/src/wrapMethodWithSentry.ts index 3c719e7da4b1..2361ee5b718d 100644 --- a/packages/cloudflare/src/wrapMethodWithSentry.ts +++ b/packages/cloudflare/src/wrapMethodWithSentry.ts @@ -68,13 +68,17 @@ export function wrapMethodWithSentry( const waitUntil = context?.waitUntil?.bind?.(context); - const currentClient = scope.getClient(); - if (!currentClient) { + let currentClient = scope.getClient(); + // Check if client exists AND is still usable (transport 
not disposed) + // This handles the case where a previous handler disposed the client + // but the scope still holds a reference to it (e.g., alarm handlers in Durable Objects) + if (!currentClient?.getTransport()) { const client = init({ ...wrapperOptions.options, ctx: context as unknown as ExecutionContext | undefined }); scope.setClient(client); + currentClient = client; } - const clientToDispose = currentClient || scope.getClient(); + const clientToDispose = currentClient; if (!wrapperOptions.spanName) { try { diff --git a/packages/cloudflare/test/handler.test.ts b/packages/cloudflare/test/handler.test.ts deleted file mode 100644 index 52ed02d07ee1..000000000000 --- a/packages/cloudflare/test/handler.test.ts +++ /dev/null @@ -1,1254 +0,0 @@ -// Note: These tests run the handler in Node.js, which has some differences to the cloudflare workers runtime. -// Although this is not ideal, this is the best we can do until we have a better way to test cloudflare workers. - -import type { - ExecutionContext, - ForwardableEmailMessage, - MessageBatch, - ScheduledController, - TraceItem, -} from '@cloudflare/workers-types'; -import type { Event } from '@sentry/core'; -import * as SentryCore from '@sentry/core'; -import { beforeEach, describe, expect, onTestFinished, test, vi } from 'vitest'; -import { CloudflareClient } from '../src/client'; -import { withSentry } from '../src/handler'; -import { markAsInstrumented } from '../src/instrument'; -import * as HonoIntegration from '../src/integrations/hono'; - -// Custom type for hono-like apps (cloudflare handlers) that include errorHandler and onError -type HonoLikeApp = ExportedHandler< - Env, - QueueHandlerMessage, - CfHostMetadata -> & { - onError?: () => void; - errorHandler?: (err: Error) => Response; -}; - -const MOCK_ENV = { - SENTRY_DSN: 'https://public@dsn.ingest.sentry.io/1337', - SENTRY_RELEASE: '1.1.1', -}; - -// Mock env without DSN for tests that should not initialize the SDK -const MOCK_ENV_WITHOUT_DSN = { - 
SENTRY_RELEASE: '1.1.1', -}; - -function addDelayedWaitUntil(context: ExecutionContext) { - context.waitUntil(new Promise(resolve => setTimeout(() => resolve()))); -} - -describe('withSentry', () => { - beforeEach(() => { - vi.clearAllMocks(); - }); - - describe('fetch handler', () => { - test('executes options callback with env', async () => { - const handler = { - fetch(_request, _env, _context) { - return new Response('test'); - }, - } satisfies ExportedHandler; - - const optionsCallback = vi.fn().mockReturnValue({}); - - const wrappedHandler = withSentry(optionsCallback, handler); - await wrappedHandler.fetch?.(new Request('https://example.com'), MOCK_ENV, createMockExecutionContext()); - - expect(optionsCallback).toHaveBeenCalledTimes(1); - expect(optionsCallback).toHaveBeenLastCalledWith(MOCK_ENV); - }); - - test('passes through the handler response', async () => { - const response = new Response('test'); - const handler = { - async fetch(_request, _env, _context) { - return response; - }, - } satisfies ExportedHandler; - - const wrappedHandler = withSentry(env => ({ dsn: env.SENTRY_DSN }), handler); - const result = await wrappedHandler.fetch?.( - new Request('https://example.com'), - MOCK_ENV, - createMockExecutionContext(), - ); - - // Response may be wrapped for streaming detection, verify content - expect(result?.status).toBe(response.status); - if (result) { - expect(await result.text()).toBe('test'); - } - }); - - test('merges options from env and callback', async () => { - const handler = { - fetch(_request, _env, _context) { - throw new Error('test'); - }, - } satisfies ExportedHandler; - - let sentryEvent: Event = {}; - - const wrappedHandler = withSentry( - env => ({ - dsn: env.SENTRY_DSN, - beforeSend(event) { - sentryEvent = event; - return null; - }, - }), - handler, - ); - - try { - await wrappedHandler.fetch?.(new Request('https://example.com'), MOCK_ENV, createMockExecutionContext()); - } catch { - // ignore - } - - 
expect(sentryEvent.release).toEqual('1.1.1'); - }); - - test('callback options take precedence over env options', async () => { - const handler = { - fetch(_request, _env, _context) { - throw new Error('test'); - }, - } satisfies ExportedHandler; - - let sentryEvent: Event = {}; - - const wrappedHandler = withSentry( - env => ({ - dsn: env.SENTRY_DSN, - release: '2.0.0', - beforeSend(event) { - sentryEvent = event; - return null; - }, - }), - handler, - ); - - try { - await wrappedHandler.fetch?.(new Request('https://example.com'), MOCK_ENV, createMockExecutionContext()); - } catch { - // ignore - } - - expect(sentryEvent.release).toEqual('2.0.0'); - }); - - test('flush must be called when all waitUntil are done', async () => { - const flush = vi.spyOn(SentryCore.Client.prototype, 'flush'); - vi.useFakeTimers(); - onTestFinished(() => { - vi.useRealTimers(); - }); - const handler = { - fetch(_request, _env, _context) { - addDelayedWaitUntil(_context); - return new Response('test'); - }, - } satisfies ExportedHandler; - - const wrappedHandler = withSentry(vi.fn(), handler); - const waits: Promise[] = []; - const waitUntil = vi.fn(promise => waits.push(promise)); - await wrappedHandler.fetch?.(new Request('https://example.com'), MOCK_ENV_WITHOUT_DSN, { - waitUntil, - } as unknown as ExecutionContext); - expect(flush).not.toBeCalled(); - expect(waitUntil).toBeCalled(); - vi.advanceTimersToNextTimer().runAllTimers(); - await Promise.all(waits); - expect(flush).toHaveBeenCalledOnce(); - }); - }); - - describe('scheduled handler', () => { - test('executes options callback with env', async () => { - const handler = { - scheduled(_controller, _env, _context) { - return; - }, - } satisfies ExportedHandler; - - const optionsCallback = vi.fn().mockReturnValue({}); - - const wrappedHandler = withSentry(optionsCallback, handler); - await wrappedHandler.scheduled?.(createMockScheduledController(), MOCK_ENV, createMockExecutionContext()); - - 
expect(optionsCallback).toHaveBeenCalledTimes(1); - expect(optionsCallback).toHaveBeenLastCalledWith(MOCK_ENV); - }); - - test('merges options from env and callback', async () => { - const handler = { - scheduled(_controller, _env, _context) { - SentryCore.captureMessage('cloud_resource'); - return; - }, - } satisfies ExportedHandler; - - let sentryEvent: Event = {}; - const wrappedHandler = withSentry( - env => ({ - dsn: env.SENTRY_DSN, - beforeSend(event) { - sentryEvent = event; - return null; - }, - }), - handler, - ); - await wrappedHandler.scheduled?.(createMockScheduledController(), MOCK_ENV, createMockExecutionContext()); - - expect(sentryEvent.release).toBe('1.1.1'); - }); - - test('callback options take precedence over env options', async () => { - const handler = { - scheduled(_controller, _env, _context) { - SentryCore.captureMessage('cloud_resource'); - return; - }, - } satisfies ExportedHandler; - - let sentryEvent: Event = {}; - const wrappedHandler = withSentry( - env => ({ - dsn: env.SENTRY_DSN, - release: '2.0.0', - beforeSend(event) { - sentryEvent = event; - return null; - }, - }), - handler, - ); - await wrappedHandler.scheduled?.(createMockScheduledController(), MOCK_ENV, createMockExecutionContext()); - - expect(sentryEvent.release).toEqual('2.0.0'); - }); - - test('flushes the event after the handler is done using the cloudflare context.waitUntil', async () => { - const handler = { - scheduled(_controller, _env, _context) { - return; - }, - } satisfies ExportedHandler; - - const context = createMockExecutionContext(); - const waitUntilSpy = vi.spyOn(context, 'waitUntil'); - const wrappedHandler = withSentry(env => ({ dsn: env.SENTRY_DSN }), handler); - await wrappedHandler.scheduled?.(createMockScheduledController(), MOCK_ENV, context); - - expect(waitUntilSpy).toHaveBeenCalledTimes(1); - expect(waitUntilSpy).toHaveBeenLastCalledWith(expect.any(Promise)); - }); - - test('creates a cloudflare client and sets it on the handler', async () => { 
- const initAndBindSpy = vi.spyOn(SentryCore, 'initAndBind'); - const handler = { - scheduled(_controller, _env, _context) { - return; - }, - } satisfies ExportedHandler; - - const wrappedHandler = withSentry(env => ({ dsn: env.SENTRY_DSN }), handler); - await wrappedHandler.scheduled?.(createMockScheduledController(), MOCK_ENV, createMockExecutionContext()); - - expect(initAndBindSpy).toHaveBeenCalledTimes(1); - expect(initAndBindSpy).toHaveBeenLastCalledWith(CloudflareClient, expect.any(Object)); - }); - - describe('scope instrumentation', () => { - test('adds cloud resource context', async () => { - const handler = { - scheduled(_controller, _env, _context) { - SentryCore.captureMessage('cloud_resource'); - return; - }, - } satisfies ExportedHandler; - - let sentryEvent: Event = {}; - const wrappedHandler = withSentry( - env => ({ - dsn: env.SENTRY_DSN, - beforeSend(event) { - sentryEvent = event; - return null; - }, - }), - handler, - ); - await wrappedHandler.scheduled?.(createMockScheduledController(), MOCK_ENV, createMockExecutionContext()); - - expect(sentryEvent.contexts?.cloud_resource).toEqual({ 'cloud.provider': 'cloudflare' }); - }); - }); - - describe('error instrumentation', () => { - test('captures errors thrown by the handler', async () => { - const captureExceptionSpy = vi.spyOn(SentryCore, 'captureException'); - const error = new Error('test'); - - expect(captureExceptionSpy).not.toHaveBeenCalled(); - - const handler = { - scheduled(_controller, _env, _context) { - throw error; - }, - } satisfies ExportedHandler; - - const wrappedHandler = withSentry(env => ({ dsn: env.SENTRY_DSN }), handler); - try { - await wrappedHandler.scheduled?.(createMockScheduledController(), MOCK_ENV, createMockExecutionContext()); - } catch { - // ignore - } - - expect(captureExceptionSpy).toHaveBeenCalledTimes(1); - expect(captureExceptionSpy).toHaveBeenLastCalledWith(error, { - mechanism: { handled: false, type: 'auto.faas.cloudflare.scheduled' }, - }); - }); - - 
test('re-throws the error after capturing', async () => { - const error = new Error('test'); - const handler = { - scheduled(_controller, _env, _context) { - throw error; - }, - } satisfies ExportedHandler; - - const wrappedHandler = withSentry(env => ({ dsn: env.SENTRY_DSN }), handler); - - let thrownError: Error | undefined; - try { - await wrappedHandler.scheduled?.(createMockScheduledController(), MOCK_ENV, createMockExecutionContext()); - } catch (e: any) { - thrownError = e; - } - - expect(thrownError).toBe(error); - }); - }); - - describe('tracing instrumentation', () => { - test('creates a span that wraps scheduled invocation', async () => { - const handler = { - scheduled(_controller, _env, _context) { - return; - }, - } satisfies ExportedHandler; - - let sentryEvent: Event = {}; - const wrappedHandler = withSentry( - env => ({ - dsn: env.SENTRY_DSN, - tracesSampleRate: 1, - beforeSendTransaction(event) { - sentryEvent = event; - return null; - }, - }), - handler, - ); - - await wrappedHandler.scheduled?.(createMockScheduledController(), MOCK_ENV, createMockExecutionContext()); - - expect(sentryEvent.transaction).toEqual('Scheduled Cron 0 0 0 * * *'); - expect(sentryEvent.spans).toHaveLength(0); - expect(sentryEvent.contexts?.trace).toEqual({ - data: { - 'sentry.origin': 'auto.faas.cloudflare.scheduled', - 'sentry.op': 'faas.cron', - 'faas.cron': '0 0 0 * * *', - 'faas.time': expect.any(String), - 'faas.trigger': 'timer', - 'sentry.sample_rate': 1, - 'sentry.source': 'task', - }, - op: 'faas.cron', - origin: 'auto.faas.cloudflare.scheduled', - span_id: expect.stringMatching(/[a-f0-9]{16}/), - trace_id: expect.stringMatching(/[a-f0-9]{32}/), - }); - }); - }); - - test('flush must be called when all waitUntil are done', async () => { - const flush = vi.spyOn(SentryCore.Client.prototype, 'flush'); - vi.useFakeTimers(); - onTestFinished(() => { - vi.useRealTimers(); - }); - const handler = { - scheduled(_controller, _env, _context) { - 
addDelayedWaitUntil(_context); - return; - }, - } satisfies ExportedHandler; - - const wrappedHandler = withSentry(vi.fn(), handler); - const waits: Promise[] = []; - const waitUntil = vi.fn(promise => waits.push(promise)); - await wrappedHandler.scheduled?.(createMockScheduledController(), MOCK_ENV_WITHOUT_DSN, { - waitUntil, - } as unknown as ExecutionContext); - expect(flush).not.toBeCalled(); - expect(waitUntil).toBeCalled(); - vi.advanceTimersToNextTimer().runAllTimers(); - await Promise.all(waits); - expect(flush).toHaveBeenCalledOnce(); - }); - }); - - describe('email handler', () => { - test('executes options callback with env', async () => { - const handler = { - email(_message, _env, _context) { - return; - }, - } satisfies ExportedHandler; - - const optionsCallback = vi.fn().mockReturnValue({}); - - const wrappedHandler = withSentry(optionsCallback, handler); - await wrappedHandler.email?.(createMockEmailMessage(), MOCK_ENV, createMockExecutionContext()); - - expect(optionsCallback).toHaveBeenCalledTimes(1); - expect(optionsCallback).toHaveBeenLastCalledWith(MOCK_ENV); - }); - - test('merges options from env and callback', async () => { - const handler = { - email(_message, _env, _context) { - SentryCore.captureMessage('cloud_resource'); - return; - }, - } satisfies ExportedHandler; - - let sentryEvent: Event = {}; - const wrappedHandler = withSentry( - env => ({ - dsn: env.SENTRY_DSN, - beforeSend(event) { - sentryEvent = event; - return null; - }, - }), - handler, - ); - await wrappedHandler.email?.(createMockEmailMessage(), MOCK_ENV, createMockExecutionContext()); - - expect(sentryEvent.release).toBe('1.1.1'); - }); - - test('callback options take precedence over env options', async () => { - const handler = { - email(_message, _env, _context) { - SentryCore.captureMessage('cloud_resource'); - return; - }, - } satisfies ExportedHandler; - - let sentryEvent: Event = {}; - const wrappedHandler = withSentry( - env => ({ - dsn: env.SENTRY_DSN, - release: 
'2.0.0', - beforeSend(event) { - sentryEvent = event; - return null; - }, - }), - handler, - ); - await wrappedHandler.email?.(createMockEmailMessage(), MOCK_ENV, createMockExecutionContext()); - - expect(sentryEvent.release).toEqual('2.0.0'); - }); - - test('flushes the event after the handler is done using the cloudflare context.waitUntil', async () => { - const handler = { - email(_message, _env, _context) { - return; - }, - } satisfies ExportedHandler; - - const context = createMockExecutionContext(); - const waitUntilSpy = vi.spyOn(context, 'waitUntil'); - const wrappedHandler = withSentry(env => ({ dsn: env.SENTRY_DSN }), handler); - await wrappedHandler.email?.(createMockEmailMessage(), MOCK_ENV, context); - - expect(waitUntilSpy).toHaveBeenCalledTimes(1); - expect(waitUntilSpy).toHaveBeenLastCalledWith(expect.any(Promise)); - }); - - test('creates a cloudflare client and sets it on the handler', async () => { - const initAndBindSpy = vi.spyOn(SentryCore, 'initAndBind'); - const handler = { - email(_message, _env, _context) { - return; - }, - } satisfies ExportedHandler; - - const wrappedHandler = withSentry(env => ({ dsn: env.SENTRY_DSN }), handler); - await wrappedHandler.email?.(createMockEmailMessage(), MOCK_ENV, createMockExecutionContext()); - - expect(initAndBindSpy).toHaveBeenCalledTimes(1); - expect(initAndBindSpy).toHaveBeenLastCalledWith(CloudflareClient, expect.any(Object)); - }); - - describe('scope instrumentation', () => { - test('adds cloud resource context', async () => { - const handler = { - email(_message, _env, _context) { - SentryCore.captureMessage('cloud_resource'); - return; - }, - } satisfies ExportedHandler; - - let sentryEvent: Event = {}; - const wrappedHandler = withSentry( - env => ({ - dsn: env.SENTRY_DSN, - beforeSend(event) { - sentryEvent = event; - return null; - }, - }), - handler, - ); - await wrappedHandler.email?.(createMockEmailMessage(), MOCK_ENV, createMockExecutionContext()); - - 
expect(sentryEvent.contexts?.cloud_resource).toEqual({ 'cloud.provider': 'cloudflare' }); - }); - }); - - describe('error instrumentation', () => { - test('captures errors thrown by the handler', async () => { - const captureExceptionSpy = vi.spyOn(SentryCore, 'captureException'); - const error = new Error('test'); - - expect(captureExceptionSpy).not.toHaveBeenCalled(); - - const handler = { - email(_message, _env, _context) { - throw error; - }, - } satisfies ExportedHandler; - - const wrappedHandler = withSentry(env => ({ dsn: env.SENTRY_DSN }), handler); - try { - await wrappedHandler.email?.(createMockEmailMessage(), MOCK_ENV, createMockExecutionContext()); - } catch { - // ignore - } - - expect(captureExceptionSpy).toHaveBeenCalledTimes(1); - expect(captureExceptionSpy).toHaveBeenLastCalledWith(error, { - mechanism: { handled: false, type: 'auto.faas.cloudflare.email' }, - }); - }); - - test('re-throws the error after capturing', async () => { - const error = new Error('test'); - const handler = { - email(_message, _env, _context) { - throw error; - }, - } satisfies ExportedHandler; - - const wrappedHandler = withSentry(env => ({ dsn: env.SENTRY_DSN }), handler); - - let thrownError: Error | undefined; - try { - await wrappedHandler.email?.(createMockEmailMessage(), MOCK_ENV, createMockExecutionContext()); - } catch (e: any) { - thrownError = e; - } - - expect(thrownError).toBe(error); - }); - }); - - describe('tracing instrumentation', () => { - test('creates a span that wraps email invocation', async () => { - const handler = { - email(_message, _env, _context) { - return; - }, - } satisfies ExportedHandler; - - let sentryEvent: Event = {}; - const wrappedHandler = withSentry( - env => ({ - dsn: env.SENTRY_DSN, - tracesSampleRate: 1, - beforeSendTransaction(event) { - sentryEvent = event; - return null; - }, - }), - handler, - ); - - const emailMessage = createMockEmailMessage(); - await wrappedHandler.email?.(emailMessage, MOCK_ENV, 
createMockExecutionContext()); - - expect(sentryEvent.transaction).toEqual(`Handle Email ${emailMessage.to}`); - expect(sentryEvent.spans).toHaveLength(0); - expect(sentryEvent.contexts?.trace).toEqual({ - data: { - 'sentry.origin': 'auto.faas.cloudflare.email', - 'sentry.op': 'faas.email', - 'faas.trigger': 'email', - 'sentry.sample_rate': 1, - 'sentry.source': 'task', - }, - op: 'faas.email', - origin: 'auto.faas.cloudflare.email', - span_id: expect.stringMatching(/[a-f0-9]{16}/), - trace_id: expect.stringMatching(/[a-f0-9]{32}/), - }); - }); - }); - - test('flush must be called when all waitUntil are done', async () => { - const flush = vi.spyOn(SentryCore.Client.prototype, 'flush'); - vi.useFakeTimers(); - onTestFinished(() => { - vi.useRealTimers(); - }); - const handler = { - email(_controller, _env, _context) { - addDelayedWaitUntil(_context); - return; - }, - } satisfies ExportedHandler; - - const wrappedHandler = withSentry(vi.fn(), handler); - const waits: Promise[] = []; - const waitUntil = vi.fn(promise => waits.push(promise)); - await wrappedHandler.email?.(createMockEmailMessage(), MOCK_ENV_WITHOUT_DSN, { - waitUntil, - } as unknown as ExecutionContext); - expect(flush).not.toBeCalled(); - expect(waitUntil).toBeCalled(); - vi.advanceTimersToNextTimer().runAllTimers(); - await Promise.all(waits); - expect(flush).toHaveBeenCalledOnce(); - }); - }); - - describe('queue handler', () => { - test('executes options callback with env', async () => { - const handler = { - queue(_batch, _env, _context) { - return; - }, - } satisfies ExportedHandler; - - const optionsCallback = vi.fn().mockReturnValue({}); - - const wrappedHandler = withSentry(optionsCallback, handler); - await wrappedHandler.queue?.(createMockQueueBatch(), MOCK_ENV, createMockExecutionContext()); - - expect(optionsCallback).toHaveBeenCalledTimes(1); - expect(optionsCallback).toHaveBeenLastCalledWith(MOCK_ENV); - }); - - test('merges options from env and callback', async () => { - const handler 
= { - queue(_batch, _env, _context) { - SentryCore.captureMessage('cloud_resource'); - return; - }, - } satisfies ExportedHandler; - - let sentryEvent: Event = {}; - const wrappedHandler = withSentry( - env => ({ - dsn: env.SENTRY_DSN, - beforeSend(event) { - sentryEvent = event; - return null; - }, - }), - handler, - ); - await wrappedHandler.queue?.(createMockQueueBatch(), MOCK_ENV, createMockExecutionContext()); - - expect(sentryEvent.release).toBe('1.1.1'); - }); - - test('callback options take precedence over env options', async () => { - const handler = { - queue(_batch, _env, _context) { - SentryCore.captureMessage('cloud_resource'); - return; - }, - } satisfies ExportedHandler; - - let sentryEvent: Event = {}; - const wrappedHandler = withSentry( - env => ({ - dsn: env.SENTRY_DSN, - release: '2.0.0', - beforeSend(event) { - sentryEvent = event; - return null; - }, - }), - handler, - ); - await wrappedHandler.queue?.(createMockQueueBatch(), MOCK_ENV, createMockExecutionContext()); - - expect(sentryEvent.release).toEqual('2.0.0'); - }); - - test('flushes the event after the handler is done using the cloudflare context.waitUntil', async () => { - const handler = { - queue(_batch, _env, _context) { - return; - }, - } satisfies ExportedHandler; - - const context = createMockExecutionContext(); - const waitUntilSpy = vi.spyOn(context, 'waitUntil'); - const wrappedHandler = withSentry(env => ({ dsn: env.SENTRY_DSN }), handler); - await wrappedHandler.queue?.(createMockQueueBatch(), MOCK_ENV, context); - - expect(waitUntilSpy).toHaveBeenCalledTimes(1); - expect(waitUntilSpy).toHaveBeenLastCalledWith(expect.any(Promise)); - }); - - test('creates a cloudflare client and sets it on the handler', async () => { - const initAndBindSpy = vi.spyOn(SentryCore, 'initAndBind'); - const handler = { - queue(_batch, _env, _context) { - return; - }, - } satisfies ExportedHandler; - - const wrappedHandler = withSentry(env => ({ dsn: env.SENTRY_DSN }), handler); - await 
wrappedHandler.queue?.(createMockQueueBatch(), MOCK_ENV, createMockExecutionContext()); - - expect(initAndBindSpy).toHaveBeenCalledTimes(1); - expect(initAndBindSpy).toHaveBeenLastCalledWith(CloudflareClient, expect.any(Object)); - }); - - describe('scope instrumentation', () => { - test('adds cloud resource context', async () => { - const handler = { - queue(_batch, _env, _context) { - SentryCore.captureMessage('cloud_resource'); - return; - }, - } satisfies ExportedHandler; - - let sentryEvent: Event = {}; - const wrappedHandler = withSentry( - env => ({ - dsn: env.SENTRY_DSN, - beforeSend(event) { - sentryEvent = event; - return null; - }, - }), - handler, - ); - await wrappedHandler.queue?.(createMockQueueBatch(), MOCK_ENV, createMockExecutionContext()); - - expect(sentryEvent.contexts?.cloud_resource).toEqual({ 'cloud.provider': 'cloudflare' }); - }); - }); - - describe('error instrumentation', () => { - test('captures errors thrown by the handler', async () => { - const captureExceptionSpy = vi.spyOn(SentryCore, 'captureException'); - const error = new Error('test'); - - expect(captureExceptionSpy).not.toHaveBeenCalled(); - - const handler = { - queue(_batch, _env, _context) { - throw error; - }, - } satisfies ExportedHandler; - - const wrappedHandler = withSentry(env => ({ dsn: env.SENTRY_DSN }), handler); - try { - await wrappedHandler.queue?.(createMockQueueBatch(), MOCK_ENV, createMockExecutionContext()); - } catch { - // ignore - } - - expect(captureExceptionSpy).toHaveBeenCalledTimes(1); - expect(captureExceptionSpy).toHaveBeenLastCalledWith(error, { - mechanism: { handled: false, type: 'auto.faas.cloudflare.queue' }, - }); - }); - - test('re-throws the error after capturing', async () => { - const error = new Error('test'); - const handler = { - queue(_batch, _env, _context) { - throw error; - }, - } satisfies ExportedHandler; - - const wrappedHandler = withSentry(env => ({ dsn: env.SENTRY_DSN }), handler); - - let thrownError: Error | undefined; - try 
{ - await wrappedHandler.queue?.(createMockQueueBatch(), MOCK_ENV, createMockExecutionContext()); - } catch (e: any) { - thrownError = e; - } - - expect(thrownError).toBe(error); - }); - }); - - describe('tracing instrumentation', () => { - test('creates a span that wraps queue invocation with correct attributes', async () => { - const handler = { - queue(_batch, _env, _context) { - return; - }, - } satisfies ExportedHandler; - - let sentryEvent: Event = {}; - const wrappedHandler = withSentry( - env => ({ - dsn: env.SENTRY_DSN, - tracesSampleRate: 1, - beforeSendTransaction(event) { - sentryEvent = event; - return null; - }, - }), - handler, - ); - - const batch = createMockQueueBatch(); - await wrappedHandler.queue?.(batch, MOCK_ENV, createMockExecutionContext()); - - expect(sentryEvent.transaction).toEqual(`process ${batch.queue}`); - expect(sentryEvent.spans).toHaveLength(0); - expect(sentryEvent.contexts?.trace).toEqual({ - data: { - 'sentry.origin': 'auto.faas.cloudflare.queue', - 'sentry.op': 'queue.process', - 'faas.trigger': 'pubsub', - 'messaging.destination.name': batch.queue, - 'messaging.system': 'cloudflare', - 'messaging.batch.message_count': batch.messages.length, - 'messaging.message.retry.count': batch.messages.reduce((acc, message) => acc + message.attempts - 1, 0), - 'sentry.sample_rate': 1, - 'sentry.source': 'task', - }, - op: 'queue.process', - origin: 'auto.faas.cloudflare.queue', - span_id: expect.stringMatching(/[a-f0-9]{16}/), - trace_id: expect.stringMatching(/[a-f0-9]{32}/), - }); - }); - }); - - test('flush must be called when all waitUntil are done', async () => { - const flush = vi.spyOn(SentryCore.Client.prototype, 'flush'); - vi.useFakeTimers(); - onTestFinished(() => { - vi.useRealTimers(); - }); - const handler = { - queue(_controller, _env, _context) { - addDelayedWaitUntil(_context); - return; - }, - } satisfies ExportedHandler; - - const wrappedHandler = withSentry(vi.fn(), handler); - const waits: Promise[] = []; - const 
waitUntil = vi.fn(promise => waits.push(promise)); - await wrappedHandler.queue?.(createMockQueueBatch(), MOCK_ENV_WITHOUT_DSN, { - waitUntil, - } as unknown as ExecutionContext); - expect(flush).not.toBeCalled(); - expect(waitUntil).toBeCalled(); - vi.advanceTimersToNextTimer().runAllTimers(); - await Promise.all(waits); - expect(flush).toHaveBeenCalledOnce(); - }); - }); - - describe('tail handler', () => { - test('executes options callback with env', async () => { - const handler = { - tail(_event, _env, _context) { - return; - }, - } satisfies ExportedHandler; - - const optionsCallback = vi.fn().mockReturnValue({}); - - const wrappedHandler = withSentry(optionsCallback, handler); - await wrappedHandler.tail?.(createMockTailEvent(), MOCK_ENV, createMockExecutionContext()); - - expect(optionsCallback).toHaveBeenCalledTimes(1); - expect(optionsCallback).toHaveBeenLastCalledWith(MOCK_ENV); - }); - - test('merges options from env and callback', async () => { - const handler = { - tail(_event, _env, _context) { - SentryCore.captureMessage('cloud_resource'); - return; - }, - } satisfies ExportedHandler; - - let sentryEvent: Event = {}; - const wrappedHandler = withSentry( - env => ({ - dsn: env.SENTRY_DSN, - beforeSend(event) { - sentryEvent = event; - return null; - }, - }), - handler, - ); - await wrappedHandler.tail?.(createMockTailEvent(), MOCK_ENV, createMockExecutionContext()); - - expect(sentryEvent.release).toBe('1.1.1'); - }); - - test('callback options take precedence over env options', async () => { - const handler = { - tail(_event, _env, _context) { - SentryCore.captureMessage('cloud_resource'); - return; - }, - } satisfies ExportedHandler; - - let sentryEvent: Event = {}; - const wrappedHandler = withSentry( - env => ({ - dsn: env.SENTRY_DSN, - release: '2.0.0', - beforeSend(event) { - sentryEvent = event; - return null; - }, - }), - handler, - ); - await wrappedHandler.tail?.(createMockTailEvent(), MOCK_ENV, createMockExecutionContext()); - - 
expect(sentryEvent.release).toEqual('2.0.0'); - }); - - test('flushes the event after the handler is done using the cloudflare context.waitUntil', async () => { - const handler = { - tail(_event, _env, _context) { - return; - }, - } satisfies ExportedHandler; - - const context = createMockExecutionContext(); - const waitUntilSpy = vi.spyOn(context, 'waitUntil'); - const wrappedHandler = withSentry(env => ({ dsn: env.SENTRY_DSN }), handler); - await wrappedHandler.tail?.(createMockTailEvent(), MOCK_ENV, context); - - expect(waitUntilSpy).toHaveBeenCalledTimes(1); - expect(waitUntilSpy).toHaveBeenLastCalledWith(expect.any(Promise)); - }); - - test('creates a cloudflare client and sets it on the handler', async () => { - const initAndBindSpy = vi.spyOn(SentryCore, 'initAndBind'); - const handler = { - tail(_event, _env, _context) { - return; - }, - } satisfies ExportedHandler; - - const wrappedHandler = withSentry(env => ({ dsn: env.SENTRY_DSN }), handler); - await wrappedHandler.tail?.(createMockTailEvent(), MOCK_ENV, createMockExecutionContext()); - - expect(initAndBindSpy).toHaveBeenCalledTimes(1); - expect(initAndBindSpy).toHaveBeenLastCalledWith(CloudflareClient, expect.any(Object)); - }); - - describe('scope instrumentation', () => { - test('adds cloud resource context', async () => { - const handler = { - tail(_event, _env, _context) { - SentryCore.captureMessage('cloud_resource'); - return; - }, - } satisfies ExportedHandler; - - let sentryEvent: Event = {}; - const wrappedHandler = withSentry( - env => ({ - dsn: env.SENTRY_DSN, - beforeSend(event) { - sentryEvent = event; - return null; - }, - }), - handler, - ); - await wrappedHandler.tail?.(createMockTailEvent(), MOCK_ENV, createMockExecutionContext()); - - expect(sentryEvent.contexts?.cloud_resource).toEqual({ 'cloud.provider': 'cloudflare' }); - }); - }); - - describe('error instrumentation', () => { - test('captures errors thrown by the handler', async () => { - const captureExceptionSpy = 
vi.spyOn(SentryCore, 'captureException'); - const error = new Error('test'); - - expect(captureExceptionSpy).not.toHaveBeenCalled(); - - const handler = { - tail(_event, _env, _context) { - throw error; - }, - } satisfies ExportedHandler; - - const wrappedHandler = withSentry(env => ({ dsn: env.SENTRY_DSN }), handler); - try { - await wrappedHandler.tail?.(createMockTailEvent(), MOCK_ENV, createMockExecutionContext()); - } catch { - // ignore - } - - expect(captureExceptionSpy).toHaveBeenCalledTimes(1); - expect(captureExceptionSpy).toHaveBeenLastCalledWith(error, { - mechanism: { handled: false, type: 'auto.faas.cloudflare.tail' }, - }); - }); - - test('re-throws the error after capturing', async () => { - const error = new Error('test'); - const handler = { - tail(_event, _env, _context) { - throw error; - }, - } satisfies ExportedHandler; - - const wrappedHandler = withSentry(env => ({ dsn: env.SENTRY_DSN }), handler); - - let thrownError: Error | undefined; - try { - await wrappedHandler.tail?.(createMockTailEvent(), MOCK_ENV, createMockExecutionContext()); - } catch (e: any) { - thrownError = e; - } - - expect(thrownError).toBe(error); - }); - }); - - test('flush must be called when all waitUntil are done', async () => { - const flush = vi.spyOn(SentryCore.Client.prototype, 'flush'); - vi.useFakeTimers(); - onTestFinished(() => { - vi.useRealTimers(); - flush.mockRestore(); - }); - const handler = { - tail(_controller, _env, _context) { - addDelayedWaitUntil(_context); - return; - }, - } satisfies ExportedHandler; - - const wrappedHandler = withSentry(vi.fn(), handler); - const waits: Promise[] = []; - const waitUntil = vi.fn(promise => waits.push(promise)); - await wrappedHandler.tail?.(createMockTailEvent(), MOCK_ENV_WITHOUT_DSN, { - waitUntil, - } as unknown as ExecutionContext); - expect(flush).not.toBeCalled(); - expect(waitUntil).toBeCalled(); - vi.advanceTimersToNextTimer().runAllTimers(); - await Promise.all(waits); - 
expect(flush).toHaveBeenCalledOnce(); - }); - }); - - describe('hono errorHandler', () => { - test('calls Hono Integration to handle error captured by the errorHandler', async () => { - const error = new Error('test hono error'); - - const handleHonoException = vi.fn(); - vi.spyOn(HonoIntegration, 'getHonoIntegration').mockReturnValue({ handleHonoException } as any); - - const honoApp = { - fetch(_request, _env, _context) { - return new Response('test'); - }, - onError() {}, // hono-like onError - errorHandler(err: Error) { - return new Response(`Error: ${err.message}`, { status: 500 }); - }, - } satisfies HonoLikeApp; - - withSentry(env => ({ dsn: env.SENTRY_DSN }), honoApp); - - // simulates hono's error handling - const errorHandlerResponse = honoApp.errorHandler?.(error); - - expect(handleHonoException).toHaveBeenCalledTimes(1); - // 2nd param is context, which is undefined here - expect(handleHonoException).toHaveBeenLastCalledWith(error, undefined); - expect(errorHandlerResponse?.status).toBe(500); - }); - - test('preserves the original errorHandler functionality', async () => { - const originalErrorHandlerSpy = vi.fn().mockImplementation((err: Error) => { - return new Response(`Error: ${err.message}`, { status: 500 }); - }); - - const error = new Error('test hono error'); - - const honoApp = { - fetch(_request, _env, _context) { - return new Response('test'); - }, - onError() {}, // hono-like onError - errorHandler: originalErrorHandlerSpy, - } satisfies HonoLikeApp; - - withSentry(env => ({ dsn: env.SENTRY_DSN }), honoApp); - - // Call the errorHandler directly to simulate Hono's error handling - const errorHandlerResponse = honoApp.errorHandler?.(error); - - expect(originalErrorHandlerSpy).toHaveBeenCalledTimes(1); - expect(originalErrorHandlerSpy).toHaveBeenLastCalledWith(error); - expect(errorHandlerResponse?.status).toBe(500); - }); - - test('does not instrument an already instrumented errorHandler', async () => { - const captureExceptionSpy = 
vi.spyOn(SentryCore, 'captureException'); - const error = new Error('test hono error'); - - // Create a handler with an errorHandler that's already been instrumented - const originalErrorHandler = (err: Error) => { - return new Response(`Error: ${err.message}`, { status: 500 }); - }; - - // Mark as instrumented before wrapping - markAsInstrumented(originalErrorHandler); - - const honoApp = { - fetch(_request, _env, _context) { - return new Response('test'); - }, - onError() {}, // hono-like onError - errorHandler: originalErrorHandler, - } satisfies HonoLikeApp; - - withSentry(env => ({ dsn: env.SENTRY_DSN }), honoApp); - - // The errorHandler should not have been wrapped again - honoApp.errorHandler?.(error); - expect(captureExceptionSpy).not.toHaveBeenCalled(); - }); - }); -}); - -function createMockExecutionContext(): ExecutionContext { - return { - waitUntil: vi.fn(), - passThroughOnException: vi.fn(), - }; -} - -function createMockScheduledController(): ScheduledController { - return { - scheduledTime: 123, - cron: '0 0 0 * * *', - noRetry: vi.fn(), - }; -} - -function createMockEmailMessage(): ForwardableEmailMessage { - return { - from: 'sender@example.com', - to: 'recipient@example.com', - raw: new ReadableStream(), - rawSize: 1024, - headers: new Headers(), - setReject: vi.fn(), - forward: vi.fn(), - reply: vi.fn(), - }; -} - -function createMockQueueBatch(): MessageBatch { - return { - queue: 'test-queue', - messages: [ - { - id: '1', - timestamp: new Date(), - body: 'test message 1', - attempts: 1, - retry: vi.fn(), - ack: vi.fn(), - }, - { - id: '2', - timestamp: new Date(), - body: 'test message 2', - attempts: 2, - retry: vi.fn(), - ack: vi.fn(), - }, - ], - retryAll: vi.fn(), - ackAll: vi.fn(), - }; -} - -function createMockTailEvent(): TraceItem[] { - return [ - { - event: { - consumedEvents: [ - { - scriptName: 'test-script', - }, - ], - }, - eventTimestamp: Date.now(), - logs: [ - { - timestamp: Date.now(), - level: 'info', - message: 'Test log 
message', - }, - ], - exceptions: [], - diagnosticsChannelEvents: [], - scriptName: 'test-script', - outcome: 'ok', - truncated: false, - }, - ]; -} diff --git a/packages/cloudflare/test/instrumentations/worker/instrumentEmail.test.ts b/packages/cloudflare/test/instrumentations/worker/instrumentEmail.test.ts new file mode 100644 index 000000000000..5d2f01b428df --- /dev/null +++ b/packages/cloudflare/test/instrumentations/worker/instrumentEmail.test.ts @@ -0,0 +1,285 @@ +// Note: These tests run the handler in Node.js, which has some differences to the cloudflare workers runtime. +// Although this is not ideal, this is the best we can do until we have a better way to test cloudflare workers. + +import type { ExecutionContext, ForwardableEmailMessage } from '@cloudflare/workers-types'; +import type { Event } from '@sentry/core'; +import * as SentryCore from '@sentry/core'; +import { beforeEach, describe, expect, onTestFinished, test, vi } from 'vitest'; +import { CloudflareClient } from '../../../src/client'; +import { withSentry } from '../../../src/withSentry'; + +const MOCK_ENV = { + SENTRY_DSN: 'https://public@dsn.ingest.sentry.io/1337', + SENTRY_RELEASE: '1.1.1', +}; + +const MOCK_ENV_WITHOUT_DSN = { + SENTRY_RELEASE: '1.1.1', +}; + +function createMockExecutionContext(): ExecutionContext { + return { + waitUntil: vi.fn(), + passThroughOnException: vi.fn(), + }; +} + +function createMockEmailMessage(): ForwardableEmailMessage { + return { + from: 'sender@example.com', + to: 'recipient@example.com', + raw: new ReadableStream(), + rawSize: 1024, + headers: new Headers(), + setReject: vi.fn(), + forward: vi.fn(), + reply: vi.fn(), + }; +} + +function addDelayedWaitUntil(context: ExecutionContext) { + context.waitUntil(new Promise(resolve => setTimeout(() => resolve()))); +} + +describe('instrumentEmail', () => { + beforeEach(() => { + vi.clearAllMocks(); + }); + + test('executes options callback with env', async () => { + const handler = { + email(_message, _env, 
_context) { + return; + }, + } satisfies ExportedHandler; + + const optionsCallback = vi.fn().mockReturnValue({}); + + const wrappedHandler = withSentry(optionsCallback, handler); + await wrappedHandler.email?.(createMockEmailMessage(), MOCK_ENV, createMockExecutionContext()); + + expect(optionsCallback).toHaveBeenCalledTimes(1); + expect(optionsCallback).toHaveBeenLastCalledWith(MOCK_ENV); + }); + + test('merges options from env and callback', async () => { + const handler = { + email(_message, _env, _context) { + SentryCore.captureMessage('cloud_resource'); + return; + }, + } satisfies ExportedHandler; + + let sentryEvent: Event = {}; + const wrappedHandler = withSentry( + env => ({ + dsn: env.SENTRY_DSN, + beforeSend(event) { + sentryEvent = event; + return null; + }, + }), + handler, + ); + await wrappedHandler.email?.(createMockEmailMessage(), MOCK_ENV, createMockExecutionContext()); + + expect(sentryEvent.release).toBe('1.1.1'); + }); + + test('callback options take precedence over env options', async () => { + const handler = { + email(_message, _env, _context) { + SentryCore.captureMessage('cloud_resource'); + return; + }, + } satisfies ExportedHandler; + + let sentryEvent: Event = {}; + const wrappedHandler = withSentry( + env => ({ + dsn: env.SENTRY_DSN, + release: '2.0.0', + beforeSend(event) { + sentryEvent = event; + return null; + }, + }), + handler, + ); + await wrappedHandler.email?.(createMockEmailMessage(), MOCK_ENV, createMockExecutionContext()); + + expect(sentryEvent.release).toEqual('2.0.0'); + }); + + test('flushes the event after the handler is done using the cloudflare context.waitUntil', async () => { + const handler = { + email(_message, _env, _context) { + return; + }, + } satisfies ExportedHandler; + + const context = createMockExecutionContext(); + const waitUntilSpy = vi.spyOn(context, 'waitUntil'); + const wrappedHandler = withSentry(env => ({ dsn: env.SENTRY_DSN }), handler); + await wrappedHandler.email?.(createMockEmailMessage(), 
MOCK_ENV, context); + + expect(waitUntilSpy).toHaveBeenCalledTimes(1); + expect(waitUntilSpy).toHaveBeenLastCalledWith(expect.any(Promise)); + }); + + test('creates a cloudflare client and sets it on the handler', async () => { + const initAndBindSpy = vi.spyOn(SentryCore, 'initAndBind'); + const handler = { + email(_message, _env, _context) { + return; + }, + } satisfies ExportedHandler; + + const wrappedHandler = withSentry(env => ({ dsn: env.SENTRY_DSN }), handler); + await wrappedHandler.email?.(createMockEmailMessage(), MOCK_ENV, createMockExecutionContext()); + + expect(initAndBindSpy).toHaveBeenCalledTimes(1); + expect(initAndBindSpy).toHaveBeenLastCalledWith(CloudflareClient, expect.any(Object)); + }); + + describe('scope instrumentation', () => { + test('adds cloud resource context', async () => { + const handler = { + email(_message, _env, _context) { + SentryCore.captureMessage('cloud_resource'); + return; + }, + } satisfies ExportedHandler; + + let sentryEvent: Event = {}; + const wrappedHandler = withSentry( + env => ({ + dsn: env.SENTRY_DSN, + beforeSend(event) { + sentryEvent = event; + return null; + }, + }), + handler, + ); + await wrappedHandler.email?.(createMockEmailMessage(), MOCK_ENV, createMockExecutionContext()); + + expect(sentryEvent.contexts?.cloud_resource).toEqual({ 'cloud.provider': 'cloudflare' }); + }); + }); + + describe('error instrumentation', () => { + test('captures errors thrown by the handler', async () => { + const captureExceptionSpy = vi.spyOn(SentryCore, 'captureException'); + const error = new Error('test'); + + expect(captureExceptionSpy).not.toHaveBeenCalled(); + + const handler = { + email(_message, _env, _context) { + throw error; + }, + } satisfies ExportedHandler; + + const wrappedHandler = withSentry(env => ({ dsn: env.SENTRY_DSN }), handler); + try { + await wrappedHandler.email?.(createMockEmailMessage(), MOCK_ENV, createMockExecutionContext()); + } catch { + // ignore + } + + 
expect(captureExceptionSpy).toHaveBeenCalledTimes(1); + expect(captureExceptionSpy).toHaveBeenLastCalledWith(error, { + mechanism: { handled: false, type: 'auto.faas.cloudflare.email' }, + }); + }); + + test('re-throws the error after capturing', async () => { + const error = new Error('test'); + const handler = { + email(_message, _env, _context) { + throw error; + }, + } satisfies ExportedHandler; + + const wrappedHandler = withSentry(env => ({ dsn: env.SENTRY_DSN }), handler); + + let thrownError: Error | undefined; + try { + await wrappedHandler.email?.(createMockEmailMessage(), MOCK_ENV, createMockExecutionContext()); + } catch (e: any) { + thrownError = e; + } + + expect(thrownError).toBe(error); + }); + }); + + describe('tracing instrumentation', () => { + test('creates a span that wraps email invocation', async () => { + const handler = { + email(_message, _env, _context) { + return; + }, + } satisfies ExportedHandler; + + let sentryEvent: Event = {}; + const wrappedHandler = withSentry( + env => ({ + dsn: env.SENTRY_DSN, + tracesSampleRate: 1, + beforeSendTransaction(event) { + sentryEvent = event; + return null; + }, + }), + handler, + ); + + const emailMessage = createMockEmailMessage(); + await wrappedHandler.email?.(emailMessage, MOCK_ENV, createMockExecutionContext()); + + expect(sentryEvent.transaction).toEqual(`Handle Email ${emailMessage.to}`); + expect(sentryEvent.spans).toHaveLength(0); + expect(sentryEvent.contexts?.trace).toEqual({ + data: { + 'sentry.origin': 'auto.faas.cloudflare.email', + 'sentry.op': 'faas.email', + 'faas.trigger': 'email', + 'sentry.sample_rate': 1, + 'sentry.source': 'task', + }, + op: 'faas.email', + origin: 'auto.faas.cloudflare.email', + span_id: expect.stringMatching(/[a-f0-9]{16}/), + trace_id: expect.stringMatching(/[a-f0-9]{32}/), + }); + }); + }); + + test('flush must be called when all waitUntil are done', async () => { + const flush = vi.spyOn(SentryCore.Client.prototype, 'flush'); + vi.useFakeTimers(); + 
onTestFinished(() => { + vi.useRealTimers(); + }); + const handler = { + email(_controller, _env, _context) { + addDelayedWaitUntil(_context); + return; + }, + } satisfies ExportedHandler; + + const wrappedHandler = withSentry(vi.fn(), handler); + const waits: Promise<unknown>[] = []; + const waitUntil = vi.fn(promise => waits.push(promise)); + await wrappedHandler.email?.(createMockEmailMessage(), MOCK_ENV_WITHOUT_DSN, { + waitUntil, + } as unknown as ExecutionContext); + expect(flush).not.toBeCalled(); + expect(waitUntil).toBeCalled(); + vi.advanceTimersToNextTimer().runAllTimers(); + await Promise.all(waits); + expect(flush).toHaveBeenCalledOnce(); + }); +}); diff --git a/packages/cloudflare/test/instrumentations/worker/instrumentFetch.test.ts b/packages/cloudflare/test/instrumentations/worker/instrumentFetch.test.ts new file mode 100644 index 000000000000..1ae4b965d238 --- /dev/null +++ b/packages/cloudflare/test/instrumentations/worker/instrumentFetch.test.ts @@ -0,0 +1,156 @@ +// Note: These tests run the handler in Node.js, which has some differences to the cloudflare workers runtime. +// Although this is not ideal, this is the best we can do until we have a better way to test cloudflare workers.
+ +import type { ExecutionContext } from '@cloudflare/workers-types'; +import type { Event } from '@sentry/core'; +import * as SentryCore from '@sentry/core'; +import { beforeEach, describe, expect, onTestFinished, test, vi } from 'vitest'; +import { withSentry } from '../../../src/withSentry'; + +const MOCK_ENV = { + SENTRY_DSN: 'https://public@dsn.ingest.sentry.io/1337', + SENTRY_RELEASE: '1.1.1', +}; + +const MOCK_ENV_WITHOUT_DSN = { + SENTRY_RELEASE: '1.1.1', +}; + +function createMockExecutionContext(): ExecutionContext { + return { + waitUntil: vi.fn(), + passThroughOnException: vi.fn(), + }; +} + +function addDelayedWaitUntil(context: ExecutionContext) { + context.waitUntil(new Promise(resolve => setTimeout(() => resolve()))); +} + +describe('instrumentFetch', () => { + beforeEach(() => { + vi.clearAllMocks(); + }); + + test('executes options callback with env', async () => { + const handler = { + fetch(_request, _env, _context) { + return new Response('test'); + }, + } satisfies ExportedHandler; + + const optionsCallback = vi.fn().mockReturnValue({}); + + const wrappedHandler = withSentry(optionsCallback, handler); + await wrappedHandler.fetch?.(new Request('https://example.com'), MOCK_ENV, createMockExecutionContext()); + + expect(optionsCallback).toHaveBeenCalledTimes(1); + expect(optionsCallback).toHaveBeenLastCalledWith(MOCK_ENV); + }); + + test('passes through the handler response', async () => { + const response = new Response('test'); + const handler = { + async fetch(_request, _env, _context) { + return response; + }, + } satisfies ExportedHandler; + + const wrappedHandler = withSentry(env => ({ dsn: env.SENTRY_DSN }), handler); + const result = await wrappedHandler.fetch?.( + new Request('https://example.com'), + MOCK_ENV, + createMockExecutionContext(), + ); + + expect(result?.status).toBe(response.status); + if (result) { + expect(await result.text()).toBe('test'); + } + }); + + test('merges options from env and callback', async () => { + const 
handler = { + fetch(_request, _env, _context) { + throw new Error('test'); + }, + } satisfies ExportedHandler; + + let sentryEvent: Event = {}; + + const wrappedHandler = withSentry( + env => ({ + dsn: env.SENTRY_DSN, + beforeSend(event) { + sentryEvent = event; + return null; + }, + }), + handler, + ); + + try { + await wrappedHandler.fetch?.(new Request('https://example.com'), MOCK_ENV, createMockExecutionContext()); + } catch { + // ignore + } + + expect(sentryEvent.release).toEqual('1.1.1'); + }); + + test('callback options take precedence over env options', async () => { + const handler = { + fetch(_request, _env, _context) { + throw new Error('test'); + }, + } satisfies ExportedHandler; + + let sentryEvent: Event = {}; + + const wrappedHandler = withSentry( + env => ({ + dsn: env.SENTRY_DSN, + release: '2.0.0', + beforeSend(event) { + sentryEvent = event; + return null; + }, + }), + handler, + ); + + try { + await wrappedHandler.fetch?.(new Request('https://example.com'), MOCK_ENV, createMockExecutionContext()); + } catch { + // ignore + } + + expect(sentryEvent.release).toEqual('2.0.0'); + }); + + test('flush must be called when all waitUntil are done', async () => { + const flush = vi.spyOn(SentryCore.Client.prototype, 'flush'); + vi.useFakeTimers(); + onTestFinished(() => { + vi.useRealTimers(); + }); + const handler = { + fetch(_request, _env, _context) { + addDelayedWaitUntil(_context); + return new Response('test'); + }, + } satisfies ExportedHandler; + + const wrappedHandler = withSentry(vi.fn(), handler); + const waits: Promise<unknown>[] = []; + const waitUntil = vi.fn(promise => waits.push(promise)); + await wrappedHandler.fetch?.(new Request('https://example.com'), MOCK_ENV_WITHOUT_DSN, { + waitUntil, + } as unknown as ExecutionContext); + expect(flush).not.toBeCalled(); + expect(waitUntil).toBeCalled(); + vi.advanceTimersToNextTimer().runAllTimers(); + await Promise.all(waits); + expect(flush).toHaveBeenCalledOnce(); + }); +}); diff --git
a/packages/cloudflare/test/instrumentations/worker/instrumentQueue.test.ts b/packages/cloudflare/test/instrumentations/worker/instrumentQueue.test.ts new file mode 100644 index 000000000000..a38a8f24d79e --- /dev/null +++ b/packages/cloudflare/test/instrumentations/worker/instrumentQueue.test.ts @@ -0,0 +1,302 @@ +// Note: These tests run the handler in Node.js, which has some differences to the cloudflare workers runtime. +// Although this is not ideal, this is the best we can do until we have a better way to test cloudflare workers. + +import type { ExecutionContext, MessageBatch } from '@cloudflare/workers-types'; +import type { Event } from '@sentry/core'; +import * as SentryCore from '@sentry/core'; +import { beforeEach, describe, expect, onTestFinished, test, vi } from 'vitest'; +import { CloudflareClient } from '../../../src/client'; +import { withSentry } from '../../../src/withSentry'; + +const MOCK_ENV = { + SENTRY_DSN: 'https://public@dsn.ingest.sentry.io/1337', + SENTRY_RELEASE: '1.1.1', +}; + +const MOCK_ENV_WITHOUT_DSN = { + SENTRY_RELEASE: '1.1.1', +}; + +function createMockExecutionContext(): ExecutionContext { + return { + waitUntil: vi.fn(), + passThroughOnException: vi.fn(), + }; +} + +function createMockQueueBatch(): MessageBatch { + return { + queue: 'test-queue', + messages: [ + { + id: '1', + timestamp: new Date(), + body: 'test message 1', + attempts: 1, + retry: vi.fn(), + ack: vi.fn(), + }, + { + id: '2', + timestamp: new Date(), + body: 'test message 2', + attempts: 2, + retry: vi.fn(), + ack: vi.fn(), + }, + ], + retryAll: vi.fn(), + ackAll: vi.fn(), + }; +} + +function addDelayedWaitUntil(context: ExecutionContext) { + context.waitUntil(new Promise(resolve => setTimeout(() => resolve()))); +} + +describe('instrumentQueue', () => { + beforeEach(() => { + vi.clearAllMocks(); + }); + + test('executes options callback with env', async () => { + const handler = { + queue(_batch, _env, _context) { + return; + }, + } satisfies ExportedHandler; 
+ + const optionsCallback = vi.fn().mockReturnValue({}); + + const wrappedHandler = withSentry(optionsCallback, handler); + await wrappedHandler.queue?.(createMockQueueBatch(), MOCK_ENV, createMockExecutionContext()); + + expect(optionsCallback).toHaveBeenCalledTimes(1); + expect(optionsCallback).toHaveBeenLastCalledWith(MOCK_ENV); + }); + + test('merges options from env and callback', async () => { + const handler = { + queue(_batch, _env, _context) { + SentryCore.captureMessage('cloud_resource'); + return; + }, + } satisfies ExportedHandler; + + let sentryEvent: Event = {}; + const wrappedHandler = withSentry( + env => ({ + dsn: env.SENTRY_DSN, + beforeSend(event) { + sentryEvent = event; + return null; + }, + }), + handler, + ); + await wrappedHandler.queue?.(createMockQueueBatch(), MOCK_ENV, createMockExecutionContext()); + + expect(sentryEvent.release).toBe('1.1.1'); + }); + + test('callback options take precedence over env options', async () => { + const handler = { + queue(_batch, _env, _context) { + SentryCore.captureMessage('cloud_resource'); + return; + }, + } satisfies ExportedHandler; + + let sentryEvent: Event = {}; + const wrappedHandler = withSentry( + env => ({ + dsn: env.SENTRY_DSN, + release: '2.0.0', + beforeSend(event) { + sentryEvent = event; + return null; + }, + }), + handler, + ); + await wrappedHandler.queue?.(createMockQueueBatch(), MOCK_ENV, createMockExecutionContext()); + + expect(sentryEvent.release).toEqual('2.0.0'); + }); + + test('flushes the event after the handler is done using the cloudflare context.waitUntil', async () => { + const handler = { + queue(_batch, _env, _context) { + return; + }, + } satisfies ExportedHandler; + + const context = createMockExecutionContext(); + const waitUntilSpy = vi.spyOn(context, 'waitUntil'); + const wrappedHandler = withSentry(env => ({ dsn: env.SENTRY_DSN }), handler); + await wrappedHandler.queue?.(createMockQueueBatch(), MOCK_ENV, context); + + expect(waitUntilSpy).toHaveBeenCalledTimes(1); + 
expect(waitUntilSpy).toHaveBeenLastCalledWith(expect.any(Promise)); + }); + + test('creates a cloudflare client and sets it on the handler', async () => { + const initAndBindSpy = vi.spyOn(SentryCore, 'initAndBind'); + const handler = { + queue(_batch, _env, _context) { + return; + }, + } satisfies ExportedHandler; + + const wrappedHandler = withSentry(env => ({ dsn: env.SENTRY_DSN }), handler); + await wrappedHandler.queue?.(createMockQueueBatch(), MOCK_ENV, createMockExecutionContext()); + + expect(initAndBindSpy).toHaveBeenCalledTimes(1); + expect(initAndBindSpy).toHaveBeenLastCalledWith(CloudflareClient, expect.any(Object)); + }); + + describe('scope instrumentation', () => { + test('adds cloud resource context', async () => { + const handler = { + queue(_batch, _env, _context) { + SentryCore.captureMessage('cloud_resource'); + return; + }, + } satisfies ExportedHandler; + + let sentryEvent: Event = {}; + const wrappedHandler = withSentry( + env => ({ + dsn: env.SENTRY_DSN, + beforeSend(event) { + sentryEvent = event; + return null; + }, + }), + handler, + ); + await wrappedHandler.queue?.(createMockQueueBatch(), MOCK_ENV, createMockExecutionContext()); + + expect(sentryEvent.contexts?.cloud_resource).toEqual({ 'cloud.provider': 'cloudflare' }); + }); + }); + + describe('error instrumentation', () => { + test('captures errors thrown by the handler', async () => { + const captureExceptionSpy = vi.spyOn(SentryCore, 'captureException'); + const error = new Error('test'); + + expect(captureExceptionSpy).not.toHaveBeenCalled(); + + const handler = { + queue(_batch, _env, _context) { + throw error; + }, + } satisfies ExportedHandler; + + const wrappedHandler = withSentry(env => ({ dsn: env.SENTRY_DSN }), handler); + try { + await wrappedHandler.queue?.(createMockQueueBatch(), MOCK_ENV, createMockExecutionContext()); + } catch { + // ignore + } + + expect(captureExceptionSpy).toHaveBeenCalledTimes(1); + expect(captureExceptionSpy).toHaveBeenLastCalledWith(error, { + 
mechanism: { handled: false, type: 'auto.faas.cloudflare.queue' }, + }); + }); + + test('re-throws the error after capturing', async () => { + const error = new Error('test'); + const handler = { + queue(_batch, _env, _context) { + throw error; + }, + } satisfies ExportedHandler; + + const wrappedHandler = withSentry(env => ({ dsn: env.SENTRY_DSN }), handler); + + let thrownError: Error | undefined; + try { + await wrappedHandler.queue?.(createMockQueueBatch(), MOCK_ENV, createMockExecutionContext()); + } catch (e: any) { + thrownError = e; + } + + expect(thrownError).toBe(error); + }); + }); + + describe('tracing instrumentation', () => { + test('creates a span that wraps queue invocation with correct attributes', async () => { + const handler = { + queue(_batch, _env, _context) { + return; + }, + } satisfies ExportedHandler; + + let sentryEvent: Event = {}; + const wrappedHandler = withSentry( + env => ({ + dsn: env.SENTRY_DSN, + tracesSampleRate: 1, + beforeSendTransaction(event) { + sentryEvent = event; + return null; + }, + }), + handler, + ); + + const batch = createMockQueueBatch(); + await wrappedHandler.queue?.(batch, MOCK_ENV, createMockExecutionContext()); + + expect(sentryEvent.transaction).toEqual(`process ${batch.queue}`); + expect(sentryEvent.spans).toHaveLength(0); + expect(sentryEvent.contexts?.trace).toEqual({ + data: { + 'sentry.origin': 'auto.faas.cloudflare.queue', + 'sentry.op': 'queue.process', + 'faas.trigger': 'pubsub', + 'messaging.destination.name': batch.queue, + 'messaging.system': 'cloudflare', + 'messaging.batch.message_count': batch.messages.length, + 'messaging.message.retry.count': batch.messages.reduce((acc, message) => acc + message.attempts - 1, 0), + 'sentry.sample_rate': 1, + 'sentry.source': 'task', + }, + op: 'queue.process', + origin: 'auto.faas.cloudflare.queue', + span_id: expect.stringMatching(/[a-f0-9]{16}/), + trace_id: expect.stringMatching(/[a-f0-9]{32}/), + }); + }); + }); + + test('flush must be called when all 
waitUntil are done', async () => { + const flush = vi.spyOn(SentryCore.Client.prototype, 'flush'); + vi.useFakeTimers(); + onTestFinished(() => { + vi.useRealTimers(); + }); + const handler = { + queue(_controller, _env, _context) { + addDelayedWaitUntil(_context); + return; + }, + } satisfies ExportedHandler; + + const wrappedHandler = withSentry(vi.fn(), handler); + const waits: Promise<unknown>[] = []; + const waitUntil = vi.fn(promise => waits.push(promise)); + await wrappedHandler.queue?.(createMockQueueBatch(), MOCK_ENV_WITHOUT_DSN, { + waitUntil, + } as unknown as ExecutionContext); + expect(flush).not.toBeCalled(); + expect(waitUntil).toBeCalled(); + vi.advanceTimersToNextTimer().runAllTimers(); + await Promise.all(waits); + expect(flush).toHaveBeenCalledOnce(); + }); +}); diff --git a/packages/cloudflare/test/instrumentations/worker/instrumentScheduled.test.ts b/packages/cloudflare/test/instrumentations/worker/instrumentScheduled.test.ts new file mode 100644 index 000000000000..64833d70ddfb --- /dev/null +++ b/packages/cloudflare/test/instrumentations/worker/instrumentScheduled.test.ts @@ -0,0 +1,281 @@ +// Note: These tests run the handler in Node.js, which has some differences to the cloudflare workers runtime. +// Although this is not ideal, this is the best we can do until we have a better way to test cloudflare workers.
+ +import type { ExecutionContext, ScheduledController } from '@cloudflare/workers-types'; +import type { Event } from '@sentry/core'; +import * as SentryCore from '@sentry/core'; +import { beforeEach, describe, expect, onTestFinished, test, vi } from 'vitest'; +import { CloudflareClient } from '../../../src/client'; +import { withSentry } from '../../../src/withSentry'; + +const MOCK_ENV = { + SENTRY_DSN: 'https://public@dsn.ingest.sentry.io/1337', + SENTRY_RELEASE: '1.1.1', +}; + +const MOCK_ENV_WITHOUT_DSN = { + SENTRY_RELEASE: '1.1.1', +}; + +function createMockExecutionContext(): ExecutionContext { + return { + waitUntil: vi.fn(), + passThroughOnException: vi.fn(), + }; +} + +function createMockScheduledController(): ScheduledController { + return { + scheduledTime: 123, + cron: '0 0 0 * * *', + noRetry: vi.fn(), + }; +} + +function addDelayedWaitUntil(context: ExecutionContext) { + context.waitUntil(new Promise(resolve => setTimeout(() => resolve()))); +} + +describe('instrumentScheduled', () => { + beforeEach(() => { + vi.clearAllMocks(); + }); + + test('executes options callback with env', async () => { + const handler = { + scheduled(_controller, _env, _context) { + return; + }, + } satisfies ExportedHandler; + + const optionsCallback = vi.fn().mockReturnValue({}); + + const wrappedHandler = withSentry(optionsCallback, handler); + await wrappedHandler.scheduled?.(createMockScheduledController(), MOCK_ENV, createMockExecutionContext()); + + expect(optionsCallback).toHaveBeenCalledTimes(1); + expect(optionsCallback).toHaveBeenLastCalledWith(MOCK_ENV); + }); + + test('merges options from env and callback', async () => { + const handler = { + scheduled(_controller, _env, _context) { + SentryCore.captureMessage('cloud_resource'); + return; + }, + } satisfies ExportedHandler; + + let sentryEvent: Event = {}; + const wrappedHandler = withSentry( + env => ({ + dsn: env.SENTRY_DSN, + beforeSend(event) { + sentryEvent = event; + return null; + }, + }), + handler, + 
); + await wrappedHandler.scheduled?.(createMockScheduledController(), MOCK_ENV, createMockExecutionContext()); + + expect(sentryEvent.release).toBe('1.1.1'); + }); + + test('callback options take precedence over env options', async () => { + const handler = { + scheduled(_controller, _env, _context) { + SentryCore.captureMessage('cloud_resource'); + return; + }, + } satisfies ExportedHandler; + + let sentryEvent: Event = {}; + const wrappedHandler = withSentry( + env => ({ + dsn: env.SENTRY_DSN, + release: '2.0.0', + beforeSend(event) { + sentryEvent = event; + return null; + }, + }), + handler, + ); + await wrappedHandler.scheduled?.(createMockScheduledController(), MOCK_ENV, createMockExecutionContext()); + + expect(sentryEvent.release).toEqual('2.0.0'); + }); + + test('flushes the event after the handler is done using the cloudflare context.waitUntil', async () => { + const handler = { + scheduled(_controller, _env, _context) { + return; + }, + } satisfies ExportedHandler; + + const context = createMockExecutionContext(); + const waitUntilSpy = vi.spyOn(context, 'waitUntil'); + const wrappedHandler = withSentry(env => ({ dsn: env.SENTRY_DSN }), handler); + await wrappedHandler.scheduled?.(createMockScheduledController(), MOCK_ENV, context); + + expect(waitUntilSpy).toHaveBeenCalledTimes(1); + expect(waitUntilSpy).toHaveBeenLastCalledWith(expect.any(Promise)); + }); + + test('creates a cloudflare client and sets it on the handler', async () => { + const initAndBindSpy = vi.spyOn(SentryCore, 'initAndBind'); + const handler = { + scheduled(_controller, _env, _context) { + return; + }, + } satisfies ExportedHandler; + + const wrappedHandler = withSentry(env => ({ dsn: env.SENTRY_DSN }), handler); + await wrappedHandler.scheduled?.(createMockScheduledController(), MOCK_ENV, createMockExecutionContext()); + + expect(initAndBindSpy).toHaveBeenCalledTimes(1); + expect(initAndBindSpy).toHaveBeenLastCalledWith(CloudflareClient, expect.any(Object)); + }); + + 
describe('scope instrumentation', () => { + test('adds cloud resource context', async () => { + const handler = { + scheduled(_controller, _env, _context) { + SentryCore.captureMessage('cloud_resource'); + return; + }, + } satisfies ExportedHandler; + + let sentryEvent: Event = {}; + const wrappedHandler = withSentry( + env => ({ + dsn: env.SENTRY_DSN, + beforeSend(event) { + sentryEvent = event; + return null; + }, + }), + handler, + ); + await wrappedHandler.scheduled?.(createMockScheduledController(), MOCK_ENV, createMockExecutionContext()); + + expect(sentryEvent.contexts?.cloud_resource).toEqual({ 'cloud.provider': 'cloudflare' }); + }); + }); + + describe('error instrumentation', () => { + test('captures errors thrown by the handler', async () => { + const captureExceptionSpy = vi.spyOn(SentryCore, 'captureException'); + const error = new Error('test'); + + expect(captureExceptionSpy).not.toHaveBeenCalled(); + + const handler = { + scheduled(_controller, _env, _context) { + throw error; + }, + } satisfies ExportedHandler; + + const wrappedHandler = withSentry(env => ({ dsn: env.SENTRY_DSN }), handler); + try { + await wrappedHandler.scheduled?.(createMockScheduledController(), MOCK_ENV, createMockExecutionContext()); + } catch { + // ignore + } + + expect(captureExceptionSpy).toHaveBeenCalledTimes(1); + expect(captureExceptionSpy).toHaveBeenLastCalledWith(error, { + mechanism: { handled: false, type: 'auto.faas.cloudflare.scheduled' }, + }); + }); + + test('re-throws the error after capturing', async () => { + const error = new Error('test'); + const handler = { + scheduled(_controller, _env, _context) { + throw error; + }, + } satisfies ExportedHandler; + + const wrappedHandler = withSentry(env => ({ dsn: env.SENTRY_DSN }), handler); + + let thrownError: Error | undefined; + try { + await wrappedHandler.scheduled?.(createMockScheduledController(), MOCK_ENV, createMockExecutionContext()); + } catch (e: any) { + thrownError = e; + } + + 
expect(thrownError).toBe(error); + }); + }); + + describe('tracing instrumentation', () => { + test('creates a span that wraps scheduled invocation', async () => { + const handler = { + scheduled(_controller, _env, _context) { + return; + }, + } satisfies ExportedHandler; + + let sentryEvent: Event = {}; + const wrappedHandler = withSentry( + env => ({ + dsn: env.SENTRY_DSN, + tracesSampleRate: 1, + beforeSendTransaction(event) { + sentryEvent = event; + return null; + }, + }), + handler, + ); + + await wrappedHandler.scheduled?.(createMockScheduledController(), MOCK_ENV, createMockExecutionContext()); + + expect(sentryEvent.transaction).toEqual('Scheduled Cron 0 0 0 * * *'); + expect(sentryEvent.spans).toHaveLength(0); + expect(sentryEvent.contexts?.trace).toEqual({ + data: { + 'sentry.origin': 'auto.faas.cloudflare.scheduled', + 'sentry.op': 'faas.cron', + 'faas.cron': '0 0 0 * * *', + 'faas.time': expect.any(String), + 'faas.trigger': 'timer', + 'sentry.sample_rate': 1, + 'sentry.source': 'task', + }, + op: 'faas.cron', + origin: 'auto.faas.cloudflare.scheduled', + span_id: expect.stringMatching(/[a-f0-9]{16}/), + trace_id: expect.stringMatching(/[a-f0-9]{32}/), + }); + }); + }); + + test('flush must be called when all waitUntil are done', async () => { + const flush = vi.spyOn(SentryCore.Client.prototype, 'flush'); + vi.useFakeTimers(); + onTestFinished(() => { + vi.useRealTimers(); + }); + const handler = { + scheduled(_controller, _env, _context) { + addDelayedWaitUntil(_context); + return; + }, + } satisfies ExportedHandler; + + const wrappedHandler = withSentry(vi.fn(), handler); + const waits: Promise<unknown>[] = []; + const waitUntil = vi.fn(promise => waits.push(promise)); + await wrappedHandler.scheduled?.(createMockScheduledController(), MOCK_ENV_WITHOUT_DSN, { + waitUntil, + } as unknown as ExecutionContext); + expect(flush).not.toBeCalled(); + expect(waitUntil).toBeCalled(); + vi.advanceTimersToNextTimer().runAllTimers(); + await Promise.all(waits); +
expect(flush).toHaveBeenCalledOnce(); + }); +}); diff --git a/packages/cloudflare/test/instrumentations/worker/instrumentTail.test.ts b/packages/cloudflare/test/instrumentations/worker/instrumentTail.test.ts new file mode 100644 index 000000000000..f85507e2c734 --- /dev/null +++ b/packages/cloudflare/test/instrumentations/worker/instrumentTail.test.ts @@ -0,0 +1,258 @@ +// Note: These tests run the handler in Node.js, which has some differences to the cloudflare workers runtime. +// Although this is not ideal, this is the best we can do until we have a better way to test cloudflare workers. + +import type { ExecutionContext, TraceItem } from '@cloudflare/workers-types'; +import type { Event } from '@sentry/core'; +import * as SentryCore from '@sentry/core'; +import { beforeEach, describe, expect, onTestFinished, test, vi } from 'vitest'; +import { CloudflareClient } from '../../../src/client'; +import { withSentry } from '../../../src/withSentry'; + +const MOCK_ENV = { + SENTRY_DSN: 'https://public@dsn.ingest.sentry.io/1337', + SENTRY_RELEASE: '1.1.1', +}; + +const MOCK_ENV_WITHOUT_DSN = { + SENTRY_RELEASE: '1.1.1', +}; + +function createMockExecutionContext(): ExecutionContext { + return { + waitUntil: vi.fn(), + passThroughOnException: vi.fn(), + }; +} + +function createMockTailEvent(): TraceItem[] { + return [ + { + event: { + consumedEvents: [ + { + scriptName: 'test-script', + }, + ], + }, + eventTimestamp: Date.now(), + logs: [ + { + timestamp: Date.now(), + level: 'info', + message: 'Test log message', + }, + ], + exceptions: [], + diagnosticsChannelEvents: [], + scriptName: 'test-script', + outcome: 'ok', + truncated: false, + }, + ]; +} + +function addDelayedWaitUntil(context: ExecutionContext) { + context.waitUntil(new Promise(resolve => setTimeout(() => resolve()))); +} + +describe('instrumentTail', () => { + beforeEach(() => { + vi.clearAllMocks(); + }); + + test('executes options callback with env', async () => { + const handler = { + tail(_event, 
_env, _context) { + return; + }, + } satisfies ExportedHandler; + + const optionsCallback = vi.fn().mockReturnValue({}); + + const wrappedHandler = withSentry(optionsCallback, handler); + await wrappedHandler.tail?.(createMockTailEvent(), MOCK_ENV, createMockExecutionContext()); + + expect(optionsCallback).toHaveBeenCalledTimes(1); + expect(optionsCallback).toHaveBeenLastCalledWith(MOCK_ENV); + }); + + test('merges options from env and callback', async () => { + const handler = { + tail(_event, _env, _context) { + SentryCore.captureMessage('cloud_resource'); + return; + }, + } satisfies ExportedHandler; + + let sentryEvent: Event = {}; + const wrappedHandler = withSentry( + env => ({ + dsn: env.SENTRY_DSN, + beforeSend(event) { + sentryEvent = event; + return null; + }, + }), + handler, + ); + await wrappedHandler.tail?.(createMockTailEvent(), MOCK_ENV, createMockExecutionContext()); + + expect(sentryEvent.release).toBe('1.1.1'); + }); + + test('callback options take precedence over env options', async () => { + const handler = { + tail(_event, _env, _context) { + SentryCore.captureMessage('cloud_resource'); + return; + }, + } satisfies ExportedHandler; + + let sentryEvent: Event = {}; + const wrappedHandler = withSentry( + env => ({ + dsn: env.SENTRY_DSN, + release: '2.0.0', + beforeSend(event) { + sentryEvent = event; + return null; + }, + }), + handler, + ); + await wrappedHandler.tail?.(createMockTailEvent(), MOCK_ENV, createMockExecutionContext()); + + expect(sentryEvent.release).toEqual('2.0.0'); + }); + + test('flushes the event after the handler is done using the cloudflare context.waitUntil', async () => { + const handler = { + tail(_event, _env, _context) { + return; + }, + } satisfies ExportedHandler; + + const context = createMockExecutionContext(); + const waitUntilSpy = vi.spyOn(context, 'waitUntil'); + const wrappedHandler = withSentry(env => ({ dsn: env.SENTRY_DSN }), handler); + await wrappedHandler.tail?.(createMockTailEvent(), MOCK_ENV, context); 
+ + expect(waitUntilSpy).toHaveBeenCalledTimes(1); + expect(waitUntilSpy).toHaveBeenLastCalledWith(expect.any(Promise)); + }); + + test('creates a cloudflare client and sets it on the handler', async () => { + const initAndBindSpy = vi.spyOn(SentryCore, 'initAndBind'); + const handler = { + tail(_event, _env, _context) { + return; + }, + } satisfies ExportedHandler; + + const wrappedHandler = withSentry(env => ({ dsn: env.SENTRY_DSN }), handler); + await wrappedHandler.tail?.(createMockTailEvent(), MOCK_ENV, createMockExecutionContext()); + + expect(initAndBindSpy).toHaveBeenCalledTimes(1); + expect(initAndBindSpy).toHaveBeenLastCalledWith(CloudflareClient, expect.any(Object)); + }); + + describe('scope instrumentation', () => { + test('adds cloud resource context', async () => { + const handler = { + tail(_event, _env, _context) { + SentryCore.captureMessage('cloud_resource'); + return; + }, + } satisfies ExportedHandler; + + let sentryEvent: Event = {}; + const wrappedHandler = withSentry( + env => ({ + dsn: env.SENTRY_DSN, + beforeSend(event) { + sentryEvent = event; + return null; + }, + }), + handler, + ); + await wrappedHandler.tail?.(createMockTailEvent(), MOCK_ENV, createMockExecutionContext()); + + expect(sentryEvent.contexts?.cloud_resource).toEqual({ 'cloud.provider': 'cloudflare' }); + }); + }); + + describe('error instrumentation', () => { + test('captures errors thrown by the handler', async () => { + const captureExceptionSpy = vi.spyOn(SentryCore, 'captureException'); + const error = new Error('test'); + + expect(captureExceptionSpy).not.toHaveBeenCalled(); + + const handler = { + tail(_event, _env, _context) { + throw error; + }, + } satisfies ExportedHandler; + + const wrappedHandler = withSentry(env => ({ dsn: env.SENTRY_DSN }), handler); + try { + await wrappedHandler.tail?.(createMockTailEvent(), MOCK_ENV, createMockExecutionContext()); + } catch { + // ignore + } + + expect(captureExceptionSpy).toHaveBeenCalledTimes(1); + 
expect(captureExceptionSpy).toHaveBeenLastCalledWith(error, { + mechanism: { handled: false, type: 'auto.faas.cloudflare.tail' }, + }); + }); + + test('re-throws the error after capturing', async () => { + const error = new Error('test'); + const handler = { + tail(_event, _env, _context) { + throw error; + }, + } satisfies ExportedHandler; + + const wrappedHandler = withSentry(env => ({ dsn: env.SENTRY_DSN }), handler); + + let thrownError: Error | undefined; + try { + await wrappedHandler.tail?.(createMockTailEvent(), MOCK_ENV, createMockExecutionContext()); + } catch (e: any) { + thrownError = e; + } + + expect(thrownError).toBe(error); + }); + }); + + test('flush must be called when all waitUntil are done', async () => { + const flush = vi.spyOn(SentryCore.Client.prototype, 'flush'); + vi.useFakeTimers(); + onTestFinished(() => { + vi.useRealTimers(); + flush.mockRestore(); + }); + const handler = { + tail(_controller, _env, _context) { + addDelayedWaitUntil(_context); + return; + }, + } satisfies ExportedHandler; + + const wrappedHandler = withSentry(vi.fn(), handler); + const waits: Promise<unknown>[] = []; + const waitUntil = vi.fn(promise => waits.push(promise)); + await wrappedHandler.tail?.(createMockTailEvent(), MOCK_ENV_WITHOUT_DSN, { + waitUntil, + } as unknown as ExecutionContext); + expect(flush).not.toBeCalled(); + expect(waitUntil).toBeCalled(); + vi.advanceTimersToNextTimer().runAllTimers(); + await Promise.all(waits); + expect(flush).toHaveBeenCalledOnce(); + }); +}); diff --git a/packages/cloudflare/test/withSentry.test.ts b/packages/cloudflare/test/withSentry.test.ts new file mode 100644 index 000000000000..5b1f3ca9b17d --- /dev/null +++ b/packages/cloudflare/test/withSentry.test.ts @@ -0,0 +1,103 @@ +// Note: These tests run the handler in Node.js, which has some differences to the cloudflare workers runtime. +// Although this is not ideal, this is the best we can do until we have a better way to test cloudflare workers.
+ +import * as SentryCore from '@sentry/core'; +import { beforeEach, describe, expect, test, vi } from 'vitest'; +import { withSentry } from '../src/withSentry'; +import { markAsInstrumented } from '../src/instrument'; +import * as HonoIntegration from '../src/integrations/hono'; + +type HonoLikeApp = ExportedHandler< + Env, + QueueHandlerMessage, + CfHostMetadata +> & { + onError?: () => void; + errorHandler?: (err: Error) => Response; +}; + +const MOCK_ENV = { + SENTRY_DSN: 'https://public@dsn.ingest.sentry.io/1337', + SENTRY_RELEASE: '1.1.1', +}; + +describe('withSentry', () => { + beforeEach(() => { + vi.clearAllMocks(); + }); + + describe('hono errorHandler', () => { + test('calls Hono Integration to handle error captured by the errorHandler', async () => { + const error = new Error('test hono error'); + + const handleHonoException = vi.fn(); + vi.spyOn(HonoIntegration, 'getHonoIntegration').mockReturnValue({ handleHonoException } as any); + + const honoApp = { + fetch(_request, _env, _context) { + return new Response('test'); + }, + onError() {}, + errorHandler(err: Error) { + return new Response(`Error: ${err.message}`, { status: 500 }); + }, + } satisfies HonoLikeApp; + + withSentry(env => ({ dsn: env.SENTRY_DSN }), honoApp); + + const errorHandlerResponse = honoApp.errorHandler?.(error); + + expect(handleHonoException).toHaveBeenCalledTimes(1); + expect(handleHonoException).toHaveBeenLastCalledWith(error, undefined); + expect(errorHandlerResponse?.status).toBe(500); + }); + + test('preserves the original errorHandler functionality', async () => { + const originalErrorHandlerSpy = vi.fn().mockImplementation((err: Error) => { + return new Response(`Error: ${err.message}`, { status: 500 }); + }); + + const error = new Error('test hono error'); + + const honoApp = { + fetch(_request, _env, _context) { + return new Response('test'); + }, + onError() {}, + errorHandler: originalErrorHandlerSpy, + } satisfies HonoLikeApp; + + withSentry(env => ({ dsn: 
env.SENTRY_DSN }), honoApp); + + const errorHandlerResponse = honoApp.errorHandler?.(error); + + expect(originalErrorHandlerSpy).toHaveBeenCalledTimes(1); + expect(originalErrorHandlerSpy).toHaveBeenLastCalledWith(error); + expect(errorHandlerResponse?.status).toBe(500); + }); + + test('does not instrument an already instrumented errorHandler', async () => { + const captureExceptionSpy = vi.spyOn(SentryCore, 'captureException'); + const error = new Error('test hono error'); + + const originalErrorHandler = (err: Error) => { + return new Response(`Error: ${err.message}`, { status: 500 }); + }; + + markAsInstrumented(originalErrorHandler); + + const honoApp = { + fetch(_request, _env, _context) { + return new Response('test'); + }, + onError() {}, + errorHandler: originalErrorHandler, + } satisfies HonoLikeApp; + + withSentry(env => ({ dsn: env.SENTRY_DSN }), honoApp); + + honoApp.errorHandler?.(error); + expect(captureExceptionSpy).not.toHaveBeenCalled(); + }); + }); +}); diff --git a/packages/cloudflare/test/workflow.test.ts b/packages/cloudflare/test/workflow.test.ts index fa922d7233e0..b460e6bfee5a 100644 --- a/packages/cloudflare/test/workflow.test.ts +++ b/packages/cloudflare/test/workflow.test.ts @@ -26,7 +26,7 @@ const mockStep: WorkflowStep = { } else { return await (maybeCallback ? 
maybeCallback() : Promise.resolve()); } - } catch (error) { + } catch { await new Promise(resolve => setTimeout(resolve, 1000)); } } diff --git a/packages/cloudflare/test/wrapMethodWithSentry.test.ts b/packages/cloudflare/test/wrapMethodWithSentry.test.ts index a7e73a83cd39..c831bd01a6bb 100644 --- a/packages/cloudflare/test/wrapMethodWithSentry.test.ts +++ b/packages/cloudflare/test/wrapMethodWithSentry.test.ts @@ -1,25 +1,31 @@ import * as sentryCore from '@sentry/core'; import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'; import { isInstrumented } from '../src/instrument'; +import * as sdk from '../src/sdk'; import { wrapMethodWithSentry } from '../src/wrapMethodWithSentry'; -// Mock the SDK init to avoid actual SDK initialization -vi.mock('../src/sdk', () => ({ - init: vi.fn(() => ({ +function createMockClient(hasTransport: boolean = true) { + return { getOptions: () => ({}), on: vi.fn(), dispose: vi.fn(), - })), + getTransport: vi.fn().mockReturnValue(hasTransport ? 
{ send: vi.fn() } : undefined), + }; +} + +// Mock the SDK init to avoid actual SDK initialization +vi.mock('../src/sdk', () => ({ + init: vi.fn(() => createMockClient(true)), })); // Mock sentry/core functions vi.mock('@sentry/core', async importOriginal => { - const actual = await importOriginal(); + const actual = await importOriginal(); return { ...actual, getClient: vi.fn(), - withIsolationScope: vi.fn((callback: (scope: any) => any) => callback(createMockScope())), - withScope: vi.fn((callback: (scope: any) => any) => callback(createMockScope())), + withIsolationScope: vi.fn((callback: (scope: unknown) => unknown) => callback(createMockScope())), + withScope: vi.fn((callback: (scope: unknown) => unknown) => callback(createMockScope())), startSpan: vi.fn((opts, callback) => callback(createMockSpan())), captureException: vi.fn(), flush: vi.fn().mockResolvedValue(true), @@ -27,6 +33,8 @@ vi.mock('@sentry/core', async importOriginal => { }; }); +const mockedWithIsolationScope = vi.mocked(sentryCore.withIsolationScope); + function createMockScope() { return { getClient: vi.fn(), @@ -307,4 +315,90 @@ describe('wrapMethodWithSentry', () => { expect(handler.mock.instances[0]).toBe(thisArg); }); }); + + describe('client re-initialization', () => { + it('creates a new client when scope has no client', async () => { + const scope = new sentryCore.Scope(); + + mockedWithIsolationScope.mockImplementation(vi.fn(callback => callback(scope))); + + const spyClient = vi.spyOn(scope, 'setClient'); + const handler = vi.fn().mockResolvedValue('result'); + const options = { + options: { dsn: 'https://test@sentry.io/123' }, + context: createMockContext(), + }; + + const wrapped = wrapMethodWithSentry(options, handler); + + await wrapped(); + + expect(sdk.init).toHaveBeenCalledWith( + expect.objectContaining({ + dsn: 'https://test@sentry.io/123', + }), + ); + expect(spyClient).toHaveBeenCalled(); + }); + + it('creates a new client when existing client has no transport (disposed)', 
async () => { + const disposedClient = { + getOptions: () => ({}), + on: vi.fn(), + dispose: vi.fn(), + getTransport: vi.fn().mockReturnValue(undefined), + } as unknown as sentryCore.Client; + + const scope = new sentryCore.Scope(); + + scope.setClient(disposedClient); + mockedWithIsolationScope.mockImplementation(vi.fn(callback => callback(scope))); + + const spyClient = vi.spyOn(scope, 'setClient'); + const handler = vi.fn().mockResolvedValue('result'); + const options = { + options: { dsn: 'https://test@sentry.io/123' }, + context: createMockContext(), + }; + + const wrapped = wrapMethodWithSentry(options, handler); + await wrapped(); + + expect(sdk.init).toHaveBeenCalledWith( + expect.objectContaining({ + dsn: 'https://test@sentry.io/123', + }), + ); + expect(spyClient).toHaveBeenCalled(); + }); + + it('does not create a new client when existing client has valid transport', async () => { + const validClient = { + getOptions: () => ({}), + on: vi.fn(), + dispose: vi.fn(), + getTransport: vi.fn().mockReturnValue({ send: vi.fn() }), + } as unknown as sentryCore.Client; + + const scope = new sentryCore.Scope(); + + scope.setClient(validClient); + mockedWithIsolationScope.mockImplementation(vi.fn(callback => callback(scope))); + vi.mocked(sdk.init).mockClear(); + + const spyClient = vi.spyOn(scope, 'setClient'); + const handler = vi.fn().mockResolvedValue('result'); + const options = { + options: { dsn: 'https://test@sentry.io/123' }, + context: createMockContext(), + }; + + const wrapped = wrapMethodWithSentry(options, handler); + + await wrapped(); + + expect(sdk.init).not.toHaveBeenCalled(); + expect(spyClient).not.toHaveBeenCalled(); + }); + }); }); diff --git a/packages/core/package.json b/packages/core/package.json index e92a33049701..9dee4ca63119 100644 --- a/packages/core/package.json +++ b/packages/core/package.json @@ -51,8 +51,8 @@ "build:tarball": "npm pack", "circularDepCheck": "madge --circular src/index.ts", "clean": "rimraf build coverage 
sentry-core-*.tgz", - "fix": "oxlint . --fix", - "lint": "oxlint .", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --fix --type-aware", + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --type-aware", "lint:es-compatibility": "es-check es2020 ./build/cjs/*.js && es-check es2020 ./build/esm/*.js --module", "test": "vitest run", "test:watch": "vitest --watch", diff --git a/packages/core/src/client.ts b/packages/core/src/client.ts index 3afe8fa2442c..8d69411aacfd 100644 --- a/packages/core/src/client.ts +++ b/packages/core/src/client.ts @@ -1321,7 +1321,7 @@ export abstract class Client { throw _makeDoNotSendEventError('An event processor returned `null`, will not send event.'); } - const isInternalException = hint.data && (hint.data as { __sentry__: boolean }).__sentry__ === true; + const isInternalException = (hint.data as { __sentry__: boolean })?.__sentry__ === true; if (isInternalException) { return prepared; } diff --git a/packages/core/src/integrations/mcp-server/handlers.ts b/packages/core/src/integrations/mcp-server/handlers.ts index 9816d607b7c1..dd8e0296a95e 100644 --- a/packages/core/src/integrations/mcp-server/handlers.ts +++ b/packages/core/src/integrations/mcp-server/handlers.ts @@ -121,7 +121,7 @@ function captureHandlerError(error: Error, methodName: keyof MCPServerInstance, extraData.prompt_name = handlerName; captureError(error, 'prompt_execution', extraData); } - } catch (captureErr) { + } catch (_captureErr) { // noop } } diff --git a/packages/core/src/logs/internal.ts b/packages/core/src/logs/internal.ts index 3408b01a5f96..097ffbb6906e 100644 --- a/packages/core/src/logs/internal.ts +++ b/packages/core/src/logs/internal.ts @@ -10,6 +10,7 @@ import { isParameterizedString } from '../utils/is'; import { getCombinedScopeData } from '../utils/scopeData'; import { _getSpanForScope } from '../utils/spanOnScope'; import { timestampInSeconds } from '../utils/time'; +import { 
getSequenceAttribute } from '../utils/timestampSequence'; import { _getTraceInfoFromScope } from '../utils/trace-info'; import { SEVERITY_TEXT_TO_SEVERITY_NUMBER } from './constants'; import { createLogEnvelope } from './envelope'; @@ -154,8 +155,11 @@ export function _INTERNAL_captureLog( const { level, message, attributes: logAttributes = {}, severityNumber } = log; + const timestamp = timestampInSeconds(); + const sequenceAttr = getSequenceAttribute(timestamp); + const serializedLog: SerializedLog = { - timestamp: timestampInSeconds(), + timestamp, level, body: message, trace_id: traceContext?.trace_id, @@ -163,6 +167,7 @@ export function _INTERNAL_captureLog( attributes: { ...serializeAttributes(scopeAttributes), ...serializeAttributes(logAttributes, true), + [sequenceAttr.key]: sequenceAttr.value, }, }; diff --git a/packages/core/src/metrics/internal.ts b/packages/core/src/metrics/internal.ts index bdd13d884967..0545414654ef 100644 --- a/packages/core/src/metrics/internal.ts +++ b/packages/core/src/metrics/internal.ts @@ -11,6 +11,7 @@ import { debug } from '../utils/debug-logger'; import { getCombinedScopeData } from '../utils/scopeData'; import { _getSpanForScope } from '../utils/spanOnScope'; import { timestampInSeconds } from '../utils/time'; +import { getSequenceAttribute } from '../utils/timestampSequence'; import { _getTraceInfoFromScope } from '../utils/trace-info'; import { createMetricEnvelope } from './envelope'; @@ -135,8 +136,11 @@ function _buildSerializedMetric( const traceId = span ? span.spanContext().traceId : traceContext?.trace_id; const spanId = span ? span.spanContext().spanId : undefined; + const timestamp = timestampInSeconds(); + const sequenceAttr = getSequenceAttribute(timestamp); + return { - timestamp: timestampInSeconds(), + timestamp, trace_id: traceId ?? 
'', span_id: spanId, name: metric.name, @@ -146,6 +150,7 @@ function _buildSerializedMetric( attributes: { ...serializeAttributes(scopeAttributes), ...serializeAttributes(metric.attributes, 'skip-undefined'), + [sequenceAttr.key]: sequenceAttr.value, }, }; } diff --git a/packages/core/src/tracing/ai/gen-ai-attributes.ts b/packages/core/src/tracing/ai/gen-ai-attributes.ts index dc88e6315852..cae9a9353da9 100644 --- a/packages/core/src/tracing/ai/gen-ai-attributes.ts +++ b/packages/core/src/tracing/ai/gen-ai-attributes.ts @@ -126,6 +126,14 @@ export const GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE = 'sentry.sdk_meta. */ export const GEN_AI_INPUT_MESSAGES_ATTRIBUTE = 'gen_ai.input.messages'; +/** + * The model's response messages including text and tool calls + * Only recorded when recordOutputs is enabled + * Format: stringified array of message objects with role, parts, and finish_reason + * @see https://opentelemetry.io/docs/specs/semconv/registry/attributes/gen-ai/#gen-ai-output-messages + */ +export const GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE = 'gen_ai.output.messages'; + /** * The system instructions extracted from system messages * Only recorded when recordInputs is enabled @@ -227,12 +235,12 @@ export const GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE = 'gen_ai.embeddings.input'; /** * The span operation name for embedding */ -export const GEN_AI_EMBED_DO_EMBED_OPERATION_ATTRIBUTE = 'gen_ai.embed'; +export const GEN_AI_EMBED_DO_EMBED_OPERATION_ATTRIBUTE = 'gen_ai.embeddings'; /** * The span operation name for embedding many */ -export const GEN_AI_EMBED_MANY_DO_EMBED_OPERATION_ATTRIBUTE = 'gen_ai.embed_many'; +export const GEN_AI_EMBED_MANY_DO_EMBED_OPERATION_ATTRIBUTE = 'gen_ai.embeddings'; /** * The span operation name for reranking @@ -269,6 +277,12 @@ export const GEN_AI_TOOL_INPUT_ATTRIBUTE = 'gen_ai.tool.input'; */ export const GEN_AI_TOOL_OUTPUT_ATTRIBUTE = 'gen_ai.tool.output'; +/** + * The description of the tool being used + * @see 
https://opentelemetry.io/docs/specs/semconv/registry/attributes/gen-ai/#gen-ai-tool-description + */ +export const GEN_AI_TOOL_DESCRIPTION_ATTRIBUTE = 'gen_ai.tool.description'; + // ============================================================================= // OPENAI-SPECIFIC ATTRIBUTES // ============================================================================= diff --git a/packages/core/src/tracing/ai/mediaStripping.ts b/packages/core/src/tracing/ai/mediaStripping.ts index f4870cd5a9de..cb8e5d7b959e 100644 --- a/packages/core/src/tracing/ai/mediaStripping.ts +++ b/packages/core/src/tracing/ai/mediaStripping.ts @@ -47,6 +47,8 @@ export function isContentMedia(part: unknown): part is ContentMedia { hasInputAudio(part) || hasFileData(part) || hasMediaTypeData(part) || + hasVercelFileData(part) || + hasVercelImageData(part) || hasBlobOrBase64Type(part) || hasB64Json(part) || hasImageGenerationResult(part) || @@ -113,6 +115,41 @@ function hasMediaTypeData(part: NonNullable): part is { media_type: str return 'media_type' in part && typeof part.media_type === 'string' && 'data' in part; } +/** + * Check for Vercel AI SDK file format: { type: "file", mediaType: "...", data: "..." } + * Only matches base64/binary data, not HTTP/HTTPS URLs (which should be preserved). + */ +function hasVercelFileData(part: NonNullable): part is { type: 'file'; mediaType: string; data: string } { + return ( + 'type' in part && + part.type === 'file' && + 'mediaType' in part && + typeof part.mediaType === 'string' && + 'data' in part && + typeof part.data === 'string' && + // Only strip base64/binary data, not HTTP/HTTPS URLs which should be preserved as references + !part.data.startsWith('http://') && + !part.data.startsWith('https://') + ); +} + +/** + * Check for Vercel AI SDK image format: { type: "image", image: "base64...", mimeType?: "..." } + * Only matches base64/data URIs, not HTTP/HTTPS URLs (which should be preserved). 
+ * Note: mimeType is optional in Vercel AI SDK image parts. + */ +function hasVercelImageData(part: NonNullable): part is { type: 'image'; image: string; mimeType?: string } { + return ( + 'type' in part && + part.type === 'image' && + 'image' in part && + typeof part.image === 'string' && + // Only strip base64/data URIs, not HTTP/HTTPS URLs which should be preserved as references + !part.image.startsWith('http://') && + !part.image.startsWith('https://') + ); +} + function hasBlobOrBase64Type(part: NonNullable): part is { type: 'blob' | 'base64'; content: string } { return 'type' in part && (part.type === 'blob' || part.type === 'base64'); } @@ -131,7 +168,7 @@ function hasDataUri(part: NonNullable): part is { uri: string } { const REMOVED_STRING = '[Blob substitute]'; -const MEDIA_FIELDS = ['image_url', 'data', 'content', 'b64_json', 'result', 'uri'] as const; +const MEDIA_FIELDS = ['image_url', 'data', 'content', 'b64_json', 'result', 'uri', 'image'] as const; /** * Replace inline binary data in a single media content part with a placeholder. diff --git a/packages/core/src/tracing/ai/messageTruncation.ts b/packages/core/src/tracing/ai/messageTruncation.ts index 499d25ee6e47..16df3c298466 100644 --- a/packages/core/src/tracing/ai/messageTruncation.ts +++ b/packages/core/src/tracing/ai/messageTruncation.ts @@ -232,8 +232,9 @@ function truncatePartsMessage(message: PartsMessage, maxBytes: number): unknown[ /** * Truncate a single message to fit within maxBytes. 
* - * Supports two message formats: + * Supports three message formats: * - OpenAI/Anthropic: `{ ..., content: string }` + * - Vercel AI/OpenAI multimodal: `{ ..., content: Array<{type, text?, ...}> }` * - Google GenAI: `{ ..., parts: Array }` * * @param message - The message to truncate @@ -257,6 +258,11 @@ function truncateSingleMessage(message: unknown, maxBytes: number): unknown[] { return truncateContentMessage(message, maxBytes); } + if (isContentArrayMessage(message)) { + // Content array messages are returned as-is without truncation + return [message]; + } + if (isPartsMessage(message)) { return truncatePartsMessage(message, maxBytes); } diff --git a/packages/core/src/tracing/ai/utils.ts b/packages/core/src/tracing/ai/utils.ts index 8f08b65c6171..46602a54553a 100644 --- a/packages/core/src/tracing/ai/utils.ts +++ b/packages/core/src/tracing/ai/utils.ts @@ -1,6 +1,7 @@ /** * Shared utils for AI integrations (OpenAI, Anthropic, Verce.AI, etc.) */ +import { getClient } from '../../currentScopes'; import type { Span } from '../../types-hoist/span'; import { GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, @@ -8,6 +9,25 @@ import { GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE, } from './gen-ai-attributes'; import { truncateGenAiMessages, truncateGenAiStringInput } from './messageTruncation'; + +export interface AIRecordingOptions { + recordInputs?: boolean; + recordOutputs?: boolean; +} + +/** + * Resolves AI recording options by falling back to the client's `sendDefaultPii` setting. + * Precedence: explicit option > sendDefaultPii > false + */ +export function resolveAIRecordingOptions(options?: T): T & Required { + const sendDefaultPii = Boolean(getClient()?.getOptions().sendDefaultPii); + return { + ...options, + recordInputs: options?.recordInputs ?? sendDefaultPii, + recordOutputs: options?.recordOutputs ?? 
sendDefaultPii, + } as T & Required; +} + /** * Maps AI method paths to OpenTelemetry semantic convention operation names * @see https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-spans/#llm-request-spans diff --git a/packages/core/src/tracing/anthropic-ai/index.ts b/packages/core/src/tracing/anthropic-ai/index.ts index 63ff1be0e52f..f677fe5eb90f 100644 --- a/packages/core/src/tracing/anthropic-ai/index.ts +++ b/packages/core/src/tracing/anthropic-ai/index.ts @@ -1,4 +1,3 @@ -import { getClient } from '../../currentScopes'; import { captureException } from '../../exports'; import { SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '../../semanticAttributes'; import { SPAN_STATUS_ERROR } from '../../tracing'; @@ -23,7 +22,13 @@ import { GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, GEN_AI_SYSTEM_ATTRIBUTE, } from '../ai/gen-ai-attributes'; -import { buildMethodPath, getFinalOperationName, getSpanOperation, setTokenUsageAttributes } from '../ai/utils'; +import { + buildMethodPath, + getFinalOperationName, + getSpanOperation, + resolveAIRecordingOptions, + setTokenUsageAttributes, +} from '../ai/utils'; import { instrumentAsyncIterableStream, instrumentMessageStream } from './streaming'; import type { AnthropicAiInstrumentedMethod, @@ -350,12 +355,5 @@ function createDeepProxy(target: T, currentPath = '', options: * @returns The instrumented client with the same type as the input */ export function instrumentAnthropicAiClient(anthropicAiClient: T, options?: AnthropicAiOptions): T { - const sendDefaultPii = Boolean(getClient()?.getOptions().sendDefaultPii); - - const _options = { - recordInputs: sendDefaultPii, - recordOutputs: sendDefaultPii, - ...options, - }; - return createDeepProxy(anthropicAiClient, '', _options); + return createDeepProxy(anthropicAiClient, '', resolveAIRecordingOptions(options)); } diff --git a/packages/core/src/tracing/google-genai/index.ts b/packages/core/src/tracing/google-genai/index.ts index 7781b67d6db0..e53b320a8503 100644 --- 
a/packages/core/src/tracing/google-genai/index.ts +++ b/packages/core/src/tracing/google-genai/index.ts @@ -1,4 +1,3 @@ -import { getClient } from '../../currentScopes'; import { captureException } from '../../exports'; import { SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '../../semanticAttributes'; import { SPAN_STATUS_ERROR } from '../../tracing'; @@ -27,7 +26,13 @@ import { GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE, } from '../ai/gen-ai-attributes'; import { truncateGenAiMessages } from '../ai/messageTruncation'; -import { buildMethodPath, extractSystemInstructions, getFinalOperationName, getSpanOperation } from '../ai/utils'; +import { + buildMethodPath, + extractSystemInstructions, + getFinalOperationName, + getSpanOperation, + resolveAIRecordingOptions, +} from '../ai/utils'; import { CHAT_PATH, CHATS_CREATE_METHOD, GOOGLE_GENAI_SYSTEM_NAME } from './constants'; import { instrumentStream } from './streaming'; import type { @@ -393,12 +398,5 @@ function createDeepProxy(target: T, currentPath = '', options: * ``` */ export function instrumentGoogleGenAIClient(client: T, options?: GoogleGenAIOptions): T { - const sendDefaultPii = Boolean(getClient()?.getOptions().sendDefaultPii); - - const _options = { - recordInputs: sendDefaultPii, - recordOutputs: sendDefaultPii, - ...options, - }; - return createDeepProxy(client, '', _options); + return createDeepProxy(client, '', resolveAIRecordingOptions(options)); } diff --git a/packages/core/src/tracing/langchain/index.ts b/packages/core/src/tracing/langchain/index.ts index 7484219d32d9..54b581af9f2d 100644 --- a/packages/core/src/tracing/langchain/index.ts +++ b/packages/core/src/tracing/langchain/index.ts @@ -10,6 +10,7 @@ import { GEN_AI_TOOL_NAME_ATTRIBUTE, GEN_AI_TOOL_OUTPUT_ATTRIBUTE, } from '../ai/gen-ai-attributes'; +import { resolveAIRecordingOptions } from '../ai/utils'; import { LANGCHAIN_ORIGIN } from './constants'; import type { LangChainCallbackHandler, @@ -32,8 +33,7 @@ import { * This is a stateful handler that 
tracks spans across multiple LangChain executions. */ export function createLangChainCallbackHandler(options: LangChainOptions = {}): LangChainCallbackHandler { - const recordInputs = options.recordInputs ?? false; - const recordOutputs = options.recordOutputs ?? false; + const { recordInputs, recordOutputs } = resolveAIRecordingOptions(options); // Internal state - single instance tracks all spans const spanMap = new Map(); diff --git a/packages/core/src/tracing/langgraph/index.ts b/packages/core/src/tracing/langgraph/index.ts index c1e838bd1914..c010520d10cc 100644 --- a/packages/core/src/tracing/langgraph/index.ts +++ b/packages/core/src/tracing/langgraph/index.ts @@ -13,7 +13,7 @@ import { GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE, } from '../ai/gen-ai-attributes'; import { truncateGenAiMessages } from '../ai/messageTruncation'; -import { extractSystemInstructions } from '../ai/utils'; +import { extractSystemInstructions, resolveAIRecordingOptions } from '../ai/utils'; import type { LangChainMessage } from '../langchain/types'; import { normalizeLangChainMessages } from '../langchain/utils'; import { startSpan } from '../trace'; @@ -207,9 +207,7 @@ export function instrumentLangGraph any stateGraph: T, options?: LangGraphOptions, ): T { - const _options: LangGraphOptions = options || {}; - - stateGraph.compile = instrumentStateGraphCompile(stateGraph.compile.bind(stateGraph), _options); + stateGraph.compile = instrumentStateGraphCompile(stateGraph.compile, resolveAIRecordingOptions(options)); return stateGraph; } diff --git a/packages/core/src/tracing/openai/index.ts b/packages/core/src/tracing/openai/index.ts index cfbdc5cfb4b1..484128810b01 100644 --- a/packages/core/src/tracing/openai/index.ts +++ b/packages/core/src/tracing/openai/index.ts @@ -1,4 +1,3 @@ -import { getClient } from '../../currentScopes'; import { DEBUG_BUILD } from '../../debug-build'; import { captureException } from '../../exports'; import { SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from 
'../../semanticAttributes'; @@ -19,7 +18,7 @@ import { GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE, OPENAI_OPERATIONS, } from '../ai/gen-ai-attributes'; -import { extractSystemInstructions, getTruncatedJsonString } from '../ai/utils'; +import { extractSystemInstructions, getTruncatedJsonString, resolveAIRecordingOptions } from '../ai/utils'; import { instrumentStream } from './streaming'; import type { ChatCompletionChunk, @@ -370,13 +369,5 @@ function createDeepProxy(target: T, currentPath = '', options: * Can be used across Node.js, Cloudflare Workers, and Vercel Edge */ export function instrumentOpenAiClient(client: T, options?: OpenAiOptions): T { - const sendDefaultPii = Boolean(getClient()?.getOptions().sendDefaultPii); - - const _options = { - recordInputs: sendDefaultPii, - recordOutputs: sendDefaultPii, - ...options, - }; - - return createDeepProxy(client, '', _options); + return createDeepProxy(client, '', resolveAIRecordingOptions(options)); } diff --git a/packages/core/src/tracing/openai/utils.ts b/packages/core/src/tracing/openai/utils.ts index 82494f7ae018..3338d4524d75 100644 --- a/packages/core/src/tracing/openai/utils.ts +++ b/packages/core/src/tracing/openai/utils.ts @@ -222,6 +222,7 @@ export function addResponsesApiAttributes(span: Span, response: OpenAIResponseOb // Filter for function_call type objects in the output array const functionCalls = responseWithOutput.output.filter( (item): unknown => + // oxlint-disable-next-line typescript/prefer-optional-chain typeof item === 'object' && item !== null && (item as Record).type === 'function_call', ); diff --git a/packages/core/src/tracing/vercel-ai/constants.ts b/packages/core/src/tracing/vercel-ai/constants.ts index 94561dae3e98..fb82c6063dd4 100644 --- a/packages/core/src/tracing/vercel-ai/constants.ts +++ b/packages/core/src/tracing/vercel-ai/constants.ts @@ -6,15 +6,7 @@ import type { ToolCallSpanContext } from './types'; export const toolCallSpanContextMap = new Map(); // Operation sets for 
efficient mapping to OpenTelemetry semantic convention values -export const INVOKE_AGENT_OPS = new Set([ - 'ai.generateText', - 'ai.streamText', - 'ai.generateObject', - 'ai.streamObject', - 'ai.embed', - 'ai.embedMany', - 'ai.rerank', -]); +export const INVOKE_AGENT_OPS = new Set(['ai.generateText', 'ai.streamText', 'ai.generateObject', 'ai.streamObject']); export const GENERATE_CONTENT_OPS = new Set([ 'ai.generateText.doGenerate', @@ -28,7 +20,7 @@ export const EMBEDDINGS_OPS = new Set(['ai.embed.doEmbed', 'ai.embedMany.doEmbed export const RERANK_OPS = new Set(['ai.rerank.doRerank']); export const DO_SPAN_NAME_PREFIX: Record = { - 'ai.embed.doEmbed': 'embed', - 'ai.embedMany.doEmbed': 'embed_many', + 'ai.embed.doEmbed': 'embeddings', + 'ai.embedMany.doEmbed': 'embeddings', 'ai.rerank.doRerank': 'rerank', }; diff --git a/packages/core/src/tracing/vercel-ai/index.ts b/packages/core/src/tracing/vercel-ai/index.ts index 919c06eb12d6..17e26b7b6bac 100644 --- a/packages/core/src/tracing/vercel-ai/index.ts +++ b/packages/core/src/tracing/vercel-ai/index.ts @@ -5,8 +5,10 @@ import type { Event } from '../../types-hoist/event'; import type { Span, SpanAttributes, SpanAttributeValue, SpanJSON } from '../../types-hoist/span'; import { spanToJSON } from '../../utils/spanUtils'; import { + GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE, GEN_AI_INPUT_MESSAGES_ATTRIBUTE, GEN_AI_OPERATION_NAME_ATTRIBUTE, + GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE, GEN_AI_REQUEST_MODEL_ATTRIBUTE, GEN_AI_RESPONSE_MODEL_ATTRIBUTE, GEN_AI_TOOL_CALL_ID_ATTRIBUTE, @@ -32,6 +34,7 @@ import type { TokenSummary } from './types'; import { accumulateTokensForParent, applyAccumulatedTokens, + applyToolDescriptionsAndTokens, convertAvailableToolsToJsonString, getSpanOpFromName, requestMessagesFromPrompt, @@ -42,6 +45,7 @@ import { AI_OPERATION_ID_ATTRIBUTE, AI_PROMPT_MESSAGES_ATTRIBUTE, AI_PROMPT_TOOLS_ATTRIBUTE, + AI_RESPONSE_FINISH_REASON_ATTRIBUTE, AI_RESPONSE_OBJECT_ATTRIBUTE, AI_RESPONSE_PROVIDER_METADATA_ATTRIBUTE, 
AI_RESPONSE_TEXT_ATTRIBUTE, @@ -55,6 +59,8 @@ import { AI_USAGE_CACHED_INPUT_TOKENS_ATTRIBUTE, AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE, AI_USAGE_PROMPT_TOKENS_ATTRIBUTE, + AI_USAGE_TOKENS_ATTRIBUTE, + AI_VALUES_ATTRIBUTE, OPERATION_NAME_ATTRIBUTE, } from './vercel-ai-attributes'; @@ -124,24 +130,132 @@ function vercelAiEventProcessor(event: Event): Event { accumulateTokensForParent(span, tokenAccumulator); } - // Second pass: apply accumulated token data to parent spans - for (const span of event.spans) { - if (span.op !== 'gen_ai.invoke_agent') { - continue; - } - - applyAccumulatedTokens(span, tokenAccumulator); - } + // Second pass: apply tool descriptions and accumulated tokens + applyToolDescriptionsAndTokens(event.spans, tokenAccumulator); // Also apply to root when it is the invoke_agent pipeline const trace = event.contexts?.trace; - if (trace && trace.op === 'gen_ai.invoke_agent') { + if (trace?.op === 'gen_ai.invoke_agent') { applyAccumulatedTokens(trace, tokenAccumulator); } } return event; } + +/** + * Tool call structure from Vercel AI SDK + * Note: V5/V6 use 'input' for arguments, V4 and earlier use 'args' + */ +interface VercelToolCall { + toolCallId: string; + toolName: string; + input?: Record | string; // V5/V6 + args?: string; // V4 and earlier +} + +/** + * Normalize finish reason to match OpenTelemetry semantic conventions. + * Valid values: "stop", "length", "content_filter", "tool_call", "error" + * + * Vercel AI SDK uses "tool-calls" (plural, with hyphen) which we map to "tool_call". 
+ */ +function normalizeFinishReason(finishReason: unknown): string { + if (typeof finishReason !== 'string') { + return 'stop'; + } + + // Map Vercel AI SDK finish reasons to OpenTelemetry semantic convention values + switch (finishReason) { + case 'tool-calls': + return 'tool_call'; + case 'stop': + case 'length': + case 'content_filter': + case 'error': + return finishReason; + default: + // For unknown values, return as-is (schema allows arbitrary strings) + return finishReason; + } +} + +/** + * Build gen_ai.output.messages from ai.response.text and/or ai.response.toolCalls + * + * Format follows OpenTelemetry semantic conventions: + * [{"role": "assistant", "parts": [...], "finish_reason": "stop"}] + * + * Parts can be: + * - {"type": "text", "content": "..."} + * - {"type": "tool_call", "id": "...", "name": "...", "arguments": "..."} + */ +function buildOutputMessages(attributes: Record): void { + const responseText = attributes[AI_RESPONSE_TEXT_ATTRIBUTE]; + const responseToolCalls = attributes[AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]; + const finishReason = attributes[AI_RESPONSE_FINISH_REASON_ATTRIBUTE]; + + // Skip if neither text nor tool calls are present + if (responseText == null && responseToolCalls == null) { + return; + } + + const parts: Array> = []; + + // Add text part if present + if (typeof responseText === 'string' && responseText.length > 0) { + parts.push({ + type: 'text', + content: responseText, + }); + } + + // Add tool call parts if present + if (responseToolCalls != null) { + try { + // Tool calls can be a string (JSON) or already parsed array + const toolCalls: VercelToolCall[] = + typeof responseToolCalls === 'string' ? JSON.parse(responseToolCalls) : responseToolCalls; + + if (Array.isArray(toolCalls)) { + for (const toolCall of toolCalls) { + // V5/V6 use 'input', V4 and earlier use 'args' + const args = toolCall.input ?? 
toolCall.args; + parts.push({ + type: 'tool_call', + id: toolCall.toolCallId, + name: toolCall.toolName, + // Handle undefined args: JSON.stringify(undefined) returns undefined, not a string, + // which would cause the property to be omitted from the final JSON output + arguments: typeof args === 'string' ? args : JSON.stringify(args ?? {}), + }); + } + // Only delete tool calls attribute if we successfully processed them + // eslint-disable-next-line @typescript-eslint/no-dynamic-delete + delete attributes[AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]; + } + } catch { + // Ignore parsing errors - tool calls attribute is preserved + } + } + + // Only set output messages and delete text attribute if we have parts + if (parts.length > 0) { + const outputMessage = { + role: 'assistant', + parts, + finish_reason: normalizeFinishReason(finishReason), + }; + + attributes[GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE] = JSON.stringify([outputMessage]); + + // Remove the text attribute since it's now captured in gen_ai.output.messages + // Note: tool calls attribute is deleted above only if successfully parsed + // eslint-disable-next-line @typescript-eslint/no-dynamic-delete + delete attributes[AI_RESPONSE_TEXT_ATTRIBUTE]; + } +} + /** * Post-process spans emitted by the Vercel AI SDK. 
*/ @@ -160,6 +274,9 @@ function processEndedVercelAiSpan(span: SpanJSON): void { renameAttributeKey(attributes, 'ai.usage.inputTokens', GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE); renameAttributeKey(attributes, 'ai.usage.outputTokens', GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE); + // Embedding spans use ai.usage.tokens instead of promptTokens/completionTokens + renameAttributeKey(attributes, AI_USAGE_TOKENS_ATTRIBUTE, GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE); + // AI SDK uses avgOutputTokensPerSecond, map to our expected attribute name renameAttributeKey(attributes, 'ai.response.avgOutputTokensPerSecond', 'ai.response.avgCompletionTokensPerSecond'); @@ -172,12 +289,13 @@ function processEndedVercelAiSpan(span: SpanJSON): void { attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE] + attributes[GEN_AI_USAGE_INPUT_TOKENS_CACHED_ATTRIBUTE]; } - if ( - typeof attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] === 'number' && - typeof attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE] === 'number' - ) { - attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE] = - attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] + attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]; + // Compute total tokens from input + output (embeddings may only have input tokens) + if (typeof attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE] === 'number') { + const outputTokens = + typeof attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] === 'number' + ? 
attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] + : 0; + attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE] = outputTokens + attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]; } // Convert the available tools array to a JSON string @@ -196,8 +314,11 @@ function processEndedVercelAiSpan(span: SpanJSON): void { delete attributes[OPERATION_NAME_ATTRIBUTE]; } renameAttributeKey(attributes, AI_PROMPT_MESSAGES_ATTRIBUTE, GEN_AI_INPUT_MESSAGES_ATTRIBUTE); - renameAttributeKey(attributes, AI_RESPONSE_TEXT_ATTRIBUTE, 'gen_ai.response.text'); - renameAttributeKey(attributes, AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, 'gen_ai.response.tool_calls'); + + // Build gen_ai.output.messages from response text and/or tool calls + // Note: buildOutputMessages also removes the source attributes when output is successfully generated + buildOutputMessages(attributes); + renameAttributeKey(attributes, AI_RESPONSE_OBJECT_ATTRIBUTE, 'gen_ai.response.object'); renameAttributeKey(attributes, AI_PROMPT_TOOLS_ATTRIBUTE, 'gen_ai.request.available_tools'); @@ -207,6 +328,20 @@ function processEndedVercelAiSpan(span: SpanJSON): void { renameAttributeKey(attributes, AI_SCHEMA_ATTRIBUTE, 'gen_ai.request.schema'); renameAttributeKey(attributes, AI_MODEL_ID_ATTRIBUTE, GEN_AI_REQUEST_MODEL_ATTRIBUTE); + // Map embedding input: ai.values → gen_ai.embeddings.input + // Vercel AI SDK JSON-stringifies each value individually, so we parse each element back. + // Single embed gets unwrapped to a plain value; batch embedMany stays as a JSON array. + if (Array.isArray(attributes[AI_VALUES_ATTRIBUTE])) { + const parsed = (attributes[AI_VALUES_ATTRIBUTE] as string[]).map(v => { + try { + return JSON.parse(v); + } catch { + return v; + } + }); + attributes[GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE] = parsed.length === 1 ? 
parsed[0] : JSON.stringify(parsed); + } + addProviderMetadataToAttributes(attributes); // Change attributes namespaced with `ai.X` to `vercel.ai.X` diff --git a/packages/core/src/tracing/vercel-ai/utils.ts b/packages/core/src/tracing/vercel-ai/utils.ts index 139d75a241ee..e72efde75e18 100644 --- a/packages/core/src/tracing/vercel-ai/utils.ts +++ b/packages/core/src/tracing/vercel-ai/utils.ts @@ -10,9 +10,12 @@ import { GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE, GEN_AI_INVOKE_AGENT_OPERATION_ATTRIBUTE, GEN_AI_RERANK_DO_RERANK_OPERATION_ATTRIBUTE, + GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE, GEN_AI_STREAM_OBJECT_DO_STREAM_OPERATION_ATTRIBUTE, GEN_AI_STREAM_TEXT_DO_STREAM_OPERATION_ATTRIBUTE, GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE, + GEN_AI_TOOL_DESCRIPTION_ATTRIBUTE, + GEN_AI_TOOL_NAME_ATTRIBUTE, GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, } from '../ai/gen-ai-attributes'; @@ -74,6 +77,61 @@ export function applyAccumulatedTokens( } } +/** + * Builds a map of tool name -> description from all spans with available_tools. + * This avoids O(n²) iteration and repeated JSON parsing. + */ +function buildToolDescriptionMap(spans: SpanJSON[]): Map { + const toolDescriptions = new Map(); + + for (const span of spans) { + const availableTools = span.data[GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]; + if (typeof availableTools !== 'string') { + continue; + } + try { + const tools = JSON.parse(availableTools) as Array<{ name?: string; description?: string }>; + for (const tool of tools) { + if (tool.name && tool.description && !toolDescriptions.has(tool.name)) { + toolDescriptions.set(tool.name, tool.description); + } + } + } catch { + // ignore parse errors + } + } + + return toolDescriptions; +} + +/** + * Applies tool descriptions and accumulated tokens to spans in a single pass. 
+ * + * - For `gen_ai.execute_tool` spans: looks up tool description from + * `gen_ai.request.available_tools` on sibling spans + * - For `gen_ai.invoke_agent` spans: applies accumulated token data from children + */ +export function applyToolDescriptionsAndTokens(spans: SpanJSON[], tokenAccumulator: Map): void { + // Build lookup map once to avoid O(n²) iteration and repeated JSON parsing + const toolDescriptions = buildToolDescriptionMap(spans); + + for (const span of spans) { + if (span.op === 'gen_ai.execute_tool') { + const toolName = span.data[GEN_AI_TOOL_NAME_ATTRIBUTE]; + if (typeof toolName === 'string') { + const description = toolDescriptions.get(toolName); + if (description) { + span.data[GEN_AI_TOOL_DESCRIPTION_ATTRIBUTE] = description; + } + } + } + + if (span.op === 'gen_ai.invoke_agent') { + applyAccumulatedTokens(span, tokenAccumulator); + } + } +} + /** * Get the span context associated with a tool call ID. */ @@ -235,9 +293,6 @@ export function getSpanOpFromName(name: string): string | undefined { case 'ai.streamText': case 'ai.generateObject': case 'ai.streamObject': - case 'ai.embed': - case 'ai.embedMany': - case 'ai.rerank': return GEN_AI_INVOKE_AGENT_OPERATION_ATTRIBUTE; case 'ai.generateText.doGenerate': return GEN_AI_GENERATE_TEXT_DO_GENERATE_OPERATION_ATTRIBUTE; diff --git a/packages/core/src/types-hoist/polymorphics.ts b/packages/core/src/types-hoist/polymorphics.ts index 74ade60e2098..aaec637ba969 100644 --- a/packages/core/src/types-hoist/polymorphics.ts +++ b/packages/core/src/types-hoist/polymorphics.ts @@ -13,20 +13,13 @@ export interface PolymorphicEvent { } /** A `Request` type compatible with Node, Express, browser, etc., because everything is optional */ -export type PolymorphicRequest = BaseRequest & - BrowserRequest & - NodeRequest & - ExpressRequest & - KoaRequest & - NextjsRequest; +export type PolymorphicRequest = BaseRequest & NodeRequest & ExpressRequest & KoaRequest & NextjsRequest; type BaseRequest = { method?: string; 
url?: string; }; -type BrowserRequest = BaseRequest; - type NodeRequest = BaseRequest & { headers?: { [key: string]: string | string[] | undefined; diff --git a/packages/core/src/utils/debug-logger.ts b/packages/core/src/utils/debug-logger.ts index bbc524729674..6f52020986a4 100644 --- a/packages/core/src/utils/debug-logger.ts +++ b/packages/core/src/utils/debug-logger.ts @@ -85,10 +85,6 @@ function log(...args: Parameters): void { _maybeLog('log', ...args); } -function info(...args: Parameters): void { - _maybeLog('info', ...args); -} - function warn(...args: Parameters): void { _maybeLog('warn', ...args); } diff --git a/packages/core/src/utils/exports.ts b/packages/core/src/utils/exports.ts index 588e758e88f9..fbfdea94cff4 100644 --- a/packages/core/src/utils/exports.ts +++ b/packages/core/src/utils/exports.ts @@ -21,7 +21,7 @@ export function replaceExports( // Replace the named export - handle read-only properties try { exports[exportName] = wrappedConstructor; - } catch (error) { + } catch { // If direct assignment fails, override the property descriptor Object.defineProperty(exports, exportName, { value: wrappedConstructor, @@ -35,7 +35,7 @@ export function replaceExports( if (exports.default === original) { try { exports.default = wrappedConstructor; - } catch (error) { + } catch { Object.defineProperty(exports, 'default', { value: wrappedConstructor, writable: true, diff --git a/packages/core/src/utils/prepareEvent.ts b/packages/core/src/utils/prepareEvent.ts index 6528873c3dee..95e244df2092 100644 --- a/packages/core/src/utils/prepareEvent.ts +++ b/packages/core/src/utils/prepareEvent.ts @@ -95,6 +95,7 @@ export function prepareEvent( ]; // Skip event processors for internal exceptions to prevent recursion + // oxlint-disable-next-line typescript/prefer-optional-chain const isInternalException = hint.data && (hint.data as { __sentry__: boolean }).__sentry__ === true; const result = isInternalException ? 
resolvedSyncPromise(prepared) diff --git a/packages/core/src/utils/request.ts b/packages/core/src/utils/request.ts index d328a16e05d9..6aaceb8fc201 100644 --- a/packages/core/src/utils/request.ts +++ b/packages/core/src/utils/request.ts @@ -152,15 +152,22 @@ const SENSITIVE_HEADER_SNIPPETS = [ const PII_HEADER_SNIPPETS = ['x-forwarded-', '-user']; /** - * Converts incoming HTTP request headers to OpenTelemetry span attributes following semantic conventions. - * Header names are converted to the format: http.request.header. + * Converts incoming HTTP request or response headers to OpenTelemetry span attributes following semantic conventions. + * Header names are converted to the format: http..header. * where is the header name in lowercase with dashes converted to underscores. * + * @param lifecycle - The lifecycle of the headers, either 'request' or 'response' + * * @see https://opentelemetry.io/docs/specs/semconv/registry/attributes/http/#http-request-header + * @see https://opentelemetry.io/docs/specs/semconv/registry/attributes/http/#http-response-header + * + * @see https://getsentry.github.io/sentry-conventions/attributes/http/#http-request-header-key + * @see https://getsentry.github.io/sentry-conventions/attributes/http/#http-response-header-key */ export function httpHeadersToSpanAttributes( headers: Record, sendDefaultPii: boolean = false, + lifecycle: 'request' | 'response' = 'request', ): Record { const spanAttributes: Record = {}; @@ -189,10 +196,17 @@ export function httpHeadersToSpanAttributes( const lowerCasedCookieKey = cookieKey.toLowerCase(); - addSpanAttribute(spanAttributes, lowerCasedHeaderKey, lowerCasedCookieKey, cookieValue, sendDefaultPii); + addSpanAttribute( + spanAttributes, + lowerCasedHeaderKey, + lowerCasedCookieKey, + cookieValue, + sendDefaultPii, + lifecycle, + ); } } else { - addSpanAttribute(spanAttributes, lowerCasedHeaderKey, '', value, sendDefaultPii); + addSpanAttribute(spanAttributes, lowerCasedHeaderKey, '', value, 
sendDefaultPii, lifecycle); } }); } catch { @@ -212,15 +226,15 @@ function addSpanAttribute( cookieKey: string, value: string | string[] | undefined, sendPii: boolean, + lifecycle: 'request' | 'response', ): void { - const normalizedKey = cookieKey - ? `http.request.header.${normalizeAttributeKey(headerKey)}.${normalizeAttributeKey(cookieKey)}` - : `http.request.header.${normalizeAttributeKey(headerKey)}`; - const headerValue = handleHttpHeader(cookieKey || headerKey, value, sendPii); - if (headerValue !== undefined) { - spanAttributes[normalizedKey] = headerValue; + if (headerValue == null) { + return; } + + const normalizedKey = `http.${lifecycle}.header.${normalizeAttributeKey(headerKey)}${cookieKey ? `.${normalizeAttributeKey(cookieKey)}` : ''}`; + spanAttributes[normalizedKey] = headerValue; } function handleHttpHeader( diff --git a/packages/core/src/utils/timestampSequence.ts b/packages/core/src/utils/timestampSequence.ts new file mode 100644 index 000000000000..d2755d7a0724 --- /dev/null +++ b/packages/core/src/utils/timestampSequence.ts @@ -0,0 +1,41 @@ +const SEQUENCE_ATTR_KEY = 'sentry.timestamp.sequence'; + +let _sequenceNumber = 0; +let _previousTimestampMs: number | undefined; + +/** + * Returns the `sentry.timestamp.sequence` attribute entry for a serialized telemetry item. + * + * The sequence number starts at 0 and increments by 1 for each item captured. + * It resets to 0 when the current item's integer millisecond timestamp differs + * from the previous item's integer millisecond timestamp. + * + * @param timestampInSeconds - The timestamp of the telemetry item in seconds. 
+ */ +export function getSequenceAttribute(timestampInSeconds: number): { + key: string; + value: { value: number; type: 'integer' }; +} { + const nowMs = Math.floor(timestampInSeconds * 1000); + + if (_previousTimestampMs !== undefined && nowMs !== _previousTimestampMs) { + _sequenceNumber = 0; + } + + const value = _sequenceNumber; + _sequenceNumber++; + _previousTimestampMs = nowMs; + + return { + key: SEQUENCE_ATTR_KEY, + value: { value, type: 'integer' }, + }; +} + +/** + * Resets the sequence number state. Only exported for testing purposes. + */ +export function _INTERNAL_resetSequenceNumber(): void { + _sequenceNumber = 0; + _previousTimestampMs = undefined; +} diff --git a/packages/core/test/lib/client.test.ts b/packages/core/test/lib/client.test.ts index e7335f0de7e0..1548a4aecce4 100644 --- a/packages/core/test/lib/client.test.ts +++ b/packages/core/test/lib/client.test.ts @@ -2816,8 +2816,6 @@ describe('Client', () => { // We could set "NODE_OPTIONS='--unhandled-rejections=warn' but it // would affect the entire test suite. // Maybe this can be re-enabled when switching to vitest. 
- // - // eslint-disable-next-line jest/no-disabled-tests test.skip('handles asynchronous errors', async () => { const error = new Error('Test error'); const callback = vi.fn().mockRejectedValue(error); diff --git a/packages/core/test/lib/logs/internal.test.ts b/packages/core/test/lib/logs/internal.test.ts index 2eec7c64dcbc..360485f5ca84 100644 --- a/packages/core/test/lib/logs/internal.test.ts +++ b/packages/core/test/lib/logs/internal.test.ts @@ -1,13 +1,18 @@ -import { describe, expect, it, vi } from 'vitest'; +import { beforeEach, describe, expect, it, vi } from 'vitest'; import { fmt, Scope } from '../../../src'; import { _INTERNAL_captureLog, _INTERNAL_flushLogsBuffer, _INTERNAL_getLogBuffer } from '../../../src/logs/internal'; import type { Log } from '../../../src/types-hoist/log'; import * as loggerModule from '../../../src/utils/debug-logger'; +import * as timeModule from '../../../src/utils/time'; +import { _INTERNAL_resetSequenceNumber } from '../../../src/utils/timestampSequence'; import { getDefaultTestClientOptions, TestClient } from '../../mocks/client'; const PUBLIC_DSN = 'https://username@domain/123'; describe('_INTERNAL_captureLog', () => { + beforeEach(() => { + _INTERNAL_resetSequenceNumber(); + }); it('captures and sends logs', () => { const options = getDefaultTestClientOptions({ dsn: PUBLIC_DSN, enableLogs: true }); const client = new TestClient(options); @@ -23,7 +28,9 @@ describe('_INTERNAL_captureLog', () => { timestamp: expect.any(Number), trace_id: expect.any(String), severity_number: 9, - attributes: {}, + attributes: { + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, + }, }), ); }); @@ -86,6 +93,7 @@ describe('_INTERNAL_captureLog', () => { value: 'test', type: 'string', }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, }); }); @@ -117,6 +125,7 @@ describe('_INTERNAL_captureLog', () => { value: '7.0.0', type: 'string', }, + 'sentry.timestamp.sequence': { value: 
expect.any(Number), type: 'integer' }, }); }); @@ -168,6 +177,7 @@ describe('_INTERNAL_captureLog', () => { value: 'auth', type: 'string', }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, }); }); @@ -219,6 +229,7 @@ describe('_INTERNAL_captureLog', () => { type: 'boolean', value: true, }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, }); }); }); @@ -278,6 +289,7 @@ describe('_INTERNAL_captureLog', () => { value: 'Sentry', type: 'string', }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, }); }); @@ -290,7 +302,9 @@ describe('_INTERNAL_captureLog', () => { _INTERNAL_captureLog({ level: 'debug', message: fmt`User logged in` }, scope); const logAttributes = _INTERNAL_getLogBuffer(client)?.[0]?.attributes; - expect(logAttributes).toEqual({}); + expect(logAttributes).toEqual({ + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, + }); }); it('processes logs through beforeSendLog when provided', () => { @@ -344,7 +358,6 @@ describe('_INTERNAL_captureLog', () => { value: true, type: 'boolean', }, - // during serialization, they're converted to the typed attribute format scope_1: { value: 'attribute_value', type: 'string', @@ -354,6 +367,7 @@ describe('_INTERNAL_captureLog', () => { unit: 'gigabytes', type: 'integer', }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, }, }), ); @@ -439,6 +453,7 @@ describe('_INTERNAL_captureLog', () => { value: 'sampled-replay-id', type: 'string', }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, }); }); @@ -464,8 +479,9 @@ describe('_INTERNAL_captureLog', () => { expect(mockReplayIntegration.getReplayId).toHaveBeenCalledWith(true); const logAttributes = _INTERNAL_getLogBuffer(client)?.[0]?.attributes; - // Should not include sentry.replay_id attribute - expect(logAttributes).toEqual({}); + expect(logAttributes).toEqual({ + 'sentry.timestamp.sequence': { 
value: expect.any(Number), type: 'integer' }, + }); }); it('includes replay ID for buffer mode sessions', () => { @@ -499,6 +515,7 @@ describe('_INTERNAL_captureLog', () => { value: true, type: 'boolean', }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, }); }); @@ -514,8 +531,9 @@ describe('_INTERNAL_captureLog', () => { _INTERNAL_captureLog({ level: 'info', message: 'test log without replay' }, scope); const logAttributes = _INTERNAL_getLogBuffer(client)?.[0]?.attributes; - // Should not include sentry.replay_id attribute - expect(logAttributes).toEqual({}); + expect(logAttributes).toEqual({ + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, + }); }); it('combines replay ID with other log attributes', () => { @@ -568,6 +586,7 @@ describe('_INTERNAL_captureLog', () => { value: 'test-replay-id', type: 'string', }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, }); }); @@ -592,7 +611,9 @@ describe('_INTERNAL_captureLog', () => { _INTERNAL_captureLog({ level: 'info', message: `test log with replay returning ${returnValue}` }, scope); const logAttributes = _INTERNAL_getLogBuffer(client)?.[0]?.attributes; - expect(logAttributes).toEqual({}); + expect(logAttributes).toEqual({ + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, + }); expect(logAttributes).not.toHaveProperty('sentry.replay_id'); }); }); @@ -626,6 +647,7 @@ describe('_INTERNAL_captureLog', () => { value: true, type: 'boolean', }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, }); }); @@ -654,6 +676,7 @@ describe('_INTERNAL_captureLog', () => { value: 'session-replay-id', type: 'string', }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, }); expect(logAttributes).not.toHaveProperty('sentry._internal.replay_is_buffering'); }); @@ -683,6 +706,7 @@ describe('_INTERNAL_captureLog', () => { value: 'stopped-replay-id', type: 'string', 
}, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, }); expect(logAttributes).not.toHaveProperty('sentry._internal.replay_is_buffering'); }); @@ -708,7 +732,9 @@ describe('_INTERNAL_captureLog', () => { expect(mockReplayIntegration.getRecordingMode).not.toHaveBeenCalled(); const logAttributes = _INTERNAL_getLogBuffer(client)?.[0]?.attributes; - expect(logAttributes).toEqual({}); + expect(logAttributes).toEqual({ + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, + }); expect(logAttributes).not.toHaveProperty('sentry.replay_id'); expect(logAttributes).not.toHaveProperty('sentry.internal.replay_is_buffering'); }); @@ -725,7 +751,9 @@ describe('_INTERNAL_captureLog', () => { _INTERNAL_captureLog({ level: 'info', message: 'test log without replay integration' }, scope); const logAttributes = _INTERNAL_getLogBuffer(client)?.[0]?.attributes; - expect(logAttributes).toEqual({}); + expect(logAttributes).toEqual({ + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, + }); expect(logAttributes).not.toHaveProperty('sentry.replay_id'); expect(logAttributes).not.toHaveProperty('sentry._internal.replay_is_buffering'); }); @@ -784,6 +812,7 @@ describe('_INTERNAL_captureLog', () => { value: true, type: 'boolean', }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, }); }); }); @@ -819,6 +848,7 @@ describe('_INTERNAL_captureLog', () => { value: 'testuser', type: 'string', }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, }); }); @@ -844,6 +874,7 @@ describe('_INTERNAL_captureLog', () => { value: '123', type: 'string', }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, }); }); @@ -874,6 +905,7 @@ describe('_INTERNAL_captureLog', () => { value: 'testuser', type: 'string', }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, }); }); @@ -891,7 +923,9 @@ 
describe('_INTERNAL_captureLog', () => { _INTERNAL_captureLog({ level: 'info', message: 'test log with empty user' }, scope); const logAttributes = _INTERNAL_getLogBuffer(client)?.[0]?.attributes; - expect(logAttributes).toEqual({}); + expect(logAttributes).toEqual({ + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, + }); }); it('combines user data with other log attributes', () => { @@ -945,6 +979,7 @@ describe('_INTERNAL_captureLog', () => { value: 'test', type: 'string', }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, }); }); @@ -975,6 +1010,7 @@ describe('_INTERNAL_captureLog', () => { value: 'user@example.com', type: 'string', }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, }); }); @@ -1018,6 +1054,7 @@ describe('_INTERNAL_captureLog', () => { value: 'user@example.com', // Only added because user.email wasn't already present type: 'string', }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, }); }); @@ -1066,6 +1103,7 @@ describe('_INTERNAL_captureLog', () => { value: 'scope-user', // Added from scope because not present type: 'string', }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, }); }); }); @@ -1126,6 +1164,101 @@ describe('_INTERNAL_captureLog', () => { value: '7.0.0', type: 'string', }, + 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' }, + }); + }); + + describe('sentry.timestamp.sequence', () => { + it('increments the sequence number across consecutive logs', () => { + vi.spyOn(timeModule, 'timestampInSeconds').mockReturnValue(1000.001); + + const options = getDefaultTestClientOptions({ dsn: PUBLIC_DSN, enableLogs: true }); + const client = new TestClient(options); + const scope = new Scope(); + scope.setClient(client); + + _INTERNAL_captureLog({ level: 'info', message: 'first' }, scope); + _INTERNAL_captureLog({ level: 'info', message: 'second' }, scope); + 
_INTERNAL_captureLog({ level: 'info', message: 'third' }, scope); + + const buffer = _INTERNAL_getLogBuffer(client); + expect(buffer?.[0]?.attributes?.['sentry.timestamp.sequence']).toEqual({ value: 0, type: 'integer' }); + expect(buffer?.[1]?.attributes?.['sentry.timestamp.sequence']).toEqual({ value: 1, type: 'integer' }); + expect(buffer?.[2]?.attributes?.['sentry.timestamp.sequence']).toEqual({ value: 2, type: 'integer' }); + + vi.restoreAllMocks(); + }); + + it('does not increment the sequence number for dropped logs', () => { + vi.spyOn(timeModule, 'timestampInSeconds').mockReturnValue(1000.001); + + const beforeSendLog = vi.fn().mockImplementation(log => { + if (log.message === 'drop me') { + return null; + } + return log; + }); + + const options = getDefaultTestClientOptions({ dsn: PUBLIC_DSN, enableLogs: true, beforeSendLog }); + const client = new TestClient(options); + const scope = new Scope(); + scope.setClient(client); + + _INTERNAL_captureLog({ level: 'info', message: 'keep first' }, scope); + _INTERNAL_captureLog({ level: 'info', message: 'drop me' }, scope); + _INTERNAL_captureLog({ level: 'info', message: 'keep second' }, scope); + + const buffer = _INTERNAL_getLogBuffer(client); + expect(buffer).toHaveLength(2); + expect(buffer?.[0]?.attributes?.['sentry.timestamp.sequence']).toEqual({ value: 0, type: 'integer' }); + expect(buffer?.[1]?.attributes?.['sentry.timestamp.sequence']).toEqual({ value: 1, type: 'integer' }); + + vi.restoreAllMocks(); + }); + + it('produces monotonically increasing sequence numbers within the same millisecond', () => { + vi.spyOn(timeModule, 'timestampInSeconds').mockReturnValue(1000.001); + + const options = getDefaultTestClientOptions({ dsn: PUBLIC_DSN, enableLogs: true }); + const client = new TestClient(options); + const scope = new Scope(); + scope.setClient(client); + + const count = 50; + for (let i = 0; i < count; i++) { + _INTERNAL_captureLog({ level: 'info', message: `log ${i}` }, scope); + } + + const buffer = 
_INTERNAL_getLogBuffer(client)!; + expect(buffer).toHaveLength(count); + + for (let i = 1; i < count; i++) { + const prev = (buffer[i - 1]?.attributes?.['sentry.timestamp.sequence'] as { value: number }).value; + const curr = (buffer[i]?.attributes?.['sentry.timestamp.sequence'] as { value: number }).value; + expect(curr).toBe(prev + 1); + } + + vi.restoreAllMocks(); + }); + + it('resets the sequence number via _INTERNAL_resetSequenceNumber', () => { + const options = getDefaultTestClientOptions({ dsn: PUBLIC_DSN, enableLogs: true }); + const client = new TestClient(options); + const scope = new Scope(); + scope.setClient(client); + + _INTERNAL_captureLog({ level: 'info', message: 'first' }, scope); + + _INTERNAL_resetSequenceNumber(); + + const client2 = new TestClient(options); + const scope2 = new Scope(); + scope2.setClient(client2); + + _INTERNAL_captureLog({ level: 'info', message: 'after reset' }, scope2); + + const buffer2 = _INTERNAL_getLogBuffer(client2); + expect(buffer2?.[0]?.attributes?.['sentry.timestamp.sequence']).toEqual({ value: 0, type: 'integer' }); }); }); }); diff --git a/packages/core/test/lib/metrics/internal.test.ts b/packages/core/test/lib/metrics/internal.test.ts index 434f4b6c8289..971a7a345883 100644 --- a/packages/core/test/lib/metrics/internal.test.ts +++ b/packages/core/test/lib/metrics/internal.test.ts @@ -1,4 +1,4 @@ -import { describe, expect, it, vi } from 'vitest'; +import { beforeEach, describe, expect, it, vi } from 'vitest'; import { Scope } from '../../../src'; import { _INTERNAL_captureMetric, @@ -7,11 +7,19 @@ import { } from '../../../src/metrics/internal'; import type { Metric } from '../../../src/types-hoist/metric'; import * as loggerModule from '../../../src/utils/debug-logger'; +import * as timeModule from '../../../src/utils/time'; +import { _INTERNAL_resetSequenceNumber } from '../../../src/utils/timestampSequence'; import { getDefaultTestClientOptions, TestClient } from '../../mocks/client'; const PUBLIC_DSN = 
'https://username@domain/123'; +const SEQUENCE_ATTR = { 'sentry.timestamp.sequence': { value: expect.any(Number), type: 'integer' } }; + describe('_INTERNAL_captureMetric', () => { + beforeEach(() => { + _INTERNAL_resetSequenceNumber(); + }); + it('captures and sends metrics', () => { const options = getDefaultTestClientOptions({ dsn: PUBLIC_DSN }); const client = new TestClient(options); @@ -27,7 +35,7 @@ describe('_INTERNAL_captureMetric', () => { value: 1, timestamp: expect.any(Number), trace_id: expect.any(String), - attributes: {}, + attributes: { ...SEQUENCE_ATTR }, }), ); }); @@ -80,6 +88,7 @@ describe('_INTERNAL_captureMetric', () => { const metricAttributes = _INTERNAL_getMetricBuffer(client)?.[0]?.attributes; expect(metricAttributes).toEqual({ + ...SEQUENCE_ATTR, 'sentry.release': { value: '1.0.0', type: 'string', @@ -110,6 +119,7 @@ describe('_INTERNAL_captureMetric', () => { const metricAttributes = _INTERNAL_getMetricBuffer(client)?.[0]?.attributes; expect(metricAttributes).toEqual({ + ...SEQUENCE_ATTR, 'sentry.sdk.name': { value: 'sentry.javascript.node', type: 'string', @@ -160,6 +170,7 @@ describe('_INTERNAL_captureMetric', () => { const metricAttributes = _INTERNAL_getMetricBuffer(client)?.[0]?.attributes; expect(metricAttributes).toEqual({ + ...SEQUENCE_ATTR, endpoint: { value: '/api/users', type: 'string', @@ -183,6 +194,7 @@ describe('_INTERNAL_captureMetric', () => { const metricAttributes = _INTERNAL_getMetricBuffer(client)?.[0]?.attributes; expect(metricAttributes).toEqual({ + ...SEQUENCE_ATTR, scope_attribute_1: { value: 1, type: 'integer', @@ -213,6 +225,7 @@ describe('_INTERNAL_captureMetric', () => { const metricAttributes = _INTERNAL_getMetricBuffer(client)?.[0]?.attributes; expect(metricAttributes).toEqual({ + ...SEQUENCE_ATTR, 'my-attribute': { value: 43, type: 'integer' }, }); }); @@ -286,6 +299,7 @@ describe('_INTERNAL_captureMetric', () => { expect.objectContaining({ name: 'modified.original.metric', attributes: { + 
...SEQUENCE_ATTR, processed: { value: true, type: 'boolean', @@ -370,6 +384,7 @@ describe('_INTERNAL_captureMetric', () => { const metricAttributes = _INTERNAL_getMetricBuffer(client)?.[0]?.attributes; expect(metricAttributes).toEqual({ + ...SEQUENCE_ATTR, 'sentry.replay_id': { value: 'sampled-replay-id', type: 'string', @@ -397,7 +412,7 @@ describe('_INTERNAL_captureMetric', () => { expect(mockReplayIntegration.getReplayId).toHaveBeenCalledWith(true); const metricAttributes = _INTERNAL_getMetricBuffer(client)?.[0]?.attributes; - expect(metricAttributes).toEqual({}); + expect(metricAttributes).toEqual({ ...SEQUENCE_ATTR }); }); it('includes replay ID for buffer mode sessions', () => { @@ -422,6 +437,7 @@ describe('_INTERNAL_captureMetric', () => { const metricAttributes = _INTERNAL_getMetricBuffer(client)?.[0]?.attributes; expect(metricAttributes).toEqual({ + ...SEQUENCE_ATTR, 'sentry.replay_id': { value: 'buffer-replay-id', type: 'string', @@ -445,7 +461,7 @@ describe('_INTERNAL_captureMetric', () => { _INTERNAL_captureMetric({ type: 'counter', name: 'test.metric', value: 1 }, { scope }); const metricAttributes = _INTERNAL_getMetricBuffer(client)?.[0]?.attributes; - expect(metricAttributes).toEqual({}); + expect(metricAttributes).toEqual({ ...SEQUENCE_ATTR }); }); it('combines replay ID with other metric attributes', () => { @@ -479,6 +495,7 @@ describe('_INTERNAL_captureMetric', () => { const metricAttributes = _INTERNAL_getMetricBuffer(client)?.[0]?.attributes; expect(metricAttributes).toEqual({ + ...SEQUENCE_ATTR, endpoint: { value: '/api/users', type: 'string', @@ -523,7 +540,7 @@ describe('_INTERNAL_captureMetric', () => { _INTERNAL_captureMetric({ type: 'counter', name: 'test.metric', value: 1 }, { scope }); const metricAttributes = _INTERNAL_getMetricBuffer(client)?.[0]?.attributes; - expect(metricAttributes).toEqual({}); + expect(metricAttributes).toEqual({ ...SEQUENCE_ATTR }); expect(metricAttributes).not.toHaveProperty('sentry.replay_id'); }); }); @@ 
-549,6 +566,7 @@ describe('_INTERNAL_captureMetric', () => { const metricAttributes = _INTERNAL_getMetricBuffer(client)?.[0]?.attributes; expect(metricAttributes).toEqual({ + ...SEQUENCE_ATTR, 'sentry.replay_id': { value: 'buffer-replay-id', type: 'string', @@ -581,6 +599,7 @@ describe('_INTERNAL_captureMetric', () => { const metricAttributes = _INTERNAL_getMetricBuffer(client)?.[0]?.attributes; expect(metricAttributes).toEqual({ + ...SEQUENCE_ATTR, 'sentry.replay_id': { value: 'session-replay-id', type: 'string', @@ -610,6 +629,7 @@ describe('_INTERNAL_captureMetric', () => { const metricAttributes = _INTERNAL_getMetricBuffer(client)?.[0]?.attributes; expect(metricAttributes).toEqual({ + ...SEQUENCE_ATTR, 'sentry.replay_id': { value: 'stopped-replay-id', type: 'string', @@ -639,7 +659,7 @@ describe('_INTERNAL_captureMetric', () => { expect(mockReplayIntegration.getRecordingMode).not.toHaveBeenCalled(); const metricAttributes = _INTERNAL_getMetricBuffer(client)?.[0]?.attributes; - expect(metricAttributes).toEqual({}); + expect(metricAttributes).toEqual({ ...SEQUENCE_ATTR }); expect(metricAttributes).not.toHaveProperty('sentry.replay_id'); expect(metricAttributes).not.toHaveProperty('sentry._internal.replay_is_buffering'); }); @@ -656,7 +676,7 @@ describe('_INTERNAL_captureMetric', () => { _INTERNAL_captureMetric({ type: 'counter', name: 'test.metric', value: 1 }, { scope }); const metricAttributes = _INTERNAL_getMetricBuffer(client)?.[0]?.attributes; - expect(metricAttributes).toEqual({}); + expect(metricAttributes).toEqual({ ...SEQUENCE_ATTR }); expect(metricAttributes).not.toHaveProperty('sentry.replay_id'); expect(metricAttributes).not.toHaveProperty('sentry._internal.replay_is_buffering'); }); @@ -692,6 +712,7 @@ describe('_INTERNAL_captureMetric', () => { const metricAttributes = _INTERNAL_getMetricBuffer(client)?.[0]?.attributes; expect(metricAttributes).toEqual({ + ...SEQUENCE_ATTR, endpoint: { value: '/api/users', type: 'string', @@ -738,6 +759,7 @@ 
describe('_INTERNAL_captureMetric', () => { const metricAttributes = _INTERNAL_getMetricBuffer(client)?.[0]?.attributes; expect(metricAttributes).toEqual({ + ...SEQUENCE_ATTR, 'user.id': { value: '123', type: 'string', @@ -769,6 +791,7 @@ describe('_INTERNAL_captureMetric', () => { const metricAttributes = _INTERNAL_getMetricBuffer(client)?.[0]?.attributes; expect(metricAttributes).toEqual({ + ...SEQUENCE_ATTR, 'user.id': { value: '123', type: 'string', @@ -793,6 +816,7 @@ describe('_INTERNAL_captureMetric', () => { const metricAttributes = _INTERNAL_getMetricBuffer(client)?.[0]?.attributes; expect(metricAttributes).toEqual({ + ...SEQUENCE_ATTR, 'user.email': { value: 'user@example.com', type: 'string', @@ -816,7 +840,7 @@ describe('_INTERNAL_captureMetric', () => { _INTERNAL_captureMetric({ type: 'counter', name: 'test.metric', value: 1 }, { scope }); const metricAttributes = _INTERNAL_getMetricBuffer(client)?.[0]?.attributes; - expect(metricAttributes).toEqual({}); + expect(metricAttributes).toEqual({ ...SEQUENCE_ATTR }); }); it('combines user data with other metric attributes', () => { @@ -846,6 +870,7 @@ describe('_INTERNAL_captureMetric', () => { const metricAttributes = _INTERNAL_getMetricBuffer(client)?.[0]?.attributes; expect(metricAttributes).toEqual({ + ...SEQUENCE_ATTR, endpoint: { value: '/api/users', type: 'string', @@ -890,6 +915,7 @@ describe('_INTERNAL_captureMetric', () => { const metricAttributes = _INTERNAL_getMetricBuffer(client)?.[0]?.attributes; expect(metricAttributes).toEqual({ + ...SEQUENCE_ATTR, 'user.id': { value: 123, type: 'integer', @@ -928,6 +954,7 @@ describe('_INTERNAL_captureMetric', () => { const metricAttributes = _INTERNAL_getMetricBuffer(client)?.[0]?.attributes; expect(metricAttributes).toEqual({ + ...SEQUENCE_ATTR, 'user.custom': { value: 'custom-value', type: 'string', @@ -971,6 +998,7 @@ describe('_INTERNAL_captureMetric', () => { const metricAttributes = _INTERNAL_getMetricBuffer(client)?.[0]?.attributes; 
expect(metricAttributes).toEqual({ + ...SEQUENCE_ATTR, 'other.attr': { value: 'value', type: 'string', @@ -1027,6 +1055,7 @@ describe('_INTERNAL_captureMetric', () => { const metricAttributes = _INTERNAL_getMetricBuffer(client)?.[0]?.attributes; expect(metricAttributes).toEqual({ + ...SEQUENCE_ATTR, 'user.custom': { value: 'preserved-value', type: 'string', @@ -1049,4 +1078,48 @@ describe('_INTERNAL_captureMetric', () => { }, }); }); + + describe('sentry.timestamp.sequence', () => { + it('increments the sequence number across consecutive metrics', () => { + // Mock timestampInSeconds to return a fixed value so the sequence number + // does not reset due to millisecond boundary crossings between calls. + const timestampSpy = vi.spyOn(timeModule, 'timestampInSeconds').mockReturnValue(1234.567); + + const options = getDefaultTestClientOptions({ dsn: PUBLIC_DSN }); + const client = new TestClient(options); + const scope = new Scope(); + scope.setClient(client); + + _INTERNAL_captureMetric({ type: 'counter', name: 'first', value: 1 }, { scope }); + _INTERNAL_captureMetric({ type: 'counter', name: 'second', value: 2 }, { scope }); + _INTERNAL_captureMetric({ type: 'counter', name: 'third', value: 3 }, { scope }); + + const buffer = _INTERNAL_getMetricBuffer(client); + expect(buffer?.[0]?.attributes?.['sentry.timestamp.sequence']).toEqual({ value: 0, type: 'integer' }); + expect(buffer?.[1]?.attributes?.['sentry.timestamp.sequence']).toEqual({ value: 1, type: 'integer' }); + expect(buffer?.[2]?.attributes?.['sentry.timestamp.sequence']).toEqual({ value: 2, type: 'integer' }); + + timestampSpy.mockRestore(); + }); + + it('resets the sequence number via _INTERNAL_resetSequenceNumber', () => { + const options = getDefaultTestClientOptions({ dsn: PUBLIC_DSN }); + const client = new TestClient(options); + const scope = new Scope(); + scope.setClient(client); + + _INTERNAL_captureMetric({ type: 'counter', name: 'first', value: 1 }, { scope }); + + 
_INTERNAL_resetSequenceNumber(); + + const client2 = new TestClient(options); + const scope2 = new Scope(); + scope2.setClient(client2); + + _INTERNAL_captureMetric({ type: 'counter', name: 'after reset', value: 2 }, { scope: scope2 }); + + const buffer2 = _INTERNAL_getMetricBuffer(client2); + expect(buffer2?.[0]?.attributes?.['sentry.timestamp.sequence']).toEqual({ value: 0, type: 'integer' }); + }); + }); }); diff --git a/packages/core/test/lib/metrics/public-api.test.ts b/packages/core/test/lib/metrics/public-api.test.ts index df8ff49c5553..cc8052cb0152 100644 --- a/packages/core/test/lib/metrics/public-api.test.ts +++ b/packages/core/test/lib/metrics/public-api.test.ts @@ -77,6 +77,10 @@ describe('Metrics Public API', () => { value: 'GET', type: 'string', }, + 'sentry.timestamp.sequence': { + value: expect.any(Number), + type: 'integer', + }, status: { value: 200, type: 'integer', @@ -194,6 +198,10 @@ describe('Metrics Public API', () => { value: 'websocket', type: 'string', }, + 'sentry.timestamp.sequence': { + value: expect.any(Number), + type: 'integer', + }, }, }), ); @@ -284,6 +292,10 @@ describe('Metrics Public API', () => { value: 'async', type: 'string', }, + 'sentry.timestamp.sequence': { + value: expect.any(Number), + type: 'integer', + }, }, }), ); diff --git a/packages/core/test/lib/tracing/ai/utils.test.ts b/packages/core/test/lib/tracing/ai/utils.test.ts new file mode 100644 index 000000000000..28f98c846619 --- /dev/null +++ b/packages/core/test/lib/tracing/ai/utils.test.ts @@ -0,0 +1,40 @@ +import { afterEach, beforeEach, describe, expect, it } from 'vitest'; +import { getCurrentScope, getGlobalScope, getIsolationScope, setCurrentClient } from '../../../../src'; +import { resolveAIRecordingOptions } from '../../../../src/tracing/ai/utils'; +import { getDefaultTestClientOptions, TestClient } from '../../../mocks/client'; + +describe('resolveAIRecordingOptions', () => { + beforeEach(() => { + getCurrentScope().clear(); + 
getIsolationScope().clear(); + getGlobalScope().clear(); + }); + + afterEach(() => { + getCurrentScope().clear(); + getIsolationScope().clear(); + getGlobalScope().clear(); + }); + + function setup(sendDefaultPii: boolean): void { + const options = getDefaultTestClientOptions({ tracesSampleRate: 1, sendDefaultPii }); + const client = new TestClient(options); + setCurrentClient(client); + client.init(); + } + + it('defaults to false when sendDefaultPii is false', () => { + setup(false); + expect(resolveAIRecordingOptions()).toEqual({ recordInputs: false, recordOutputs: false }); + }); + + it('respects sendDefaultPii: true', () => { + setup(true); + expect(resolveAIRecordingOptions()).toEqual({ recordInputs: true, recordOutputs: true }); + }); + + it('explicit options override sendDefaultPii', () => { + setup(true); + expect(resolveAIRecordingOptions({ recordInputs: false })).toEqual({ recordInputs: false, recordOutputs: true }); + }); +}); diff --git a/packages/core/test/lib/tracing/vercel-ai-rerank.test.ts b/packages/core/test/lib/tracing/vercel-ai-rerank.test.ts index 7deb331020c3..8bc30b89c264 100644 --- a/packages/core/test/lib/tracing/vercel-ai-rerank.test.ts +++ b/packages/core/test/lib/tracing/vercel-ai-rerank.test.ts @@ -3,8 +3,8 @@ import { getSpanOpFromName } from '../../../src/tracing/vercel-ai/utils'; describe('vercel-ai rerank support', () => { describe('getSpanOpFromName', () => { - it('should map ai.rerank to gen_ai.invoke_agent', () => { - expect(getSpanOpFromName('ai.rerank')).toBe('gen_ai.invoke_agent'); + it('should not assign a gen_ai op to ai.rerank pipeline span', () => { + expect(getSpanOpFromName('ai.rerank')).toBeUndefined(); }); it('should map ai.rerank.doRerank to gen_ai.rerank', () => { diff --git a/packages/core/test/lib/transports/base.test.ts b/packages/core/test/lib/transports/base.test.ts index 5908e4f1877e..f1a747a64917 100644 --- a/packages/core/test/lib/transports/base.test.ts +++ b/packages/core/test/lib/transports/base.test.ts 
@@ -339,7 +339,7 @@ describe('createTransport', () => { try { await transport.send(CLIENT_REPORT_ENVELOPE); - } catch (e) { + } catch (_e) { // Expected to throw } @@ -383,7 +383,7 @@ describe('createTransport', () => { try { await transport.send(ERROR_ENVELOPE); - } catch (e) { + } catch (_e) { // Expected to throw } diff --git a/packages/core/test/lib/utils/object.test.ts b/packages/core/test/lib/utils/object.test.ts index a4d2a4b56ea3..e34260edef50 100644 --- a/packages/core/test/lib/utils/object.test.ts +++ b/packages/core/test/lib/utils/object.test.ts @@ -343,6 +343,7 @@ describe('objectify()', () => { testOnlyIfNodeVersionAtLeast(10)('bigint', () => { // Hack to get around the fact that literal bigints cause a syntax error in older versions of Node, so the // assignment needs to not even be parsed as code in those versions + // oxlint-disable-next-line no-unassigned-vars let bigintPrimitive; eval('bigintPrimitive = 1231n;'); diff --git a/packages/core/test/lib/utils/request.test.ts b/packages/core/test/lib/utils/request.test.ts index c17c25802599..73a19c2bfa45 100644 --- a/packages/core/test/lib/utils/request.test.ts +++ b/packages/core/test/lib/utils/request.test.ts @@ -780,6 +780,46 @@ describe('request utils', () => { 'http.request.header.x_saml_token': '[Filtered]', }); }); + + it('returns response header attributes if `lifecycle` is "response"', () => { + const headers = { + Host: 'example.com', + 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36', + Accept: 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', + 'Accept-Language': 'en-US,en;q=0.5', + 'Accept-Encoding': 'gzip, deflate', + Connection: 'keep-alive', + 'Upgrade-Insecure-Requests': '1', + 'Cache-Control': 'no-cache', + 'X-Forwarded-For': '192.168.1.1', + Authorization: '[Filtered]', + 'x-bearer-token': 'bearer', + 'x-sso-token': 'sso', + 'x-saml-token': 'saml', + 'Set-Cookie': 'session=456', + Cookie: 'session=abc123', + }; + + const result = 
httpHeadersToSpanAttributes(headers, false, 'response'); + + expect(result).toEqual({ + 'http.response.header.host': 'example.com', + 'http.response.header.user_agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36', + 'http.response.header.accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', + 'http.response.header.accept_language': 'en-US,en;q=0.5', + 'http.response.header.accept_encoding': 'gzip, deflate', + 'http.response.header.connection': 'keep-alive', + 'http.response.header.upgrade_insecure_requests': '1', + 'http.response.header.cache_control': 'no-cache', + 'http.response.header.x_forwarded_for': '[Filtered]', + 'http.response.header.authorization': '[Filtered]', + 'http.response.header.x_bearer_token': '[Filtered]', + 'http.response.header.x_saml_token': '[Filtered]', + 'http.response.header.x_sso_token': '[Filtered]', + 'http.response.header.set_cookie.session': '[Filtered]', + 'http.response.header.cookie.session': '[Filtered]', + }); + }); }); }); }); diff --git a/packages/core/test/lib/utils/timestampSequence.test.ts b/packages/core/test/lib/utils/timestampSequence.test.ts new file mode 100644 index 000000000000..0608bf296455 --- /dev/null +++ b/packages/core/test/lib/utils/timestampSequence.test.ts @@ -0,0 +1,80 @@ +import { beforeEach, describe, expect, it } from 'vitest'; +import { _INTERNAL_resetSequenceNumber, getSequenceAttribute } from '../../../src/utils/timestampSequence'; + +describe('getSequenceAttribute', () => { + beforeEach(() => { + _INTERNAL_resetSequenceNumber(); + }); + + it('returns the correct attribute key', () => { + const attr = getSequenceAttribute(1000.001); + expect(attr.key).toBe('sentry.timestamp.sequence'); + }); + + it('returns an integer type attribute', () => { + const attr = getSequenceAttribute(1000.001); + expect(attr.value.type).toBe('integer'); + }); + + it('starts at 0', () => { + const attr = getSequenceAttribute(1000.001); + expect(attr.value.value).toBe(0); + }); + + 
it('increments by 1 for each call within the same millisecond', () => { + const first = getSequenceAttribute(1000.001); + const second = getSequenceAttribute(1000.001); + const third = getSequenceAttribute(1000.001); + + expect(first.value.value).toBe(0); + expect(second.value.value).toBe(1); + expect(third.value.value).toBe(2); + }); + + it('resets to 0 when the integer millisecond changes', () => { + // Same millisecond (1000001ms) + expect(getSequenceAttribute(1000.001).value.value).toBe(0); + expect(getSequenceAttribute(1000.001).value.value).toBe(1); + + // Different millisecond (1000002ms) + expect(getSequenceAttribute(1000.002).value.value).toBe(0); + expect(getSequenceAttribute(1000.002).value.value).toBe(1); + }); + + it('does not reset when the fractional part changes but integer millisecond stays the same', () => { + // 1000001.0ms and 1000001.9ms both floor to 1000001ms + expect(getSequenceAttribute(1000.001).value.value).toBe(0); + expect(getSequenceAttribute(1000.0019).value.value).toBe(1); + }); + + it('resets via _INTERNAL_resetSequenceNumber', () => { + expect(getSequenceAttribute(1000.001).value.value).toBe(0); + expect(getSequenceAttribute(1000.001).value.value).toBe(1); + + _INTERNAL_resetSequenceNumber(); + + expect(getSequenceAttribute(1000.001).value.value).toBe(0); + }); + + it('resets to 0 after _INTERNAL_resetSequenceNumber even with same timestamp', () => { + getSequenceAttribute(1000.001); + getSequenceAttribute(1000.001); + + _INTERNAL_resetSequenceNumber(); + + // After reset, _previousTimestampMs is undefined, so it should start at 0 + const attr = getSequenceAttribute(1000.001); + expect(attr.value.value).toBe(0); + }); + + it('shares sequence across interleaved calls (monotonically increasing within same ms)', () => { + // Simulate interleaved log and metric captures at the same timestamp + const logSeq = getSequenceAttribute(1000.001); + const metricSeq = getSequenceAttribute(1000.001); + const logSeq2 = 
getSequenceAttribute(1000.001); + + expect(logSeq.value.value).toBe(0); + expect(metricSeq.value.value).toBe(1); + expect(logSeq2.value.value).toBe(2); + }); +}); diff --git a/packages/deno/package.json b/packages/deno/package.json index b1e7c027c98d..9c756e277fdf 100644 --- a/packages/deno/package.json +++ b/packages/deno/package.json @@ -37,9 +37,9 @@ "circularDepCheck": "madge --circular src/index.ts", "clean": "rimraf build build-types build-test coverage node_modules/.deno sentry-deno-*.tgz", "prefix": "yarn deno-types", - "fix": "oxlint . --fix", "prelint": "yarn deno-types", - "lint": "oxlint .", + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --type-aware", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --fix --type-aware", "lint:es-compatibility": "es-check es2022 ./build/esm/*.js --module", "install:deno": "node ./scripts/install-deno.mjs", "test": "run-s install:deno deno-types test:unit", diff --git a/packages/deno/src/utils/streaming.ts b/packages/deno/src/utils/streaming.ts index b999af39bf49..045a104c5e93 100644 --- a/packages/deno/src/utils/streaming.ts +++ b/packages/deno/src/utils/streaming.ts @@ -66,7 +66,7 @@ export async function streamResponse(span: Span, res: Response): Promise void, ): ReadableStream> { const reader = stream.getReader(); + // oxlint-disable-next-line typescript/no-floating-promises reader.closed.finally(() => onDone()); return new ReadableStream({ async start(controller) { diff --git a/packages/deno/src/wrap-deno-request-handler.ts b/packages/deno/src/wrap-deno-request-handler.ts index 886b5a6d67ed..510b903dc722 100644 --- a/packages/deno/src/wrap-deno-request-handler.ts +++ b/packages/deno/src/wrap-deno-request-handler.ts @@ -73,10 +73,7 @@ export const wrapDenoRequestHandler = ( assignIfSet(attributes, 'client.port', (info?.remoteAddr as Deno.NetAddr)?.port); } - Object.assign( - attributes, - 
httpHeadersToSpanAttributes(winterCGHeadersToDict(request.headers), client.getOptions().sendDefaultPii ?? false), - ); + Object.assign(attributes, httpHeadersToSpanAttributes(winterCGHeadersToDict(request.headers), sendDefaultPii)); attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP] = 'http.server'; isolationScope.setSDKProcessingMetadata({ normalizedRequest: winterCGRequestToRequestData(request), @@ -95,9 +92,11 @@ export const wrapDenoRequestHandler = ( res = await handler(); setHttpStatus(span, res.status); isolationScope.setContext('response', { - headers: Object.fromEntries(res.headers), status_code: res.status, }); + span.setAttributes( + httpHeadersToSpanAttributes(Object.fromEntries(res.headers), sendDefaultPii, 'response'), + ); } catch (e) { span.end(); captureException(e, { diff --git a/packages/deno/test/deno-serve.test.ts b/packages/deno/test/deno-serve.test.ts index 9c2283e365b4..6f76ce2781ab 100644 --- a/packages/deno/test/deno-serve.test.ts +++ b/packages/deno/test/deno-serve.test.ts @@ -317,9 +317,8 @@ Deno.test('Deno.serve should capture request headers and set response context', // Check response context assertEquals(transaction?.contexts?.response?.status_code, 201); - assertExists(transaction?.contexts?.response?.headers); - assertEquals(transaction?.contexts?.response?.headers?.['content-type'], 'text/plain'); - assertEquals(transaction?.contexts?.response?.headers?.['x-custom-header'], 'test'); + assertEquals(transaction?.contexts?.trace?.data?.['http.response.header.content_type'], 'text/plain'); + assertEquals(transaction?.contexts?.trace?.data?.['http.response.header.x_custom_header'], 'test'); }); Deno.test('Deno.serve should support distributed tracing with sentry-trace header', async () => { diff --git a/packages/effect/.eslintrc.js b/packages/effect/.eslintrc.js new file mode 100644 index 000000000000..d37e458c151c --- /dev/null +++ b/packages/effect/.eslintrc.js @@ -0,0 +1,15 @@ +module.exports = { + env: { + browser: true, + node: true, + }, + 
overrides: [ + { + files: ['vite.config.ts', 'vitest.config.ts'], + parserOptions: { + project: ['tsconfig.vite.json'], + }, + }, + ], + extends: ['../../.eslintrc.js'], +}; diff --git a/packages/effect/LICENSE b/packages/effect/LICENSE new file mode 100644 index 000000000000..fea6013e7dbf --- /dev/null +++ b/packages/effect/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2026 Functional Software Inc. dba Sentry + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/packages/effect/README.md b/packages/effect/README.md new file mode 100644 index 000000000000..78b2f6471dc0 --- /dev/null +++ b/packages/effect/README.md @@ -0,0 +1,48 @@ +# Official Sentry SDK for Effect.ts (Alpha) + +[![npm version](https://img.shields.io/npm/v/@sentry/effect.svg)](https://www.npmjs.com/package/@sentry/effect) +[![npm dm](https://img.shields.io/npm/dm/@sentry/effect.svg)](https://www.npmjs.com/package/@sentry/effect) +[![npm dt](https://img.shields.io/npm/dt/@sentry/effect.svg)](https://www.npmjs.com/package/@sentry/effect) + +> NOTICE: This package is in alpha state and may be subject to breaking changes. + +## Getting Started + +This SDK does not have docs yet. Stay tuned. + +## Usage + +```typescript +import * as Sentry from '@sentry/effect/server'; +import { NodeRuntime } from '@effect/platform-node'; +import { Layer, Logger } from 'effect'; +import { HttpLive } from './Http.js'; + +const SentryLive = Layer.mergeAll( + Sentry.effectLayer({ + dsn: '__DSN__', + tracesSampleRate: 1.0, + enableLogs: true, + }), + Layer.setTracer(Sentry.SentryEffectTracer), + Logger.replace(Logger.defaultLogger, Sentry.SentryEffectLogger), + Sentry.SentryEffectMetricsLayer, +); + +const MainLive = HttpLive.pipe(Layer.provide(SentryLive)); +MainLive.pipe(Layer.launch, NodeRuntime.runMain); +``` + +The `effectLayer` function initializes Sentry. 
To enable Effect instrumentation, compose with: + +- `Layer.setTracer(Sentry.SentryEffectTracer)` - Effect spans traced as Sentry spans +- `Logger.replace(Logger.defaultLogger, Sentry.SentryEffectLogger)` - Effect logs forwarded to Sentry +- `Sentry.SentryEffectMetricsLayer` - Effect metrics sent to Sentry + +## Links + + + +- [Sentry.io](https://sentry.io/?utm_source=github&utm_medium=npm_effect) +- [Sentry Discord Server](https://discord.gg/Ww9hbqr) +- [Stack Overflow](https://stackoverflow.com/questions/tagged/sentry) diff --git a/packages/effect/package.json b/packages/effect/package.json new file mode 100644 index 000000000000..43a54b85e159 --- /dev/null +++ b/packages/effect/package.json @@ -0,0 +1,99 @@ +{ + "name": "@sentry/effect", + "version": "10.43.0", + "description": "Official Sentry SDK for Effect", + "repository": "git://github.com/getsentry/sentry-javascript.git", + "homepage": "https://github.com/getsentry/sentry-javascript/tree/master/packages/effect", + "author": "Sentry", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "files": [ + "/build" + ], + "main": "build/cjs/index.server.js", + "module": "build/esm/index.server.js", + "browser": "build/esm/index.client.js", + "types": "build/types/index.types.d.ts", + "exports": { + "./package.json": "./package.json", + ".": { + "types": "./build/types/index.types.d.ts", + "browser": { + "import": "./build/esm/index.client.js", + "require": "./build/cjs/index.client.js" + }, + "node": { + "import": "./build/esm/index.server.js", + "require": "./build/cjs/index.server.js" + } + }, + "./server": { + "types": "./build/types/index.server.d.ts", + "import": "./build/esm/index.server.js", + "require": "./build/cjs/index.server.js" + }, + "./client": { + "types": "./build/types/index.client.d.ts", + "import": "./build/esm/index.client.js", + "require": "./build/cjs/index.client.js" + } + }, + "typesVersions": { + "<5.0": { + "build/types/index.types.d.ts": [ + "build/types-ts3.8/index.types.d.ts" + 
], + "build/types/index.server.d.ts": [ + "build/types-ts3.8/index.server.d.ts" + ], + "build/types/index.client.d.ts": [ + "build/types-ts3.8/index.client.d.ts" + ] + } + }, + "publishConfig": { + "access": "public" + }, + "dependencies": { + "@sentry/browser": "10.43.0", + "@sentry/core": "10.43.0", + "@sentry/node-core": "10.43.0" + }, + "peerDependencies": { + "effect": "^3.0.0" + }, + "peerDependenciesMeta": { + "effect": { + "optional": false + } + }, + "devDependencies": { + "@effect/vitest": "^0.23.9", + "effect": "^3.19.19" + }, + "scripts": { + "build": "run-p build:transpile build:types", + "build:dev": "yarn build", + "build:transpile": "rollup -c rollup.npm.config.mjs", + "build:types": "run-s build:types:core build:types:downlevel", + "build:types:core": "tsc -p tsconfig.types.json", + "build:types:downlevel": "yarn downlevel-dts build/types build/types-ts3.8 --to ts3.8", + "build:watch": "run-p build:transpile:watch", + "build:dev:watch": "yarn build:watch", + "build:transpile:watch": "rollup -c rollup.npm.config.mjs --watch", + "build:tarball": "npm pack", + "circularDepCheck": "madge --circular src/index.client.ts && madge --circular src/index.server.ts && madge --circular src/index.types.ts", + "clean": "rimraf build coverage sentry-effect-*.tgz", + "fix": "eslint . --format stylish --fix", + "lint": "eslint . 
--format stylish", + "test": "vitest run", + "test:watch": "vitest --watch", + "yalc:publish": "yalc publish --push --sig" + }, + "volta": { + "extends": "../../package.json" + }, + "sideEffects": false +} diff --git a/packages/effect/rollup.npm.config.mjs b/packages/effect/rollup.npm.config.mjs new file mode 100644 index 000000000000..211157646473 --- /dev/null +++ b/packages/effect/rollup.npm.config.mjs @@ -0,0 +1,25 @@ +import { makeBaseNPMConfig, makeNPMConfigVariants } from '@sentry-internal/rollup-utils'; + +const baseConfig = makeBaseNPMConfig({ + entrypoints: ['src/index.server.ts', 'src/index.client.ts'], + packageSpecificConfig: { + output: { + preserveModulesRoot: 'src', + }, + }, +}); + +const defaultExternal = baseConfig.external || []; +baseConfig.external = id => { + if (defaultExternal.includes(id)) { + return true; + } + + if (id === 'effect' || id.startsWith('effect/') || id.startsWith('@sentry/')) { + return true; + } + + return false; +}; + +export default makeNPMConfigVariants(baseConfig); diff --git a/packages/effect/src/client/index.ts b/packages/effect/src/client/index.ts new file mode 100644 index 000000000000..e60843bc1e3e --- /dev/null +++ b/packages/effect/src/client/index.ts @@ -0,0 +1,43 @@ +import type { BrowserOptions } from '@sentry/browser'; +import type * as EffectLayer from 'effect/Layer'; +import { empty as emptyLayer, suspend as suspendLayer } from 'effect/Layer'; +import { init } from './sdk'; + +export { init } from './sdk'; + +/** + * Options for the Sentry Effect client layer. + */ +export type EffectClientLayerOptions = BrowserOptions; + +/** + * Creates an Effect Layer that initializes Sentry for browser clients. 
+ * + * To enable Effect tracing, logs, or metrics, compose with the respective layers: + * - `Layer.setTracer(Sentry.SentryEffectTracer)` for tracing + * - `Logger.replace(Logger.defaultLogger, Sentry.SentryEffectLogger)` for logs + * - `Sentry.SentryEffectMetricsLayer` for metrics + * + * @example + * ```typescript + * import * as Sentry from '@sentry/effect/client'; + * import { Layer, Logger, LogLevel } from 'effect'; + * + * const SentryLive = Layer.mergeAll( + * Sentry.effectLayer({ + * dsn: '__DSN__', + * integrations: [Sentry.browserTracingIntegration()], + * tracesSampleRate: 1.0, + * }), + * Layer.setTracer(Sentry.SentryEffectTracer), + * Logger.replace(Logger.defaultLogger, Sentry.SentryEffectLogger), + * ); + * ``` + */ +export function effectLayer(options: EffectClientLayerOptions): EffectLayer.Layer { + return suspendLayer(() => { + init(options); + + return emptyLayer; + }); +} diff --git a/packages/effect/src/client/sdk.ts b/packages/effect/src/client/sdk.ts new file mode 100644 index 000000000000..5f2210a92b3a --- /dev/null +++ b/packages/effect/src/client/sdk.ts @@ -0,0 +1,20 @@ +import type { BrowserOptions } from '@sentry/browser'; +import { init as initBrowser } from '@sentry/browser'; +import type { Client } from '@sentry/core'; +import { applySdkMetadata } from '@sentry/core'; + +/** + * Initializes the Sentry Effect SDK for browser clients. 
+ * + * @param options - Configuration options for the SDK + * @returns The initialized Sentry client, or undefined if initialization failed + */ +export function init(options: BrowserOptions): Client | undefined { + const opts = { + ...options, + }; + + applySdkMetadata(opts, 'effect', ['effect', 'browser']); + + return initBrowser(opts); +} diff --git a/packages/effect/src/index.client.ts b/packages/effect/src/index.client.ts new file mode 100644 index 000000000000..2df8a2548fb9 --- /dev/null +++ b/packages/effect/src/index.client.ts @@ -0,0 +1,11 @@ +// import/export got a false positive, and affects most of our index barrel files +// can be removed once following issue is fixed: https://github.com/import-js/eslint-plugin-import/issues/703 +/* eslint-disable import/export */ +export * from '@sentry/browser'; + +export { effectLayer, init } from './client/index'; +export type { EffectClientLayerOptions } from './client/index'; + +export { SentryEffectTracer } from './tracer'; +export { SentryEffectLogger } from './logger'; +export { SentryEffectMetricsLayer } from './metrics'; diff --git a/packages/effect/src/index.server.ts b/packages/effect/src/index.server.ts new file mode 100644 index 000000000000..c66abbf43413 --- /dev/null +++ b/packages/effect/src/index.server.ts @@ -0,0 +1,8 @@ +export * from '@sentry/node-core/light'; + +export { effectLayer, init } from './server/index'; +export type { EffectServerLayerOptions } from './server/index'; + +export { SentryEffectTracer } from './tracer'; +export { SentryEffectLogger } from './logger'; +export { SentryEffectMetricsLayer } from './metrics'; diff --git a/packages/effect/src/index.types.ts b/packages/effect/src/index.types.ts new file mode 100644 index 000000000000..e0a6e9512eeb --- /dev/null +++ b/packages/effect/src/index.types.ts @@ -0,0 +1,26 @@ +/* eslint-disable import/export */ + +// We export everything from both the client part of the SDK and from the server part. 
+// Some of the exports collide, which is not allowed, unless we redefine the colliding +// exports in this file - which we do below. +import type { Client, Integration, Options, StackParser } from '@sentry/core'; +import type * as EffectLayer from 'effect/Layer'; +import type * as clientSdk from './index.client'; +import type * as serverSdk from './index.server'; + +export * from './index.client'; +export * from './index.server'; + +export type { EffectClientLayerOptions } from './index.client'; +export type { EffectServerLayerOptions } from './index.server'; + +export declare function effectLayer( + options: clientSdk.EffectClientLayerOptions | serverSdk.EffectServerLayerOptions, +): EffectLayer.Layer; + +export declare function init(options: Options | clientSdk.BrowserOptions | serverSdk.NodeOptions): Client | undefined; +export declare const linkedErrorsIntegration: typeof clientSdk.linkedErrorsIntegration; +export declare const contextLinesIntegration: typeof clientSdk.contextLinesIntegration; +export declare const getDefaultIntegrations: (options: Options) => Integration[]; +export declare const defaultStackParser: StackParser; +export declare const logger: typeof clientSdk.logger | typeof serverSdk.logger; diff --git a/packages/effect/src/logger.ts b/packages/effect/src/logger.ts new file mode 100644 index 000000000000..833f5b6b7e95 --- /dev/null +++ b/packages/effect/src/logger.ts @@ -0,0 +1,43 @@ +import { logger as sentryLogger } from '@sentry/core'; +import * as Logger from 'effect/Logger'; + +/** + * Effect Logger that sends logs to Sentry. + */ +export const SentryEffectLogger = Logger.make(({ logLevel, message }) => { + let msg: string; + if (typeof message === 'string') { + msg = message; + } else if (Array.isArray(message) && message.length === 1) { + const firstElement = message[0]; + msg = typeof firstElement === 'string' ? 
firstElement : JSON.stringify(firstElement); + } else { + msg = JSON.stringify(message); + } + + switch (logLevel._tag) { + case 'Fatal': + sentryLogger.fatal(msg); + break; + case 'Error': + sentryLogger.error(msg); + break; + case 'Warning': + sentryLogger.warn(msg); + break; + case 'Info': + sentryLogger.info(msg); + break; + case 'Debug': + sentryLogger.debug(msg); + break; + case 'Trace': + sentryLogger.trace(msg); + break; + case 'All': + case 'None': + break; + default: + logLevel satisfies never; + } +}); diff --git a/packages/effect/src/metrics.ts b/packages/effect/src/metrics.ts new file mode 100644 index 000000000000..82daf5e67a5d --- /dev/null +++ b/packages/effect/src/metrics.ts @@ -0,0 +1,135 @@ +import { metrics as sentryMetrics } from '@sentry/core'; +import * as Effect from 'effect/Effect'; +import type * as Layer from 'effect/Layer'; +import { scopedDiscard } from 'effect/Layer'; +import * as Metric from 'effect/Metric'; +import * as MetricKeyType from 'effect/MetricKeyType'; +import type * as MetricPair from 'effect/MetricPair'; +import * as MetricState from 'effect/MetricState'; +import * as Schedule from 'effect/Schedule'; + +type MetricAttributes = Record; + +function labelsToAttributes(labels: ReadonlyArray<{ key: string; value: string }>): MetricAttributes { + return labels.reduce((acc, label) => ({ ...acc, [label.key]: label.value }), {}); +} + +function sendMetricToSentry(pair: MetricPair.MetricPair.Untyped): void { + const { metricKey, metricState } = pair; + const name = metricKey.name; + const attributes = labelsToAttributes(metricKey.tags); + + if (MetricState.isCounterState(metricState)) { + const value = Number(metricState.count); + sentryMetrics.count(name, value, { attributes }); + } else if (MetricState.isGaugeState(metricState)) { + const value = Number(metricState.value); + sentryMetrics.gauge(name, value, { attributes }); + } else if (MetricState.isHistogramState(metricState)) { + sentryMetrics.gauge(`${name}.sum`, 
metricState.sum, { attributes }); + sentryMetrics.gauge(`${name}.count`, metricState.count, { attributes }); + sentryMetrics.gauge(`${name}.min`, metricState.min, { attributes }); + sentryMetrics.gauge(`${name}.max`, metricState.max, { attributes }); + } else if (MetricState.isSummaryState(metricState)) { + sentryMetrics.gauge(`${name}.sum`, metricState.sum, { attributes }); + sentryMetrics.gauge(`${name}.count`, metricState.count, { attributes }); + sentryMetrics.gauge(`${name}.min`, metricState.min, { attributes }); + sentryMetrics.gauge(`${name}.max`, metricState.max, { attributes }); + } else if (MetricState.isFrequencyState(metricState)) { + for (const [word, count] of metricState.occurrences) { + sentryMetrics.count(name, count, { + attributes: { ...attributes, word }, + }); + } + } +} + +function getMetricId(pair: MetricPair.MetricPair.Untyped): string { + const tags = pair.metricKey.tags.map(t => `${t.key}=${t.value}`).join(','); + return `${pair.metricKey.name}:${tags}`; +} + +function sendDeltaMetricToSentry( + pair: MetricPair.MetricPair.Untyped, + previousCounterValues: Map, +): void { + const { metricKey, metricState } = pair; + const name = metricKey.name; + const attributes = labelsToAttributes(metricKey.tags); + const metricId = getMetricId(pair); + + if (MetricState.isCounterState(metricState)) { + const currentValue = Number(metricState.count); + + const previousValue = previousCounterValues.get(metricId) ?? 0; + const delta = currentValue - previousValue; + + if (delta > 0) { + sentryMetrics.count(name, delta, { attributes }); + } + + previousCounterValues.set(metricId, currentValue); + } else { + sendMetricToSentry(pair); + } +} + +/** + * Flushes all Effect metrics to Sentry. 
+ * @param previousCounterValues - Map tracking previous counter values for delta calculation + */ +function flushMetricsToSentry(previousCounterValues: Map): void { + const snapshot = Metric.unsafeSnapshot(); + + snapshot.forEach((pair: MetricPair.MetricPair.Untyped) => { + if (MetricKeyType.isCounterKey(pair.metricKey.keyType)) { + sendDeltaMetricToSentry(pair, previousCounterValues); + } else { + sendMetricToSentry(pair); + } + }); +} + +/** + * Creates a metrics flusher with its own isolated state for delta tracking. + * Useful for testing scenarios where you need to control the lifecycle. + * @internal + */ +export function createMetricsFlusher(): { + flush: () => void; + clear: () => void; +} { + const previousCounterValues = new Map(); + return { + flush: () => flushMetricsToSentry(previousCounterValues), + clear: () => previousCounterValues.clear(), + }; +} + +function createMetricsReporterEffect(previousCounterValues: Map): Effect.Effect { + const schedule = Schedule.spaced('10 seconds'); + + return Effect.repeat( + Effect.sync(() => flushMetricsToSentry(previousCounterValues)), + schedule, + ).pipe(Effect.asVoid, Effect.interruptible); +} + +/** + * Effect Layer that periodically flushes metrics to Sentry. + * The layer manages its own state for delta counter calculations, + * which is automatically cleaned up when the layer is finalized. 
+ */ +export const SentryEffectMetricsLayer: Layer.Layer = scopedDiscard( + Effect.gen(function* () { + const previousCounterValues = new Map(); + + yield* Effect.acquireRelease(Effect.void, () => + Effect.sync(() => { + previousCounterValues.clear(); + }), + ); + + yield* Effect.forkScoped(createMetricsReporterEffect(previousCounterValues)); + }), +); diff --git a/packages/effect/src/server/index.ts b/packages/effect/src/server/index.ts new file mode 100644 index 000000000000..76c078544af1 --- /dev/null +++ b/packages/effect/src/server/index.ts @@ -0,0 +1,43 @@ +import type { NodeOptions } from '@sentry/node-core/light'; +import type * as EffectLayer from 'effect/Layer'; +import { empty as emptyLayer, suspend as suspendLayer } from 'effect/Layer'; +import { init } from './sdk'; + +export { init } from './sdk'; + +/** + * Options for the Sentry Effect server layer. + */ +export type EffectServerLayerOptions = NodeOptions; + +/** + * Creates an Effect Layer that initializes Sentry for Node.js servers. 
+ * + * To enable Effect tracing, logs, or metrics, compose with the respective layers: + * - `Layer.setTracer(Sentry.SentryEffectTracer)` for tracing + * - `Logger.replace(Logger.defaultLogger, Sentry.SentryEffectLogger)` for logs + * - `Sentry.SentryEffectMetricsLayer` for metrics + * + * @example + * ```typescript + * import * as Sentry from '@sentry/effect/server'; + * import { NodeRuntime } from '@effect/platform-node'; + * import { Layer, Logger } from 'effect'; + * import { HttpLive } from './Http.js'; + * + * const SentryLive = Layer.mergeAll( + * Sentry.effectLayer({ dsn: '__DSN__' }), + * Layer.setTracer(Sentry.SentryEffectTracer), + * Logger.replace(Logger.defaultLogger, Sentry.SentryEffectLogger), + * ); + * + * const MainLive = HttpLive.pipe(Layer.provide(SentryLive)); + * MainLive.pipe(Layer.launch, NodeRuntime.runMain); + * ``` + */ +export function effectLayer(options: EffectServerLayerOptions): EffectLayer.Layer { + return suspendLayer(() => { + init(options); + return emptyLayer; + }); +} diff --git a/packages/effect/src/server/sdk.ts b/packages/effect/src/server/sdk.ts new file mode 100644 index 000000000000..ee910be13487 --- /dev/null +++ b/packages/effect/src/server/sdk.ts @@ -0,0 +1,20 @@ +import type { Client } from '@sentry/core'; +import { applySdkMetadata } from '@sentry/core'; +import type { NodeOptions } from '@sentry/node-core/light'; +import { init as initNode } from '@sentry/node-core/light'; + +/** + * Initializes the Sentry Effect SDK for Node.js servers. 
+ * + * @param options - Configuration options for the SDK + * @returns The initialized Sentry client, or undefined if initialization failed + */ +export function init(options: NodeOptions): Client | undefined { + const opts = { + ...options, + }; + + applySdkMetadata(opts, 'effect', ['effect', 'node-light']); + + return initNode(opts); +} diff --git a/packages/effect/src/tracer.ts b/packages/effect/src/tracer.ts new file mode 100644 index 000000000000..f755101e4417 --- /dev/null +++ b/packages/effect/src/tracer.ts @@ -0,0 +1,159 @@ +import type { Span } from '@sentry/core'; +import { getActiveSpan, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN, startInactiveSpan, withActiveSpan } from '@sentry/core'; +import type * as Context from 'effect/Context'; +import * as Exit from 'effect/Exit'; +import * as Option from 'effect/Option'; +import * as EffectTracer from 'effect/Tracer'; + +function deriveOrigin(name: string): string { + if (name.startsWith('http.server') || name.startsWith('http.client')) { + return 'auto.http.effect'; + } + + return 'auto.function.effect'; +} + +type HrTime = [number, number]; + +const SENTRY_SPAN_SYMBOL = Symbol.for('@sentry/effect.SentrySpan'); + +function nanosToHrTime(nanos: bigint): HrTime { + const seconds = Number(nanos / BigInt(1_000_000_000)); + const remainingNanos = Number(nanos % BigInt(1_000_000_000)); + return [seconds, remainingNanos]; +} + +interface SentrySpanLike extends EffectTracer.Span { + readonly [SENTRY_SPAN_SYMBOL]: true; + readonly sentrySpan: Span; +} + +function isSentrySpan(span: EffectTracer.AnySpan): span is SentrySpanLike { + return SENTRY_SPAN_SYMBOL in span; +} + +class SentrySpanWrapper implements SentrySpanLike { + public readonly [SENTRY_SPAN_SYMBOL]: true; + public readonly _tag: 'Span'; + public readonly spanId: string; + public readonly traceId: string; + public readonly attributes: Map; + public readonly sampled: boolean; + public readonly parent: Option.Option; + public readonly links: Array; + public status: 
EffectTracer.SpanStatus; + public readonly sentrySpan: Span; + + public constructor( + public readonly name: string, + parent: Option.Option, + public readonly context: Context.Context, + links: ReadonlyArray, + startTime: bigint, + public readonly kind: EffectTracer.SpanKind, + existingSpan: Span, + ) { + this[SENTRY_SPAN_SYMBOL] = true as const; + this._tag = 'Span' as const; + this.attributes = new Map(); + this.parent = parent; + this.links = [...links]; + this.sentrySpan = existingSpan; + + const spanContext = this.sentrySpan.spanContext(); + this.spanId = spanContext.spanId; + this.traceId = spanContext.traceId; + this.sampled = this.sentrySpan.isRecording(); + this.status = { + _tag: 'Started', + startTime, + }; + } + + public attribute(key: string, value: unknown): void { + if (!this.sentrySpan.isRecording()) { + return; + } + + this.sentrySpan.setAttribute(key, value as Parameters[1]); + this.attributes.set(key, value); + } + + public addLinks(links: ReadonlyArray): void { + this.links.push(...links); + } + + public end(endTime: bigint, exit: Exit.Exit): void { + this.status = { + _tag: 'Ended', + endTime, + exit, + startTime: this.status.startTime, + }; + + if (!this.sentrySpan.isRecording()) { + return; + } + + if (Exit.isFailure(exit)) { + const cause = exit.cause; + const message = + cause._tag === 'Fail' ? String(cause.error) : cause._tag === 'Die' ? 
String(cause.defect) : 'internal_error'; + this.sentrySpan.setStatus({ code: 2, message }); + } else { + this.sentrySpan.setStatus({ code: 1 }); + } + + this.sentrySpan.end(nanosToHrTime(endTime)); + } + + public event(name: string, startTime: bigint, attributes?: Record): void { + if (!this.sentrySpan.isRecording()) { + return; + } + + this.sentrySpan.addEvent(name, attributes as Parameters[1], nanosToHrTime(startTime)); + } +} + +function createSentrySpan( + name: string, + parent: Option.Option, + context: Context.Context, + links: ReadonlyArray, + startTime: bigint, + kind: EffectTracer.SpanKind, +): SentrySpanLike { + const parentSentrySpan = + Option.isSome(parent) && isSentrySpan(parent.value) ? parent.value.sentrySpan : (getActiveSpan() ?? null); + + const newSpan = startInactiveSpan({ + name, + startTime: nanosToHrTime(startTime), + attributes: { + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: deriveOrigin(name), + }, + ...(parentSentrySpan ? { parentSpan: parentSentrySpan } : {}), + }); + + return new SentrySpanWrapper(name, parent, context, links, startTime, kind, newSpan); +} + +const makeSentryTracer = (): EffectTracer.Tracer => + EffectTracer.make({ + span(name, parent, context, links, startTime, kind) { + return createSentrySpan(name, parent, context, links, startTime, kind); + }, + context(execution, fiber) { + const currentSpan = fiber.currentSpan; + if (currentSpan === undefined || !isSentrySpan(currentSpan)) { + return execution(); + } + return withActiveSpan(currentSpan.sentrySpan, execution); + }, + }); + +/** + * Effect Layer that sets up the Sentry tracer for Effect spans. 
+ */ +export const SentryEffectTracer = makeSentryTracer(); diff --git a/packages/effect/test/index.test.ts b/packages/effect/test/index.test.ts new file mode 100644 index 000000000000..950ec06fb670 --- /dev/null +++ b/packages/effect/test/index.test.ts @@ -0,0 +1,8 @@ +import { describe, expect, it } from 'vitest'; +import * as index from '../src/index.client'; + +describe('effect index export', () => { + it('has correct exports', () => { + expect(index.captureException).toBeDefined(); + }); +}); diff --git a/packages/effect/test/layer.test.ts b/packages/effect/test/layer.test.ts new file mode 100644 index 000000000000..590502fb657e --- /dev/null +++ b/packages/effect/test/layer.test.ts @@ -0,0 +1,180 @@ +import { describe, expect, it } from '@effect/vitest'; +import * as sentryCore from '@sentry/core'; +import { getClient, getCurrentScope, getIsolationScope, SDK_VERSION, SentrySpan } from '@sentry/core'; +import { Effect, Layer, Logger, LogLevel } from 'effect'; +import { afterEach, beforeEach, vi } from 'vitest'; +import * as sentryClient from '../src/index.client'; +import * as sentryServer from '../src/index.server'; + +const TEST_DSN = 'https://username@domain/123'; + +function getMockTransport() { + return () => ({ + send: vi.fn().mockResolvedValue({}), + flush: vi.fn().mockResolvedValue(true), + }); +} + +describe.each([ + [ + { + subSdkName: 'browser', + effectLayer: sentryClient.effectLayer, + SentryEffectTracer: sentryClient.SentryEffectTracer, + SentryEffectLogger: sentryClient.SentryEffectLogger, + SentryEffectMetricsLayer: sentryClient.SentryEffectMetricsLayer, + }, + ], + [ + { + subSdkName: 'node-light', + effectLayer: sentryServer.effectLayer, + SentryEffectTracer: sentryServer.SentryEffectTracer, + SentryEffectLogger: sentryServer.SentryEffectLogger, + SentryEffectMetricsLayer: sentryServer.SentryEffectMetricsLayer, + }, + ], +])('effectLayer ($subSdkName)', ({ subSdkName, effectLayer, SentryEffectTracer, SentryEffectLogger }) => { + beforeEach(() 
=> { + getCurrentScope().clear(); + getIsolationScope().clear(); + }); + + afterEach(() => { + getCurrentScope().setClient(undefined); + vi.restoreAllMocks(); + }); + + it('creates a valid Effect layer', () => { + const layer = effectLayer({ + dsn: TEST_DSN, + transport: getMockTransport(), + }); + + expect(layer).toBeDefined(); + expect(Layer.isLayer(layer)).toBe(true); + }); + + it.effect('applies SDK metadata', () => + Effect.gen(function* () { + yield* Effect.void; + + const client = getClient(); + const metadata = client?.getOptions()._metadata?.sdk; + + expect(metadata?.name).toBe('sentry.javascript.effect'); + expect(metadata?.packages).toEqual([ + { name: 'npm:@sentry/effect', version: SDK_VERSION }, + { name: `npm:@sentry/${subSdkName}`, version: SDK_VERSION }, + ]); + }).pipe( + Effect.provide( + effectLayer({ + dsn: TEST_DSN, + transport: getMockTransport(), + }), + ), + ), + ); + + it.effect('layer can be provided to an Effect program', () => + Effect.gen(function* () { + const result = yield* Effect.succeed('test-result'); + expect(result).toBe('test-result'); + }).pipe( + Effect.provide( + effectLayer({ + dsn: TEST_DSN, + transport: getMockTransport(), + }), + ), + ), + ); + + it.effect('layer enables tracing when tracer is set', () => + Effect.gen(function* () { + const startInactiveSpanMock = vi.spyOn(sentryCore, 'startInactiveSpan'); + + const result = yield* Effect.withSpan('test-span')(Effect.succeed('traced')); + expect(result).toBe('traced'); + expect(startInactiveSpanMock).toHaveBeenCalledWith(expect.objectContaining({ name: 'test-span' })); + }).pipe( + Effect.withTracer(SentryEffectTracer), + Effect.provide( + effectLayer({ + dsn: TEST_DSN, + transport: getMockTransport(), + }), + ), + ), + ); + + it.effect('layer can be composed with tracer layer', () => + Effect.gen(function* () { + const startInactiveSpanMock = vi.spyOn(sentryCore, 'startInactiveSpan'); + + const result = yield* Effect.succeed(42).pipe( + Effect.map(n => n * 2), + 
Effect.withSpan('computation'), + ); + expect(result).toBe(84); + expect(startInactiveSpanMock).toHaveBeenCalledWith(expect.objectContaining({ name: 'computation' })); + }).pipe( + Effect.provide( + Layer.mergeAll( + effectLayer({ + dsn: TEST_DSN, + transport: getMockTransport(), + }), + Layer.setTracer(SentryEffectTracer), + ), + ), + ), + ); + + it.effect('layer can be composed with logger layer', () => + Effect.gen(function* () { + yield* Effect.logInfo('test log'); + const result = yield* Effect.succeed('logged'); + expect(result).toBe('logged'); + }).pipe( + Effect.provide( + Layer.mergeAll( + effectLayer({ + dsn: TEST_DSN, + transport: getMockTransport(), + }), + Logger.replace(Logger.defaultLogger, SentryEffectLogger), + Logger.minimumLogLevel(LogLevel.All), + ), + ), + ), + ); + + it.effect('layer can be composed with all Effect features', () => + Effect.gen(function* () { + const startInactiveSpanMock = vi.spyOn(sentryCore, 'startInactiveSpan'); + + yield* Effect.logInfo('starting computation'); + const result = yield* Effect.succeed(42).pipe( + Effect.map(n => n * 2), + Effect.withSpan('computation'), + ); + yield* Effect.logInfo('computation complete'); + expect(result).toBe(84); + expect(startInactiveSpanMock).toHaveBeenCalledWith(expect.objectContaining({ name: 'computation' })); + }).pipe( + Effect.provide( + Layer.mergeAll( + effectLayer({ + dsn: TEST_DSN, + transport: getMockTransport(), + }), + Layer.setTracer(SentryEffectTracer), + Logger.replace(Logger.defaultLogger, SentryEffectLogger), + Logger.minimumLogLevel(LogLevel.All), + ), + ), + ), + ); +}); diff --git a/packages/effect/test/logger.test.ts b/packages/effect/test/logger.test.ts new file mode 100644 index 000000000000..c372784b483f --- /dev/null +++ b/packages/effect/test/logger.test.ts @@ -0,0 +1,104 @@ +import { describe, expect, it } from '@effect/vitest'; +import * as sentryCore from '@sentry/core'; +import { Effect, Layer, Logger, LogLevel } from 'effect'; +import { afterEach, vi } 
from 'vitest'; +import { SentryEffectLogger } from '../src/logger'; + +vi.mock('@sentry/core', async importOriginal => { + const original = await importOriginal(); + return { + ...original, + logger: { + ...original.logger, + error: vi.fn(), + warn: vi.fn(), + info: vi.fn(), + debug: vi.fn(), + trace: vi.fn(), + fatal: vi.fn(), + }, + }; +}); + +describe('SentryEffectLogger', () => { + afterEach(() => { + vi.clearAllMocks(); + }); + + const loggerLayer = Layer.mergeAll( + Logger.replace(Logger.defaultLogger, SentryEffectLogger), + Logger.minimumLogLevel(LogLevel.All), + ); + + it.effect('forwards fatal logs to Sentry', () => + Effect.gen(function* () { + yield* Effect.logFatal('This is a fatal message'); + expect(sentryCore.logger.fatal).toHaveBeenCalledWith('This is a fatal message'); + }).pipe(Effect.provide(loggerLayer)), + ); + + it.effect('forwards error logs to Sentry', () => + Effect.gen(function* () { + yield* Effect.logError('This is an error message'); + expect(sentryCore.logger.error).toHaveBeenCalledWith('This is an error message'); + }).pipe(Effect.provide(loggerLayer)), + ); + + it.effect('forwards warning logs to Sentry', () => + Effect.gen(function* () { + yield* Effect.logWarning('This is a warning message'); + expect(sentryCore.logger.warn).toHaveBeenCalledWith('This is a warning message'); + }).pipe(Effect.provide(loggerLayer)), + ); + + it.effect('forwards info logs to Sentry', () => + Effect.gen(function* () { + yield* Effect.logInfo('This is an info message'); + expect(sentryCore.logger.info).toHaveBeenCalledWith('This is an info message'); + }).pipe(Effect.provide(loggerLayer)), + ); + + it.effect('forwards debug logs to Sentry', () => + Effect.gen(function* () { + yield* Effect.logDebug('This is a debug message'); + expect(sentryCore.logger.debug).toHaveBeenCalledWith('This is a debug message'); + }).pipe(Effect.provide(loggerLayer)), + ); + + it.effect('forwards trace logs to Sentry', () => + Effect.gen(function* () { + yield* 
Effect.logTrace('This is a trace message'); + expect(sentryCore.logger.trace).toHaveBeenCalledWith('This is a trace message'); + }).pipe(Effect.provide(loggerLayer)), + ); + + it.effect('handles object messages by stringifying', () => + Effect.gen(function* () { + yield* Effect.logInfo({ key: 'value', nested: { foo: 'bar' } }); + expect(sentryCore.logger.info).toHaveBeenCalledWith('{"key":"value","nested":{"foo":"bar"}}'); + }).pipe(Effect.provide(loggerLayer)), + ); + + it.effect('handles multiple log calls', () => + Effect.gen(function* () { + yield* Effect.logInfo('First message'); + yield* Effect.logInfo('Second message'); + yield* Effect.logWarning('Third message'); + expect(sentryCore.logger.info).toHaveBeenCalledTimes(2); + expect(sentryCore.logger.info).toHaveBeenNthCalledWith(1, 'First message'); + expect(sentryCore.logger.info).toHaveBeenNthCalledWith(2, 'Second message'); + expect(sentryCore.logger.warn).toHaveBeenCalledWith('Third message'); + }).pipe(Effect.provide(loggerLayer)), + ); + + it.effect('works with Effect.tap for logging side effects', () => + Effect.gen(function* () { + const result = yield* Effect.succeed('data').pipe( + Effect.tap(data => Effect.logInfo(`Processing: ${data}`)), + Effect.map(d => d.toUpperCase()), + ); + expect(result).toBe('DATA'); + expect(sentryCore.logger.info).toHaveBeenCalledWith('Processing: data'); + }).pipe(Effect.provide(loggerLayer)), + ); +}); diff --git a/packages/effect/test/metrics.test.ts b/packages/effect/test/metrics.test.ts new file mode 100644 index 000000000000..8c2b092b967f --- /dev/null +++ b/packages/effect/test/metrics.test.ts @@ -0,0 +1,321 @@ +import { describe, expect, it } from '@effect/vitest'; +import * as sentryCore from '@sentry/core'; +import { Duration, Effect, Metric, MetricBoundaries, MetricLabel } from 'effect'; +import { afterEach, beforeEach, vi } from 'vitest'; +import { createMetricsFlusher } from '../src/metrics'; + +describe('SentryEffectMetricsLayer', () => { + const mockCount 
= vi.fn(); + const mockGauge = vi.fn(); + const mockDistribution = vi.fn(); + + beforeEach(() => { + vi.spyOn(sentryCore.metrics, 'count').mockImplementation(mockCount); + vi.spyOn(sentryCore.metrics, 'gauge').mockImplementation(mockGauge); + vi.spyOn(sentryCore.metrics, 'distribution').mockImplementation(mockDistribution); + }); + + afterEach(() => { + vi.clearAllMocks(); + vi.restoreAllMocks(); + }); + + it.effect('creates counter metrics', () => + Effect.gen(function* () { + const counter = Metric.counter('test_counter'); + + yield* Metric.increment(counter); + yield* Metric.increment(counter); + yield* Metric.incrementBy(counter, 5); + + const snapshot = Metric.unsafeSnapshot(); + const counterMetric = snapshot.find(p => p.metricKey.name === 'test_counter'); + + expect(counterMetric).toBeDefined(); + }), + ); + + it.effect('creates gauge metrics', () => + Effect.gen(function* () { + const gauge = Metric.gauge('test_gauge'); + + yield* Metric.set(gauge, 42); + + const snapshot = Metric.unsafeSnapshot(); + const gaugeMetric = snapshot.find(p => p.metricKey.name === 'test_gauge'); + + expect(gaugeMetric).toBeDefined(); + }), + ); + + it.effect('creates histogram metrics', () => + Effect.gen(function* () { + const histogram = Metric.histogram('test_histogram', MetricBoundaries.linear({ start: 0, width: 10, count: 10 })); + + yield* Metric.update(histogram, 5); + yield* Metric.update(histogram, 15); + yield* Metric.update(histogram, 25); + + const snapshot = Metric.unsafeSnapshot(); + const histogramMetric = snapshot.find(p => p.metricKey.name === 'test_histogram'); + + expect(histogramMetric).toBeDefined(); + }), + ); + + it.effect('creates summary metrics', () => + Effect.gen(function* () { + const summary = Metric.summary({ + name: 'test_summary', + maxAge: '1 minute', + maxSize: 100, + error: 0.01, + quantiles: [0.5, 0.9, 0.99], + }); + + yield* Metric.update(summary, 10); + yield* Metric.update(summary, 20); + yield* Metric.update(summary, 30); + + const 
snapshot = Metric.unsafeSnapshot(); + const summaryMetric = snapshot.find(p => p.metricKey.name === 'test_summary'); + + expect(summaryMetric).toBeDefined(); + }), + ); + + it.effect('creates frequency metrics', () => + Effect.gen(function* () { + const frequency = Metric.frequency('test_frequency'); + + yield* Metric.update(frequency, 'foo'); + yield* Metric.update(frequency, 'bar'); + yield* Metric.update(frequency, 'foo'); + + const snapshot = Metric.unsafeSnapshot(); + const frequencyMetric = snapshot.find(p => p.metricKey.name === 'test_frequency'); + + expect(frequencyMetric).toBeDefined(); + }), + ); + + it.effect('supports metrics with labels', () => + Effect.gen(function* () { + const counter = Metric.counter('labeled_counter').pipe( + Metric.taggedWithLabels([MetricLabel.make('env', 'test'), MetricLabel.make('service', 'my-service')]), + ); + + yield* Metric.increment(counter); + + const snapshot = Metric.unsafeSnapshot(); + const labeledMetric = snapshot.find(p => p.metricKey.name === 'labeled_counter'); + + expect(labeledMetric).toBeDefined(); + const tags = labeledMetric?.metricKey.tags ?? 
[]; + expect(tags.some(t => t.key === 'env' && t.value === 'test')).toBe(true); + expect(tags.some(t => t.key === 'service' && t.value === 'my-service')).toBe(true); + }), + ); + + it.effect('tracks Effect durations with timer metric', () => + Effect.gen(function* () { + const timer = Metric.timerWithBoundaries('operation_duration', [10, 50, 100, 500, 1000]); + + yield* Effect.succeed('done').pipe(Metric.trackDuration(timer)); + + const snapshot = Metric.unsafeSnapshot(); + const timerMetric = snapshot.find(p => p.metricKey.name === 'operation_duration'); + + expect(timerMetric).toBeDefined(); + }), + ); + + it.effect('integrates with Effect.timed', () => + Effect.gen(function* () { + const [duration, result] = yield* Effect.timed(Effect.succeed('completed')); + + expect(result).toBe('completed'); + expect(Duration.toMillis(duration)).toBeGreaterThanOrEqual(0); + }), + ); +}); + +describe('createMetricsFlusher', () => { + const mockCount = vi.fn(); + const mockGauge = vi.fn(); + const mockDistribution = vi.fn(); + + beforeEach(() => { + vi.spyOn(sentryCore.metrics, 'count').mockImplementation(mockCount); + vi.spyOn(sentryCore.metrics, 'gauge').mockImplementation(mockGauge); + vi.spyOn(sentryCore.metrics, 'distribution').mockImplementation(mockDistribution); + }); + + afterEach(() => { + vi.clearAllMocks(); + vi.restoreAllMocks(); + }); + + it.effect('sends counter metrics to Sentry', () => + Effect.gen(function* () { + const flusher = createMetricsFlusher(); + const counter = Metric.counter('flush_test_counter'); + + yield* Metric.increment(counter); + yield* Metric.incrementBy(counter, 4); + + flusher.flush(); + + expect(mockCount).toHaveBeenCalledWith('flush_test_counter', 5, { attributes: {} }); + }), + ); + + it.effect('sends gauge metrics to Sentry', () => + Effect.gen(function* () { + const flusher = createMetricsFlusher(); + const gauge = Metric.gauge('flush_test_gauge'); + + yield* Metric.set(gauge, 42); + + flusher.flush(); + + 
expect(mockGauge).toHaveBeenCalledWith('flush_test_gauge', 42, { attributes: {} }); + }), + ); + + it.effect('sends histogram metrics to Sentry', () => + Effect.gen(function* () { + const flusher = createMetricsFlusher(); + const histogram = Metric.histogram( + 'flush_test_histogram', + MetricBoundaries.linear({ start: 0, width: 10, count: 5 }), + ); + + yield* Metric.update(histogram, 5); + yield* Metric.update(histogram, 15); + + flusher.flush(); + + expect(mockGauge).toHaveBeenCalledWith('flush_test_histogram.sum', expect.any(Number), { attributes: {} }); + expect(mockGauge).toHaveBeenCalledWith('flush_test_histogram.count', expect.any(Number), { attributes: {} }); + expect(mockGauge).toHaveBeenCalledWith('flush_test_histogram.min', expect.any(Number), { attributes: {} }); + expect(mockGauge).toHaveBeenCalledWith('flush_test_histogram.max', expect.any(Number), { attributes: {} }); + }), + ); + + it.effect('sends summary metrics to Sentry', () => + Effect.gen(function* () { + const flusher = createMetricsFlusher(); + const summary = Metric.summary({ + name: 'flush_test_summary', + maxAge: '1 minute', + maxSize: 100, + error: 0.01, + quantiles: [0.5, 0.9, 0.99], + }); + + yield* Metric.update(summary, 10); + yield* Metric.update(summary, 20); + yield* Metric.update(summary, 30); + + flusher.flush(); + + expect(mockGauge).toHaveBeenCalledWith('flush_test_summary.sum', 60, { attributes: {} }); + expect(mockGauge).toHaveBeenCalledWith('flush_test_summary.count', 3, { attributes: {} }); + expect(mockGauge).toHaveBeenCalledWith('flush_test_summary.min', 10, { attributes: {} }); + expect(mockGauge).toHaveBeenCalledWith('flush_test_summary.max', 30, { attributes: {} }); + }), + ); + + it.effect('sends frequency metrics to Sentry', () => + Effect.gen(function* () { + const flusher = createMetricsFlusher(); + const frequency = Metric.frequency('flush_test_frequency'); + + yield* Metric.update(frequency, 'apple'); + yield* Metric.update(frequency, 'banana'); + yield* 
Metric.update(frequency, 'apple'); + + flusher.flush(); + + expect(mockCount).toHaveBeenCalledWith('flush_test_frequency', 2, { attributes: { word: 'apple' } }); + expect(mockCount).toHaveBeenCalledWith('flush_test_frequency', 1, { attributes: { word: 'banana' } }); + }), + ); + + it.effect('sends metrics with labels as attributes to Sentry', () => + Effect.gen(function* () { + const flusher = createMetricsFlusher(); + const gauge = Metric.gauge('flush_test_labeled_gauge').pipe( + Metric.taggedWithLabels([MetricLabel.make('env', 'production'), MetricLabel.make('region', 'us-east')]), + ); + + yield* Metric.set(gauge, 100); + + flusher.flush(); + + expect(mockGauge).toHaveBeenCalledWith('flush_test_labeled_gauge', 100, { + attributes: { env: 'production', region: 'us-east' }, + }); + }), + ); + + it.effect('sends counter delta values on subsequent flushes', () => + Effect.gen(function* () { + const flusher = createMetricsFlusher(); + const counter = Metric.counter('flush_test_delta_counter'); + + yield* Metric.incrementBy(counter, 10); + flusher.flush(); + + mockCount.mockClear(); + + yield* Metric.incrementBy(counter, 5); + flusher.flush(); + + expect(mockCount).toHaveBeenCalledWith('flush_test_delta_counter', 5, { attributes: {} }); + }), + ); + + it.effect('does not send counter when delta is zero', () => + Effect.gen(function* () { + const flusher = createMetricsFlusher(); + const counter = Metric.counter('flush_test_zero_delta'); + + yield* Metric.incrementBy(counter, 10); + flusher.flush(); + + mockCount.mockClear(); + + flusher.flush(); + + expect(mockCount).not.toHaveBeenCalledWith('flush_test_zero_delta', 0, { attributes: {} }); + }), + ); + + it.effect('clear() resets delta tracking state', () => + Effect.gen(function* () { + const flusher = createMetricsFlusher(); + const counter = Metric.counter('flush_test_clear_counter'); + + yield* Metric.incrementBy(counter, 10); + flusher.flush(); + + mockCount.mockClear(); + flusher.clear(); + + flusher.flush(); + 
+ expect(mockCount).toHaveBeenCalledWith('flush_test_clear_counter', 10, { attributes: {} }); + }), + ); + + it('each flusher has isolated state', () => { + const flusher1 = createMetricsFlusher(); + const flusher2 = createMetricsFlusher(); + + expect(flusher1).not.toBe(flusher2); + expect(flusher1.flush).not.toBe(flusher2.flush); + expect(flusher1.clear).not.toBe(flusher2.clear); + }); +}); diff --git a/packages/effect/test/tracer.test.ts b/packages/effect/test/tracer.test.ts new file mode 100644 index 000000000000..9583e7d12c5b --- /dev/null +++ b/packages/effect/test/tracer.test.ts @@ -0,0 +1,284 @@ +import { describe, expect, it } from '@effect/vitest'; +import * as sentryCore from '@sentry/core'; +import { SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core'; +import { Effect, Layer } from 'effect'; +import { afterEach, vi } from 'vitest'; +import { SentryEffectTracer } from '../src/tracer'; + +const TracerLayer = Layer.setTracer(SentryEffectTracer); + +describe('SentryEffectTracer', () => { + afterEach(() => { + vi.restoreAllMocks(); + }); + + it.effect('traces Effect spans to Sentry', () => + Effect.gen(function* () { + let capturedSpanName: string | undefined; + + yield* Effect.withSpan('test-parent-span')( + Effect.gen(function* () { + yield* Effect.annotateCurrentSpan('test-attribute', 'test-value'); + capturedSpanName = 'effect-span-executed'; + }), + ); + + expect(capturedSpanName).toBe('effect-span-executed'); + }).pipe(Effect.provide(TracerLayer)), + ); + + it.effect('creates spans with correct attributes', () => + Effect.gen(function* () { + const result = yield* Effect.withSpan('my-operation')(Effect.succeed('success')); + + expect(result).toBe('success'); + }).pipe(Effect.provide(TracerLayer)), + ); + + it.effect('handles nested spans', () => + Effect.gen(function* () { + const result = yield* Effect.withSpan('outer')( + Effect.gen(function* () { + const inner = yield* Effect.withSpan('inner')(Effect.succeed('inner-result')); + return 
`outer-${inner}`; + }), + ); + + expect(result).toBe('outer-inner-result'); + }).pipe(Effect.provide(TracerLayer)), + ); + + it.effect('propagates span context through Effect fibers', () => + Effect.gen(function* () { + const results: string[] = []; + + yield* Effect.withSpan('parent')( + Effect.gen(function* () { + results.push('parent-start'); + yield* Effect.withSpan('child-1')(Effect.sync(() => results.push('child-1'))); + yield* Effect.withSpan('child-2')(Effect.sync(() => results.push('child-2'))); + results.push('parent-end'); + }), + ); + + expect(results).toEqual(['parent-start', 'child-1', 'child-2', 'parent-end']); + }).pipe(Effect.provide(TracerLayer)), + ); + + it.effect('handles span failures correctly', () => + Effect.gen(function* () { + const result = yield* Effect.withSpan('failing-span')(Effect.fail('expected-error')).pipe( + Effect.catchAll(e => Effect.succeed(`caught: ${e}`)), + ); + + expect(result).toBe('caught: expected-error'); + }).pipe(Effect.provide(TracerLayer)), + ); + + it.effect('handles span with defects (die)', () => + Effect.gen(function* () { + const result = yield* Effect.withSpan('defect-span')(Effect.die('defect-value')).pipe( + Effect.catchAllDefect(d => Effect.succeed(`caught-defect: ${d}`)), + ); + + expect(result).toBe('caught-defect: defect-value'); + }).pipe(Effect.provide(TracerLayer)), + ); + + it.effect('works with Effect.all for parallel operations', () => + Effect.gen(function* () { + const results = yield* Effect.withSpan('parallel-parent')( + Effect.all([ + Effect.withSpan('task-1')(Effect.succeed(1)), + Effect.withSpan('task-2')(Effect.succeed(2)), + Effect.withSpan('task-3')(Effect.succeed(3)), + ]), + ); + + expect(results).toEqual([1, 2, 3]); + }).pipe(Effect.provide(TracerLayer)), + ); + + it.effect('supports span annotations', () => + Effect.gen(function* () { + const result = yield* Effect.succeed('annotated').pipe( + Effect.withSpan('annotated-span'), + Effect.tap(() => 
Effect.annotateCurrentSpan('custom-key', 'custom-value')), + ); + + expect(result).toBe('annotated'); + }).pipe(Effect.provide(TracerLayer)), + ); + + it.effect('sets span status to ok on success', () => + Effect.gen(function* () { + const setStatusCalls: Array<{ code: number; message?: string }> = []; + + const mockStartInactiveSpan = vi.spyOn(sentryCore, 'startInactiveSpan').mockImplementation(_options => { + return { + spanContext: () => ({ spanId: 'test-span-id', traceId: 'test-trace-id' }), + isRecording: () => true, + setAttribute: vi.fn(), + setStatus: (status: { code: number; message?: string }) => setStatusCalls.push(status), + addEvent: vi.fn(), + end: vi.fn(), + } as unknown as sentryCore.Span; + }); + + yield* Effect.withSpan('success-span')(Effect.succeed('ok')); + + expect(setStatusCalls).toContainEqual({ code: 1 }); + + mockStartInactiveSpan.mockRestore(); + }).pipe(Effect.provide(TracerLayer)), + ); + + it.effect('sets span status to error on failure', () => + Effect.gen(function* () { + const setStatusCalls: Array<{ code: number; message?: string }> = []; + + const mockStartInactiveSpan = vi.spyOn(sentryCore, 'startInactiveSpan').mockImplementation(_options => { + return { + spanContext: () => ({ spanId: 'test-span-id', traceId: 'test-trace-id' }), + isRecording: () => true, + setAttribute: vi.fn(), + setStatus: (status: { code: number; message?: string }) => setStatusCalls.push(status), + addEvent: vi.fn(), + end: vi.fn(), + } as unknown as sentryCore.Span; + }); + + yield* Effect.withSpan('error-span')(Effect.fail('test-error')).pipe(Effect.catchAll(() => Effect.void)); + + expect(setStatusCalls).toContainEqual({ code: 2, message: 'test-error' }); + + mockStartInactiveSpan.mockRestore(); + }).pipe(Effect.provide(TracerLayer)), + ); + + it.effect('sets span status to error on defect', () => + Effect.gen(function* () { + const setStatusCalls: Array<{ code: number; message?: string }> = []; + + const mockStartInactiveSpan = vi.spyOn(sentryCore, 
'startInactiveSpan').mockImplementation(_options => { + return { + spanContext: () => ({ spanId: 'test-span-id', traceId: 'test-trace-id' }), + isRecording: () => true, + setAttribute: vi.fn(), + setStatus: (status: { code: number; message?: string }) => setStatusCalls.push(status), + addEvent: vi.fn(), + end: vi.fn(), + } as unknown as sentryCore.Span; + }); + + yield* Effect.withSpan('defect-span')(Effect.die('fatal-defect')).pipe(Effect.catchAllDefect(() => Effect.void)); + + expect(setStatusCalls).toContainEqual({ code: 2, message: 'fatal-defect' }); + + mockStartInactiveSpan.mockRestore(); + }).pipe(Effect.provide(TracerLayer)), + ); + + it.effect('propagates Sentry span context via withActiveSpan', () => + Effect.gen(function* () { + const withActiveSpanCalls: sentryCore.Span[] = []; + + const mockWithActiveSpan = vi + .spyOn(sentryCore, 'withActiveSpan') + .mockImplementation((span: sentryCore.Span | null, callback: (scope: sentryCore.Scope) => T): T => { + if (span) { + withActiveSpanCalls.push(span); + } + return callback({} as sentryCore.Scope); + }); + + yield* Effect.withSpan('context-span')(Effect.succeed('done')); + + expect(withActiveSpanCalls.length).toBeGreaterThan(0); + + mockWithActiveSpan.mockRestore(); + }).pipe(Effect.provide(TracerLayer)), + ); + + it.effect('sets origin to auto.function.effect for regular spans', () => + Effect.gen(function* () { + let capturedAttributes: Record | undefined; + + const mockStartInactiveSpan = vi.spyOn(sentryCore, 'startInactiveSpan').mockImplementation(options => { + capturedAttributes = options.attributes; + return { + spanContext: () => ({ spanId: 'test-span-id', traceId: 'test-trace-id' }), + isRecording: () => true, + setAttribute: vi.fn(), + setStatus: vi.fn(), + addEvent: vi.fn(), + end: vi.fn(), + } as unknown as sentryCore.Span; + }); + + yield* Effect.withSpan('my-operation')(Effect.succeed('ok')); + + expect(capturedAttributes).toBeDefined(); + 
expect(capturedAttributes?.[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toBe('auto.function.effect'); + + mockStartInactiveSpan.mockRestore(); + }).pipe(Effect.provide(TracerLayer)), + ); + + it.effect('sets origin to auto.http.effect for http.server spans', () => + Effect.gen(function* () { + let capturedAttributes: Record | undefined; + + const mockStartInactiveSpan = vi.spyOn(sentryCore, 'startInactiveSpan').mockImplementation(options => { + capturedAttributes = options.attributes; + return { + spanContext: () => ({ spanId: 'test-span-id', traceId: 'test-trace-id' }), + isRecording: () => true, + setAttribute: vi.fn(), + setStatus: vi.fn(), + addEvent: vi.fn(), + end: vi.fn(), + } as unknown as sentryCore.Span; + }); + + yield* Effect.withSpan('http.server GET /api/users')(Effect.succeed('ok')); + + expect(capturedAttributes).toBeDefined(); + expect(capturedAttributes?.[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toBe('auto.http.effect'); + + mockStartInactiveSpan.mockRestore(); + }).pipe(Effect.provide(TracerLayer)), + ); + + it.effect('sets origin to auto.http.effect for http.client spans', () => + Effect.gen(function* () { + let capturedAttributes: Record | undefined; + + const mockStartInactiveSpan = vi.spyOn(sentryCore, 'startInactiveSpan').mockImplementation(options => { + capturedAttributes = options.attributes; + return { + spanContext: () => ({ spanId: 'test-span-id', traceId: 'test-trace-id' }), + isRecording: () => true, + setAttribute: vi.fn(), + setStatus: vi.fn(), + addEvent: vi.fn(), + end: vi.fn(), + } as unknown as sentryCore.Span; + }); + + yield* Effect.withSpan('http.client GET https://api.example.com')(Effect.succeed('ok')); + + expect(capturedAttributes).toBeDefined(); + expect(capturedAttributes?.[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toBe('auto.http.effect'); + + mockStartInactiveSpan.mockRestore(); + }).pipe(Effect.provide(TracerLayer)), + ); + + it.effect('can be used with Effect.withTracer', () => + Effect.gen(function* () { + const result = yield* 
Effect.withSpan('inline-tracer-span')(Effect.succeed('with-tracer')); + expect(result).toBe('with-tracer'); + }).pipe(Effect.withTracer(SentryEffectTracer)), + ); +}); diff --git a/packages/effect/tsconfig.json b/packages/effect/tsconfig.json new file mode 100644 index 000000000000..d49b053b37f8 --- /dev/null +++ b/packages/effect/tsconfig.json @@ -0,0 +1,9 @@ +{ + "extends": "../../tsconfig.json", + "compilerOptions": { + "module": "esnext", + "moduleResolution": "bundler", + "outDir": "build" + }, + "include": ["src/**/*"] +} diff --git a/packages/effect/tsconfig.test.json b/packages/effect/tsconfig.test.json new file mode 100644 index 000000000000..9dd90014ef37 --- /dev/null +++ b/packages/effect/tsconfig.test.json @@ -0,0 +1,9 @@ +{ + "extends": "./tsconfig.json", + + "include": ["test/**/*", "vitest.config.ts"], + + "compilerOptions": { + "types": ["node"] + } +} diff --git a/packages/effect/tsconfig.types.json b/packages/effect/tsconfig.types.json new file mode 100644 index 000000000000..76eb1a9bb7c3 --- /dev/null +++ b/packages/effect/tsconfig.types.json @@ -0,0 +1,10 @@ +{ + "extends": "./tsconfig.json", + "compilerOptions": { + "declaration": true, + "declarationMap": true, + "emitDeclarationOnly": true, + "outDir": "build/types" + }, + "include": ["src/**/*"] +} diff --git a/packages/effect/tsconfig.vite.json b/packages/effect/tsconfig.vite.json new file mode 100644 index 000000000000..4f2b7371b076 --- /dev/null +++ b/packages/effect/tsconfig.vite.json @@ -0,0 +1,9 @@ +{ + "extends": "./tsconfig.json", + + "include": ["vite.config.ts", "vitest.config.ts"], + + "compilerOptions": { + "types": ["node"] + } +} diff --git a/packages/effect/vitest.config.ts b/packages/effect/vitest.config.ts new file mode 100644 index 000000000000..ed62557713c1 --- /dev/null +++ b/packages/effect/vitest.config.ts @@ -0,0 +1,10 @@ +import { defineConfig } from 'vitest/config'; +import baseConfig from '../../vite/vite.config'; + +export default defineConfig({ + ...baseConfig, + 
test: { + ...baseConfig.test, + include: ['test/**/*.test.ts'], + }, +}); diff --git a/packages/ember/package.json b/packages/ember/package.json index 64c0e88aac76..a450d2394695 100644 --- a/packages/ember/package.json +++ b/packages/ember/package.json @@ -21,9 +21,9 @@ "clean": "yarn rimraf sentry-ember-*.tgz dist tmp build .node_modules.ember-try package.json.ember-try instance-initializers index.d.ts runloop.d.ts types.d.ts", "lint": "run-p lint:js lint:hbs lint:ts", "lint:hbs": "ember-template-lint .", - "lint:js": "oxlint .", + "lint:js": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --type-aware", "lint:ts": "tsc", - "fix": "oxlint . --fix", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --fix --type-aware", "start": "ember serve", "test": "ember b --prod && ember test", "prepack": "ember ts:precompile", diff --git a/packages/eslint-plugin-sdk/package.json b/packages/eslint-plugin-sdk/package.json index a6010484e981..2a6de9a8b076 100644 --- a/packages/eslint-plugin-sdk/package.json +++ b/packages/eslint-plugin-sdk/package.json @@ -23,8 +23,8 @@ }, "scripts": { "clean": "yarn rimraf sentry-internal-eslint-plugin-sdk-*.tgz", - "fix": "oxlint . --fix", - "lint": "oxlint .", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --fix --type-aware", + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --type-aware", "test": "vitest run", "test:watch": "vitest --watch", "build:tarball": "npm pack", diff --git a/packages/feedback/package.json b/packages/feedback/package.json index a6acc877803d..e3d13506299f 100644 --- a/packages/feedback/package.json +++ b/packages/feedback/package.json @@ -59,8 +59,8 @@ "build:tarball": "npm pack", "circularDepCheck": "madge --circular src/index.ts", "clean": "rimraf build sentry-internal-feedback-*.tgz", - "fix": "oxlint . 
--fix", - "lint": "oxlint .", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --fix --type-aware", + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --type-aware", "lint:es-compatibility": "es-check es2020 ./build/{bundles,npm/cjs}/*.js && es-check es2020 ./build/npm/esm/*.js --module", "test": "vitest run", "test:watch": "vitest --watch", diff --git a/packages/gatsby/package.json b/packages/gatsby/package.json index 59e435fbb87f..acbfae748317 100644 --- a/packages/gatsby/package.json +++ b/packages/gatsby/package.json @@ -74,8 +74,8 @@ "build:tarball": "npm pack", "circularDepCheck": "madge --circular src/index.ts", "clean": "rimraf build coverage *.d.ts sentry-gatsby-*.tgz", - "fix": "oxlint . --fix", - "lint": "oxlint .", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --fix --type-aware", + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --type-aware", "lint:es-compatibility": "es-check es2020 ./build/cjs/*.js && es-check es2020 ./build/esm/*.js --module", "test": "vitest run", "test:watch": "vitest --watch", diff --git a/packages/google-cloud-serverless/package.json b/packages/google-cloud-serverless/package.json index f4950445bf75..3005c776435d 100644 --- a/packages/google-cloud-serverless/package.json +++ b/packages/google-cloud-serverless/package.json @@ -80,8 +80,8 @@ "build:tarball": "npm pack", "circularDepCheck": "madge --circular src/index.ts", "clean": "rimraf build coverage sentry-google-cloud-*.tgz", - "fix": "oxlint . --fix", - "lint": "oxlint .", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --fix --type-aware", + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . 
--type-aware", "lint:es-compatibility": "es-check es2022 ./build/cjs/*.js && es-check es2022 ./build/esm/*.js --module", "test": "vitest run", "test:watch": "vitest --watch", diff --git a/packages/google-cloud-serverless/test/gcpfunction/http.test.ts b/packages/google-cloud-serverless/test/gcpfunction/http.test.ts index 56781f6a8190..30f6b5af68ce 100644 --- a/packages/google-cloud-serverless/test/gcpfunction/http.test.ts +++ b/packages/google-cloud-serverless/test/gcpfunction/http.test.ts @@ -63,7 +63,7 @@ describe('GCPFunction', () => { try { fn(req, res); - } catch (error) { + } catch { res.end(); } }); diff --git a/packages/hono/README.md b/packages/hono/README.md index 23d9487a0295..c359536c656e 100644 --- a/packages/hono/README.md +++ b/packages/hono/README.md @@ -48,15 +48,15 @@ compatibility_flags = ["nodejs_compat"] Initialize the Sentry Hono middleware as early as possible in your app: ```typescript +import { Hono } from 'hono'; import { sentry } from '@sentry/hono/cloudflare'; const app = new Hono(); // Initialize Sentry middleware right after creating the app app.use( - '*', sentry(app, { - dsn: 'your-sentry-dsn', + dsn: '__DSN__', // ...other Sentry options }), ); @@ -65,3 +65,64 @@ app.use( export default app; ``` + +#### Access `env` from Cloudflare Worker bindings + +Pass the options as a callback instead of a plain options object. The function receives the Cloudflare Worker `env` as defined in the Worker's `Bindings`: + +```typescript +import { Hono } from 'hono'; +import { sentry } from '@sentry/hono/cloudflare'; + +type Bindings = { SENTRY_DSN: string }; + +const app = new Hono<{ Bindings: Bindings }>(); + +app.use(sentry(app, env => ({ dsn: env.SENTRY_DSN }))); + +export default app; +``` + +## Setup (Node) + +### 1. 
Initialize Sentry in your Hono app + +Initialize the Sentry Hono middleware as early as possible in your app: + +```ts +import { Hono } from 'hono'; +import { serve } from '@hono/node-server'; +import { sentry } from '@sentry/hono/node'; + +const app = new Hono(); + +// Initialize Sentry middleware right after creating the app +app.use( + sentry(app, { + dsn: '__DSN__', // or process.env.SENTRY_DSN + tracesSampleRate: 1.0, + }), +); + +// ... your routes and other middleware + +serve(app); +``` + +### 2. Add `preload` script to start command + +To ensure that Sentry can capture spans from third-party libraries (e.g. database clients) used in your Hono app, Sentry needs to wrap these libraries as early as possible. + +When starting the Hono Node application, use the `@sentry/node/preload` hook with the `--import` CLI option to ensure modules are wrapped before the application code runs: + +```bash +node --import @sentry/node/preload index.js +``` + +This option can also be added to the `NODE_OPTIONS` environment variable: + +```bash +NODE_OPTIONS="--import @sentry/node/preload" +``` + +Read more about this preload script in the docs: https://docs.sentry.io/platforms/javascript/guides/hono/install/late-initialization/#late-initialization-with-esm diff --git a/packages/hono/package.json b/packages/hono/package.json index c371aad129db..0ccea9cb15bb 100644 --- a/packages/hono/package.json +++ b/packages/hono/package.json @@ -36,6 +36,16 @@ "types": "./build/types/index.cloudflare.d.ts", "default": "./build/cjs/index.cloudflare.js" } + }, + "./node": { + "import": { + "types": "./build/types/index.node.d.ts", + "default": "./build/esm/index.node.js" + }, + "require": { + "types": "./build/types/index.node.d.ts", + "default": "./build/cjs/index.node.js" + } } }, "typesVersions": { @@ -45,6 +55,9 @@ ], "build/types/index.cloudflare.d.ts": [ "build/types-ts3.8/index.cloudflare.d.ts" + ], + "build/types/index.node.d.ts": [ + "build/types-ts3.8/index.node.d.ts" ] } }, @@ 
-84,8 +97,8 @@ "build:tarball": "npm pack", "circularDepCheck": "madge --circular src/index.ts", "clean": "rimraf build coverage sentry-hono-*.tgz", - "fix": "oxlint . --fix", - "lint": "oxlint .", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --fix --type-aware", + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --type-aware", "lint:es-compatibility": "es-check es2022 ./build/cjs/*.js && es-check es2022 ./build/esm/*.js --module", "test": "yarn test:unit", "test:unit": "vitest run", diff --git a/packages/hono/rollup.npm.config.mjs b/packages/hono/rollup.npm.config.mjs index 6f491584a9d0..a60ba1312cc9 100644 --- a/packages/hono/rollup.npm.config.mjs +++ b/packages/hono/rollup.npm.config.mjs @@ -1,7 +1,7 @@ import { makeBaseNPMConfig, makeNPMConfigVariants } from '@sentry-internal/rollup-utils'; const baseConfig = makeBaseNPMConfig({ - entrypoints: ['src/index.ts', 'src/index.cloudflare.ts'], + entrypoints: ['src/index.ts', 'src/index.cloudflare.ts', 'src/index.node.ts'], packageSpecificConfig: { output: { preserveModulesRoot: 'src', diff --git a/packages/hono/src/cloudflare/middleware.ts b/packages/hono/src/cloudflare/middleware.ts index ffcdf5e40346..1769bbd141a6 100644 --- a/packages/hono/src/cloudflare/middleware.ts +++ b/packages/hono/src/cloudflare/middleware.ts @@ -1,46 +1,45 @@ import { withSentry } from '@sentry/cloudflare'; -import { - applySdkMetadata, - type BaseTransportOptions, - debug, - getIntegrationsToSetup, - type Integration, - type Options, -} from '@sentry/core'; -import type { Context, Hono, MiddlewareHandler } from 'hono'; +import { applySdkMetadata, type BaseTransportOptions, debug, getIntegrationsToSetup, type Options } from '@sentry/core'; +import type { Env, Hono, MiddlewareHandler } from 'hono'; import { requestHandler, responseHandler } from '../shared/middlewareHandlers'; import { patchAppUse } from '../shared/patchAppUse'; +import { filterHonoIntegration } from 
'../shared/filterHonoIntegration'; -export interface HonoOptions extends Options { - context?: Context; -} - -const filterHonoIntegration = (integration: Integration): boolean => integration.name !== 'Hono'; - -export const sentry = (app: Hono, options: HonoOptions | undefined = {}): MiddlewareHandler => { - const isDebug = options.debug; +export interface HonoCloudflareOptions extends Options {} - isDebug && debug.log('Initialized Sentry Hono middleware (Cloudflare)'); - - applySdkMetadata(options, 'hono'); - - const { integrations: userIntegrations } = options; +/** + * Sentry middleware for Hono on Cloudflare Workers. + */ +export function sentry( + app: Hono, + options: HonoCloudflareOptions | ((env: E['Bindings']) => HonoCloudflareOptions), +): MiddlewareHandler { withSentry( - () => ({ - ...options, - // Always filter out the Hono integration from defaults and user integrations. - // The Hono integration is already set up by withSentry, so adding it again would cause capturing too early (in Cloudflare SDK) and non-parametrized URLs. - integrations: Array.isArray(userIntegrations) - ? defaults => - getIntegrationsToSetup({ - defaultIntegrations: defaults.filter(filterHonoIntegration), - integrations: userIntegrations.filter(filterHonoIntegration), - }) - : typeof userIntegrations === 'function' - ? defaults => userIntegrations(defaults).filter(filterHonoIntegration) - : defaults => defaults.filter(filterHonoIntegration), - }), - app, + env => { + const honoOptions = typeof options === 'function' ? options(env as E['Bindings']) : options; + + applySdkMetadata(honoOptions, 'hono', ['hono', 'cloudflare']); + + honoOptions.debug && debug.log('Initialized Sentry Hono middleware (Cloudflare)'); + + const { integrations: userIntegrations } = honoOptions; + return { + ...honoOptions, + // Always filter out the Hono integration from defaults and user integrations. 
+ // The Hono integration is already set up by withSentry, so adding it again would cause capturing too early (in Cloudflare SDK) and non-parametrized URLs. + integrations: Array.isArray(userIntegrations) + ? defaults => + getIntegrationsToSetup({ + defaultIntegrations: defaults.filter(filterHonoIntegration), + integrations: userIntegrations.filter(filterHonoIntegration), + }) + : typeof userIntegrations === 'function' + ? defaults => userIntegrations(defaults).filter(filterHonoIntegration) + : defaults => defaults.filter(filterHonoIntegration), + }; + }, + // Cast needed because Hono exposes a narrower fetch signature than ExportedHandler + app as unknown as ExportedHandler, ); patchAppUse(app); @@ -52,4 +51,4 @@ export const sentry = (app: Hono, options: HonoOptions | undefined = {}): Middle responseHandler(context); }; -}; +} diff --git a/packages/hono/src/index.cloudflare.ts b/packages/hono/src/index.cloudflare.ts index cba517e1d295..99c04597a98f 100644 --- a/packages/hono/src/index.cloudflare.ts +++ b/packages/hono/src/index.cloudflare.ts @@ -1 +1,3 @@ export { sentry } from './cloudflare/middleware'; + +export * from '@sentry/cloudflare'; diff --git a/packages/hono/src/index.node.ts b/packages/hono/src/index.node.ts new file mode 100644 index 000000000000..02e94b67be89 --- /dev/null +++ b/packages/hono/src/index.node.ts @@ -0,0 +1,5 @@ +export { sentry } from './node/middleware'; + +export * from '@sentry/node'; + +export { init } from './node/sdk'; diff --git a/packages/hono/src/node/middleware.ts b/packages/hono/src/node/middleware.ts new file mode 100644 index 000000000000..1dbca92d02e5 --- /dev/null +++ b/packages/hono/src/node/middleware.ts @@ -0,0 +1,28 @@ +import { type BaseTransportOptions, debug, type Options } from '@sentry/core'; +import { init } from './sdk'; +import type { Hono, MiddlewareHandler } from 'hono'; +import { patchAppUse } from '../shared/patchAppUse'; +import { requestHandler, responseHandler } from '../shared/middlewareHandlers'; + 
+export interface HonoNodeOptions extends Options {} + +/** + * Sentry middleware for Hono running in a Node runtime environment. + */ +export const sentry = (app: Hono, options: HonoNodeOptions | undefined = {}): MiddlewareHandler => { + const isDebug = options.debug; + + isDebug && debug.log('Initialized Sentry Hono middleware (Node)'); + + init(options); + + patchAppUse(app); + + return async (context, next) => { + requestHandler(context); + + await next(); // Handler runs in between Request above ⤴ and Response below ⤵ + + responseHandler(context); + }; +}; diff --git a/packages/hono/src/node/sdk.ts b/packages/hono/src/node/sdk.ts new file mode 100644 index 000000000000..ff71ffe55909 --- /dev/null +++ b/packages/hono/src/node/sdk.ts @@ -0,0 +1,34 @@ +import type { Client, Integration } from '@sentry/core'; +import { applySdkMetadata, getIntegrationsToSetup } from '@sentry/core'; +import { init as initNode } from '@sentry/node'; +import type { HonoNodeOptions } from './middleware'; +import { filterHonoIntegration } from '../shared/filterHonoIntegration'; + +/** + * Initializes Sentry for Hono running in a Node runtime environment. + * + * In general, it is recommended to initialize Sentry via the `sentry()` middleware, as it sets up everything by default and calls `init` internally. + * + * When manually calling `init`, add the `honoIntegration` to the `integrations` array to set up the Hono integration. + */ +export function init(options: HonoNodeOptions): Client | undefined { + applySdkMetadata(options, 'hono', ['hono', 'node']); + + const { integrations: userIntegrations } = options; + + // Remove Hono from the SDK defaults to prevent double instrumentation: @sentry/node + const filteredOptions: HonoNodeOptions = { + ...options, + integrations: Array.isArray(userIntegrations) + ? 
(defaults: Integration[]) => + getIntegrationsToSetup({ + defaultIntegrations: defaults.filter(filterHonoIntegration), + integrations: userIntegrations, // user's explicit Hono integration is preserved + }) + : typeof userIntegrations === 'function' + ? (defaults: Integration[]) => userIntegrations(defaults.filter(filterHonoIntegration)) + : (defaults: Integration[]) => defaults.filter(filterHonoIntegration), + }; + + return initNode(filteredOptions); +} diff --git a/packages/hono/src/shared/filterHonoIntegration.ts b/packages/hono/src/shared/filterHonoIntegration.ts new file mode 100644 index 000000000000..743dac8997d5 --- /dev/null +++ b/packages/hono/src/shared/filterHonoIntegration.ts @@ -0,0 +1,3 @@ +import type { Integration } from '@sentry/core'; + +export const filterHonoIntegration = (integration: Integration): boolean => integration.name !== 'Hono'; diff --git a/packages/hono/src/shared/middlewareHandlers.ts b/packages/hono/src/shared/middlewareHandlers.ts index 9745bcfa3988..a470733b47a8 100644 --- a/packages/hono/src/shared/middlewareHandlers.ts +++ b/packages/hono/src/shared/middlewareHandlers.ts @@ -1,9 +1,10 @@ -import { getIsolationScope } from '@sentry/cloudflare'; import { getActiveSpan, getClient, getDefaultIsolationScope, + getIsolationScope, getRootSpan, + SEMANTIC_ATTRIBUTE_SENTRY_SOURCE, updateSpanName, winterCGRequestToRequestData, } from '@sentry/core'; @@ -32,14 +33,18 @@ export function responseHandler(context: Context): void { const activeSpan = getActiveSpan(); if (activeSpan) { activeSpan.updateName(`${context.req.method} ${routePath(context)}`); - updateSpanName(getRootSpan(activeSpan), `${context.req.method} ${routePath(context)}`); + activeSpan.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_SOURCE, 'route'); + + const rootSpan = getRootSpan(activeSpan); + updateSpanName(rootSpan, `${context.req.method} ${routePath(context)}`); + rootSpan.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_SOURCE, 'route'); } 
getIsolationScope().setTransactionName(`${context.req.method} ${routePath(context)}`); if (context.error) { getClient()?.captureException(context.error, { - mechanism: { handled: false, type: 'auto.faas.hono.error_handler' }, + mechanism: { handled: false, type: 'auto.http.hono.context_error' }, }); } } diff --git a/packages/hono/src/shared/patchAppUse.ts b/packages/hono/src/shared/patchAppUse.ts index dfcd186dc38a..28c3c49e7193 100644 --- a/packages/hono/src/shared/patchAppUse.ts +++ b/packages/hono/src/shared/patchAppUse.ts @@ -6,7 +6,7 @@ import { SPAN_STATUS_OK, startInactiveSpan, } from '@sentry/core'; -import type { Hono, MiddlewareHandler } from 'hono'; +import type { Env, Hono, MiddlewareHandler } from 'hono'; const MIDDLEWARE_ORIGIN = 'auto.middleware.hono'; @@ -14,7 +14,7 @@ const MIDDLEWARE_ORIGIN = 'auto.middleware.hono'; * Patches `app.use` so that every middleware registered through it is automatically * wrapped in a Sentry span. Supports both forms: `app.use(...handlers)` and `app.use(path, ...handlers)`. 
*/ -export function patchAppUse(app: Hono): void { +export function patchAppUse(app: Hono): void { app.use = new Proxy(app.use, { apply(target: typeof app.use, thisArg: typeof app, args: Parameters): ReturnType { const [first, ...rest] = args as [unknown, ...MiddlewareHandler[]]; diff --git a/packages/hono/test/cloudflare/middleware.test.ts b/packages/hono/test/cloudflare/middleware.test.ts index 08629d706e8b..ac512d41afee 100644 --- a/packages/hono/test/cloudflare/middleware.test.ts +++ b/packages/hono/test/cloudflare/middleware.test.ts @@ -25,7 +25,7 @@ describe('Hono Cloudflare Middleware', () => { }); describe('sentry middleware', () => { - it('calls applySdkMetadata with "hono"', () => { + it('calls applySdkMetadata with "hono" when the options callback is invoked', () => { const app = new Hono(); const options = { dsn: 'https://public@dsn.ingest.sentry.io/1337', @@ -33,8 +33,11 @@ describe('Hono Cloudflare Middleware', () => { sentry(app, options); + const optionsCallback = withSentryMock.mock.calls[0]?.[0]; + optionsCallback(); + expect(applySdkMetadataMock).toHaveBeenCalledTimes(1); - expect(applySdkMetadataMock).toHaveBeenCalledWith(options, 'hono'); + expect(applySdkMetadataMock).toHaveBeenCalledWith(options, 'hono', ['hono', 'cloudflare']); }); it('calls withSentry with modified options', () => { @@ -63,24 +66,13 @@ describe('Hono Cloudflare Middleware', () => { name: 'npm:@sentry/hono', version: SDK_VERSION, }, + { + name: 'npm:@sentry/cloudflare', + version: SDK_VERSION, + }, ]); }); - it('calls applySdkMetadata before withSentry', () => { - const app = new Hono(); - const options = { - dsn: 'https://public@dsn.ingest.sentry.io/1337', - }; - - sentry(app, options); - - // Verify applySdkMetadata was called before withSentry - const applySdkMetadataCallOrder = applySdkMetadataMock.mock.invocationCallOrder[0]; - const withSentryCallOrder = withSentryMock.mock.invocationCallOrder[0]; - - expect(applySdkMetadataCallOrder).toBeLessThan(withSentryCallOrder 
as number); - }); - it('preserves all user options', () => { const app = new Hono(); const options = { @@ -124,6 +116,53 @@ describe('Hono Cloudflare Middleware', () => { expect(middleware.constructor.name).toBe('AsyncFunction'); }); + + describe('when options is a function (env callback)', () => { + it('calls the options function with the env argument passed by withSentry', () => { + type Bindings = { SENTRY_DSN: string }; + const app = new Hono<{ Bindings: Bindings }>(); + const mockEnv: Bindings = { SENTRY_DSN: 'https://public@dsn.ingest.sentry.io/1337' }; + const optionsFn = vi.fn((env: Bindings) => ({ dsn: env.SENTRY_DSN })); + + sentry(app, optionsFn); + + const optionsCallback = withSentryMock.mock.calls[0]?.[0]; + optionsCallback(mockEnv); + + expect(optionsFn).toHaveBeenCalledTimes(1); + expect(optionsFn).toHaveBeenCalledWith(mockEnv); + }); + + it('uses the return value of the options function as configuration', () => { + type Bindings = { SENTRY_DSN: string }; + const app = new Hono<{ Bindings: Bindings }>(); + const mockEnv: Bindings = { SENTRY_DSN: 'https://public@dsn.ingest.sentry.io/1337' }; + + sentry(app, (env: Bindings) => ({ dsn: env.SENTRY_DSN, environment: 'production' })); + + const optionsCallback = withSentryMock.mock.calls[0]?.[0]; + const result = optionsCallback(mockEnv); + + expect(result.dsn).toBe('https://public@dsn.ingest.sentry.io/1337'); + expect(result.environment).toBe('production'); + }); + + it('calls applySdkMetadata with the options object returned by the function', () => { + type Bindings = { SENTRY_DSN: string }; + const app = new Hono<{ Bindings: Bindings }>(); + const mockEnv: Bindings = { SENTRY_DSN: 'https://public@dsn.ingest.sentry.io/1337' }; + const returnedOptions = { dsn: 'https://public@dsn.ingest.sentry.io/1337' }; + const optionsFn = vi.fn(() => returnedOptions); + + sentry(app, optionsFn); + + const optionsCallback = withSentryMock.mock.calls[0]?.[0]; + optionsCallback(mockEnv); + + 
expect(applySdkMetadataMock).toHaveBeenCalledTimes(1); + expect(applySdkMetadataMock).toHaveBeenCalledWith(returnedOptions, 'hono', ['hono', 'cloudflare']); + }); + }); }); describe('filters Hono integration from user-provided integrations', () => { diff --git a/packages/hono/test/node/middleware.test.ts b/packages/hono/test/node/middleware.test.ts new file mode 100644 index 000000000000..1473daf98acc --- /dev/null +++ b/packages/hono/test/node/middleware.test.ts @@ -0,0 +1,259 @@ +import * as SentryCore from '@sentry/core'; +import { SDK_VERSION } from '@sentry/core'; +import { Hono } from 'hono'; +import { beforeEach, describe, expect, it, type Mock, vi } from 'vitest'; +import { sentry } from '../../src/node/middleware'; +import type { Integration } from '@sentry/core'; + +vi.mock('@sentry/node', () => ({ + init: vi.fn(), +})); + +// eslint-disable-next-line @typescript-eslint/consistent-type-imports +const { init: initNodeMock } = await vi.importMock('@sentry/node'); + +vi.mock('@sentry/core', async () => { + const actual = await vi.importActual('@sentry/core'); + return { + ...actual, + // eslint-disable-next-line @typescript-eslint/ban-ts-comment + // @ts-ignore + applySdkMetadata: vi.fn(actual.applySdkMetadata), + }; +}); + +const applySdkMetadataMock = SentryCore.applySdkMetadata as Mock; + +describe('Hono Node Middleware', () => { + beforeEach(() => { + vi.clearAllMocks(); + }); + + describe('sentry middleware', () => { + it('calls applySdkMetadata with "hono"', () => { + const app = new Hono(); + const options = { + dsn: 'https://public@dsn.ingest.sentry.io/1337', + }; + + sentry(app, options); + + expect(applySdkMetadataMock).toHaveBeenCalledTimes(1); + expect(applySdkMetadataMock).toHaveBeenCalledWith(options, 'hono', ['hono', 'node']); + }); + + it('calls init from @sentry/node', () => { + const app = new Hono(); + const options = { + dsn: 'https://public@dsn.ingest.sentry.io/1337', + }; + + sentry(app, options); + + 
expect(initNodeMock).toHaveBeenCalledTimes(1); + expect(initNodeMock).toHaveBeenCalledWith( + expect.objectContaining({ + dsn: 'https://public@dsn.ingest.sentry.io/1337', + }), + ); + }); + + it('sets SDK metadata before calling Node init', () => { + const app = new Hono(); + const options = { + dsn: 'https://public@dsn.ingest.sentry.io/1337', + }; + + sentry(app, options); + + const applySdkMetadataCallOrder = applySdkMetadataMock.mock.invocationCallOrder[0]; + const initNodeCallOrder = (initNodeMock as Mock).mock.invocationCallOrder[0]; + + expect(applySdkMetadataCallOrder).toBeLessThan(initNodeCallOrder as number); + }); + + it('preserves all user options', () => { + const app = new Hono(); + const options = { + dsn: 'https://public@dsn.ingest.sentry.io/1337', + environment: 'production', + sampleRate: 0.5, + tracesSampleRate: 1.0, + debug: true, + }; + + sentry(app, options); + + expect(initNodeMock).toHaveBeenCalledWith( + expect.objectContaining({ + dsn: 'https://public@dsn.ingest.sentry.io/1337', + environment: 'production', + sampleRate: 0.5, + tracesSampleRate: 1.0, + debug: true, + }), + ); + }); + + it('returns a middleware handler function', () => { + const app = new Hono(); + const options = { + dsn: 'https://public@dsn.ingest.sentry.io/1337', + }; + + const middleware = sentry(app, options); + + expect(middleware).toBeDefined(); + expect(typeof middleware).toBe('function'); + expect(middleware).toHaveLength(2); // Hono middleware takes (context, next) + }); + + it('returns an async middleware handler', () => { + const app = new Hono(); + const middleware = sentry(app, {}); + + expect(middleware.constructor.name).toBe('AsyncFunction'); + }); + + it('passes an integrations function to initNode (never a raw array)', () => { + const app = new Hono(); + sentry(app, { dsn: 'https://public@dsn.ingest.sentry.io/1337' }); + + const callArgs = (initNodeMock as Mock).mock.calls[0]?.[0]; + expect(typeof callArgs.integrations).toBe('function'); + }); + + 
it('includes hono SDK metadata', () => { + const app = new Hono(); + const options = { + dsn: 'https://public@dsn.ingest.sentry.io/1337', + }; + + sentry(app, options); + + expect(initNodeMock).toHaveBeenCalledWith( + expect.objectContaining({ + _metadata: expect.objectContaining({ + sdk: expect.objectContaining({ + name: 'sentry.javascript.hono', + version: SDK_VERSION, + packages: [ + { name: 'npm:@sentry/hono', version: SDK_VERSION }, + { name: 'npm:@sentry/node', version: SDK_VERSION }, + ], + }), + }), + }), + ); + }); + }); + + describe('Hono integration filtering', () => { + const honoIntegration = { name: 'Hono' } as Integration; + const otherIntegration = { name: 'Other' } as Integration; + + const getIntegrationsFn = (): ((defaults: Integration[]) => Integration[]) => { + const callArgs = (initNodeMock as Mock).mock.calls[0]?.[0]; + return callArgs.integrations as (defaults: Integration[]) => Integration[]; + }; + + describe('when integrations is an array', () => { + it('keeps a user-explicitly-provided Hono integration', () => { + const app = new Hono(); + sentry(app, { integrations: [honoIntegration, otherIntegration] }); + + const integrationsFn = getIntegrationsFn(); + const result = integrationsFn([]); + expect(result.map(i => i.name)).toContain('Hono'); + expect(result.map(i => i.name)).toContain('Other'); + }); + + it('keeps non-Hono user integrations', () => { + const app = new Hono(); + sentry(app, { integrations: [otherIntegration] }); + + const integrationsFn = getIntegrationsFn(); + expect(integrationsFn([])).toEqual([otherIntegration]); + }); + + it('preserves user-provided Hono even when defaults would also provide it', () => { + const app = new Hono(); + sentry(app, { integrations: [honoIntegration] }); + + const integrationsFn = getIntegrationsFn(); + // Defaults include Hono, but it should be filtered from defaults; user's copy is kept + const result = integrationsFn([honoIntegration, otherIntegration]); + expect(result.filter(i => i.name 
=== 'Hono')).toHaveLength(1); + }); + + it('removes Hono from defaults when user does not explicitly provide it', () => { + const app = new Hono(); + sentry(app, { integrations: [otherIntegration] }); + + const integrationsFn = getIntegrationsFn(); + const defaultsWithHono = [honoIntegration, otherIntegration]; + const result = integrationsFn(defaultsWithHono); + expect(result.map(i => i.name)).not.toContain('Hono'); + }); + + it('deduplicates non-Hono integrations when user integrations overlap with defaults', () => { + const app = new Hono(); + const duplicateIntegration = { name: 'Other' } as Integration; + sentry(app, { integrations: [duplicateIntegration] }); + + const integrationsFn = getIntegrationsFn(); + const defaultsWithOverlap = [honoIntegration, otherIntegration]; + const result = integrationsFn(defaultsWithOverlap); + expect(result).toHaveLength(1); + expect(result[0]?.name).toBe('Other'); + }); + }); + + describe('when integrations is a function', () => { + it('passes defaults without Hono to the user function', () => { + const app = new Hono(); + const userFn = vi.fn((_defaults: Integration[]) => [otherIntegration]); + const defaultIntegration = { name: 'Default' } as Integration; + + sentry(app, { integrations: userFn }); + + const integrationsFn = getIntegrationsFn(); + integrationsFn([honoIntegration, defaultIntegration]); + + const receivedDefaults = userFn.mock.calls[0]?.[0] as Integration[]; + expect(receivedDefaults.map(i => i.name)).not.toContain('Hono'); + expect(receivedDefaults.map(i => i.name)).toContain('Default'); + }); + + it('preserves a Hono integration explicitly returned by the user function', () => { + const app = new Hono(); + sentry(app, { integrations: () => [honoIntegration, otherIntegration] }); + + const integrationsFn = getIntegrationsFn(); + const result = integrationsFn([]); + expect(result.map(i => i.name)).toContain('Hono'); + expect(result.map(i => i.name)).toContain('Other'); + }); + + it('does not include Hono when 
user function just returns defaults', () => { + const app = new Hono(); + sentry(app, { integrations: (defaults: Integration[]) => defaults }); + + const integrationsFn = getIntegrationsFn(); + const result = integrationsFn([honoIntegration, otherIntegration]); + expect(result.map(i => i.name)).not.toContain('Hono'); + expect(result.map(i => i.name)).toContain('Other'); + }); + }); + + describe('when integrations is undefined', () => { + it('removes Hono from defaults', () => { + const app = new Hono(); + sentry(app, {}); + + const integrationsFn = getIntegrationsFn(); + expect(integrationsFn([honoIntegration, otherIntegration])).toEqual([otherIntegration]); + }); + }); + }); +}); diff --git a/packages/integration-shims/package.json b/packages/integration-shims/package.json index a0f2b1c926e7..2be75c78604b 100644 --- a/packages/integration-shims/package.json +++ b/packages/integration-shims/package.json @@ -41,8 +41,8 @@ "build:dev:watch": "run-p build:watch", "build:transpile:watch": "yarn build:transpile --watch", "clean": "rimraf build", - "fix": "oxlint . --fix", - "lint": "oxlint .", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --fix --type-aware", + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . 
--type-aware", "lint:es-compatibility": "es-check es2020 ./build/cjs/*.js && es-check es2020 ./build/esm/*.js --module", "test": "vitest run" }, diff --git a/packages/nestjs/package.json b/packages/nestjs/package.json index b1ee699dc40b..4e1ee3ba4579 100644 --- a/packages/nestjs/package.json +++ b/packages/nestjs/package.json @@ -45,10 +45,10 @@ }, "dependencies": { "@opentelemetry/api": "^1.9.0", - "@opentelemetry/core": "^2.5.1", - "@opentelemetry/instrumentation": "^0.211.0", - "@opentelemetry/instrumentation-nestjs-core": "0.57.0", - "@opentelemetry/semantic-conventions": "^1.39.0", + "@opentelemetry/core": "^2.6.0", + "@opentelemetry/instrumentation": "^0.213.0", + "@opentelemetry/instrumentation-nestjs-core": "0.59.0", + "@opentelemetry/semantic-conventions": "^1.40.0", "@sentry/core": "10.43.0", "@sentry/node": "10.43.0" }, @@ -75,8 +75,8 @@ "build:tarball": "npm pack", "circularDepCheck": "madge --circular src/index.ts && madge --circular src/setup.ts", "clean": "rimraf build coverage sentry-nestjs-*.tgz ./*.d.ts ./*.d.ts.map", - "fix": "oxlint . --fix", - "lint": "oxlint .", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --fix --type-aware", + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . 
--type-aware", "lint:es-compatibility": "es-check es2022 ./build/cjs/*.js && es-check es2022 ./build/esm/*.js --module", "test": "vitest run", "test:watch": "vitest --watch", diff --git a/packages/nestjs/src/decorators.ts b/packages/nestjs/src/decorators.ts index 8f1a7151894f..53e7bea05866 100644 --- a/packages/nestjs/src/decorators.ts +++ b/packages/nestjs/src/decorators.ts @@ -1,10 +1,5 @@ import type { MonitorConfig } from '@sentry/core'; -import { - captureException, - isThenable, - SEMANTIC_ATTRIBUTE_SENTRY_OP, - SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN, -} from '@sentry/core'; +import { captureException, SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core'; import * as Sentry from '@sentry/node'; import { startSpan } from '@sentry/node'; import { isExpectedError } from './helpers'; @@ -20,20 +15,7 @@ export const SentryCron = (monitorSlug: string, monitorConfig?: MonitorConfig): return Sentry.withMonitor( monitorSlug, () => { - let result; - try { - result = originalMethod.apply(this, args); - } catch (e) { - captureException(e, { mechanism: { handled: false, type: 'auto.cron.nestjs' } }); - throw e; - } - if (isThenable(result)) { - return result.then(undefined, e => { - captureException(e, { mechanism: { handled: false, type: 'auto.cron.nestjs.async' } }); - throw e; - }); - } - return result; + return originalMethod.apply(this, args); }, monitorConfig, ); diff --git a/packages/nestjs/src/integrations/helpers.ts b/packages/nestjs/src/integrations/helpers.ts index 31c4e265f8f2..d8b50957f979 100644 --- a/packages/nestjs/src/integrations/helpers.ts +++ b/packages/nestjs/src/integrations/helpers.ts @@ -28,14 +28,19 @@ export function isPatched(target: InjectableTarget | CatchTarget): boolean { * Returns span options for nest middleware spans. 
*/ // eslint-disable-next-line @typescript-eslint/explicit-function-return-type -export function getMiddlewareSpanOptions(target: InjectableTarget | CatchTarget, name: string | undefined = undefined) { +export function getMiddlewareSpanOptions( + target: InjectableTarget | CatchTarget, + name: string | undefined = undefined, + componentType: string | undefined = undefined, +) { const span_name = name ?? target.name; // fallback to class name if no name is provided + const origin = componentType ? `auto.middleware.nestjs.${componentType}` : 'auto.middleware.nestjs'; return { name: span_name, attributes: { [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'middleware.nestjs', - [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.middleware.nestjs', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: origin, }, }; } @@ -58,6 +63,26 @@ export function getEventSpanOptions(event: string): { }; } +/** + * Returns span options for nest bullmq process spans. + */ +export function getBullMQProcessSpanOptions(queueName: string): { + name: string; + attributes: Record; + forceTransaction: boolean; +} { + return { + name: `${queueName} process`, + attributes: { + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'queue.process', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.queue.nestjs.bullmq', + 'messaging.system': 'bullmq', + 'messaging.destination.name': queueName, + }, + forceTransaction: true, + }; +} + /** * Adds instrumentation to a js observable and attaches the span to an active parent span. 
*/ diff --git a/packages/nestjs/src/integrations/nest.ts b/packages/nestjs/src/integrations/nest.ts index 75dc1f845693..330c76f319cb 100644 --- a/packages/nestjs/src/integrations/nest.ts +++ b/packages/nestjs/src/integrations/nest.ts @@ -1,8 +1,10 @@ import { NestInstrumentation as NestInstrumentationCore } from '@opentelemetry/instrumentation-nestjs-core'; import { defineIntegration } from '@sentry/core'; import { generateInstrumentOnce } from '@sentry/node'; +import { SentryNestBullMQInstrumentation } from './sentry-nest-bullmq-instrumentation'; import { SentryNestEventInstrumentation } from './sentry-nest-event-instrumentation'; import { SentryNestInstrumentation } from './sentry-nest-instrumentation'; +import { SentryNestScheduleInstrumentation } from './sentry-nest-schedule-instrumentation'; const INTEGRATION_NAME = 'Nest'; @@ -18,11 +20,21 @@ const instrumentNestEvent = generateInstrumentOnce(`${INTEGRATION_NAME}.Event`, return new SentryNestEventInstrumentation(); }); +const instrumentNestSchedule = generateInstrumentOnce(`${INTEGRATION_NAME}.Schedule`, () => { + return new SentryNestScheduleInstrumentation(); +}); + +const instrumentNestBullMQ = generateInstrumentOnce(`${INTEGRATION_NAME}.BullMQ`, () => { + return new SentryNestBullMQInstrumentation(); +}); + export const instrumentNest = Object.assign( (): void => { instrumentNestCore(); instrumentNestCommon(); instrumentNestEvent(); + instrumentNestSchedule(); + instrumentNestBullMQ(); }, { id: INTEGRATION_NAME }, ); diff --git a/packages/nestjs/src/integrations/sentry-nest-bullmq-instrumentation.ts b/packages/nestjs/src/integrations/sentry-nest-bullmq-instrumentation.ts new file mode 100644 index 000000000000..b18bab1dc07c --- /dev/null +++ b/packages/nestjs/src/integrations/sentry-nest-bullmq-instrumentation.ts @@ -0,0 +1,115 @@ +import type { InstrumentationConfig } from '@opentelemetry/instrumentation'; +import { + InstrumentationBase, + InstrumentationNodeModuleDefinition, + 
InstrumentationNodeModuleFile, + isWrapped, +} from '@opentelemetry/instrumentation'; +import { captureException, SDK_VERSION, startSpan, withIsolationScope } from '@sentry/core'; +import { getBullMQProcessSpanOptions } from './helpers'; +import type { ProcessorDecoratorTarget } from './types'; + +const supportedVersions = ['>=10.0.0']; +const COMPONENT = '@nestjs/bullmq'; + +/** + * Custom instrumentation for nestjs bullmq module. + * + * This hooks into the `@Processor` class decorator, which is applied on queue processor classes. + * It wraps the `process` method on the decorated class to fork the isolation scope for each job + * invocation, create a span, and capture errors. + */ +export class SentryNestBullMQInstrumentation extends InstrumentationBase { + public constructor(config: InstrumentationConfig = {}) { + super('sentry-nestjs-bullmq', SDK_VERSION, config); + } + + /** + * Initializes the instrumentation by defining the modules to be patched. + */ + public init(): InstrumentationNodeModuleDefinition { + const moduleDef = new InstrumentationNodeModuleDefinition(COMPONENT, supportedVersions); + + moduleDef.files.push(this._getProcessorFileInstrumentation(supportedVersions)); + return moduleDef; + } + + /** + * Wraps the @Processor decorator. + */ + private _getProcessorFileInstrumentation(versions: string[]): InstrumentationNodeModuleFile { + return new InstrumentationNodeModuleFile( + '@nestjs/bullmq/dist/decorators/processor.decorator.js', + versions, + (moduleExports: { Processor: ProcessorDecoratorTarget }) => { + if (isWrapped(moduleExports.Processor)) { + this._unwrap(moduleExports, 'Processor'); + } + this._wrap(moduleExports, 'Processor', this._createWrapProcessor()); + return moduleExports; + }, + (moduleExports: { Processor: ProcessorDecoratorTarget }) => { + this._unwrap(moduleExports, 'Processor'); + }, + ); + } + + /** + * Creates a wrapper function for the @Processor class decorator. 
+ */ + private _createWrapProcessor() { + // eslint-disable-next-line @typescript-eslint/no-explicit-any + return function wrapProcessor(original: any) { + // eslint-disable-next-line @typescript-eslint/no-explicit-any + return function wrappedProcessor(...decoratorArgs: any[]) { + // Extract queue name from decorator args + // @Processor('queueName') or @Processor({ name: 'queueName' }) + const queueName = + // eslint-disable-next-line @typescript-eslint/no-unsafe-member-access + typeof decoratorArgs[0] === 'string' ? decoratorArgs[0] : decoratorArgs[0]?.name || 'unknown'; + + // Get the original class decorator + // eslint-disable-next-line @typescript-eslint/no-unsafe-assignment + const classDecorator = original(...decoratorArgs); + + // Return a new class decorator that wraps the process method + return function (target: ProcessorDecoratorTarget) { + const originalProcess = target.prototype.process; + + if ( + originalProcess && + typeof originalProcess === 'function' && + !target.__SENTRY_INTERNAL__ && + !originalProcess.__SENTRY_INSTRUMENTED__ + ) { + target.prototype.process = new Proxy(originalProcess, { + apply: (originalProcessFn, thisArg, args) => { + return withIsolationScope(() => { + return startSpan(getBullMQProcessSpanOptions(queueName), async () => { + try { + return await originalProcessFn.apply(thisArg, args); + } catch (error) { + captureException(error, { + mechanism: { + handled: false, + type: 'auto.queue.nestjs.bullmq', + }, + }); + throw error; + } + }); + }); + }, + }); + + target.prototype.process.__SENTRY_INSTRUMENTED__ = true; + } + + // Apply the original class decorator + // eslint-disable-next-line @typescript-eslint/no-unsafe-return + return classDecorator(target); + }; + }; + }; + } +} diff --git a/packages/nestjs/src/integrations/sentry-nest-event-instrumentation.ts b/packages/nestjs/src/integrations/sentry-nest-event-instrumentation.ts index 92c90c3719de..b4f8784eea05 100644 --- 
a/packages/nestjs/src/integrations/sentry-nest-event-instrumentation.ts +++ b/packages/nestjs/src/integrations/sentry-nest-event-instrumentation.ts @@ -5,7 +5,7 @@ import { InstrumentationNodeModuleFile, isWrapped, } from '@opentelemetry/instrumentation'; -import { captureException, SDK_VERSION, startSpan } from '@sentry/core'; +import { captureException, SDK_VERSION, startSpan, withIsolationScope } from '@sentry/core'; import { getEventSpanOptions } from './helpers'; import type { OnEventTarget } from './types'; @@ -16,6 +16,9 @@ const COMPONENT = '@nestjs/event-emitter'; * Custom instrumentation for nestjs event-emitter * * This hooks into the `OnEvent` decorator, which is applied on event handlers. + * Wrapped handlers run inside a forked isolation scope to ensure event-scoped data + * (breadcrumbs, tags, etc.) does not leak between concurrent event invocations + * or into subsequent HTTP requests. */ export class SentryNestEventInstrumentation extends InstrumentationBase { public constructor(config: InstrumentationConfig = {}) { @@ -110,21 +113,23 @@ export class SentryNestEventInstrumentation extends InstrumentationBase { } } - return startSpan(getEventSpanOptions(eventName), async () => { - try { - // eslint-disable-next-line @typescript-eslint/no-unsafe-member-access - const result = await originalHandler.apply(this, args); - return result; - } catch (error) { - // exceptions from event handlers are not caught by global error filter - captureException(error, { - mechanism: { - handled: false, - type: 'auto.event.nestjs', - }, - }); - throw error; - } + return withIsolationScope(() => { + return startSpan(getEventSpanOptions(eventName), async () => { + try { + // eslint-disable-next-line @typescript-eslint/no-unsafe-member-access + const result = await originalHandler.apply(this, args); + return result; + } catch (error) { + // exceptions from event handlers are not caught by global error filter + captureException(error, { + mechanism: { + handled: false, + 
type: 'auto.event.nestjs', + }, + }); + throw error; + } + }); }); }; diff --git a/packages/nestjs/src/integrations/sentry-nest-instrumentation.ts b/packages/nestjs/src/integrations/sentry-nest-instrumentation.ts index 04b20f5d4d6a..e1d7fa978020 100644 --- a/packages/nestjs/src/integrations/sentry-nest-instrumentation.ts +++ b/packages/nestjs/src/integrations/sentry-nest-instrumentation.ts @@ -140,7 +140,7 @@ export class SentryNestInstrumentation extends InstrumentationBase { return originalCanActivate.apply(thisArgCanActivate, argsCanActivate); } - return startSpan(getMiddlewareSpanOptions(target), () => { + return startSpan(getMiddlewareSpanOptions(target, undefined, 'guard'), () => { return originalCanActivate.apply(thisArgCanActivate, argsCanActivate); }); }, @@ -162,7 +162,7 @@ export class SentryNestInstrumentation extends InstrumentationBase { return originalTransform.apply(thisArgTransform, argsTransform); } - return startSpan(getMiddlewareSpanOptions(target), () => { + return startSpan(getMiddlewareSpanOptions(target, undefined, 'pipe'), () => { return originalTransform.apply(thisArgTransform, argsTransform); }); }, @@ -192,74 +192,82 @@ export class SentryNestInstrumentation extends InstrumentationBase { return originalIntercept.apply(thisArgIntercept, argsIntercept); } - return startSpanManual(getMiddlewareSpanOptions(target), (beforeSpan: Span) => { - // eslint-disable-next-line @typescript-eslint/unbound-method - next.handle = new Proxy(next.handle, { - apply: (originalHandle, thisArgHandle, argsHandle) => { - beforeSpan.end(); + return startSpanManual( + getMiddlewareSpanOptions(target, undefined, 'interceptor'), + (beforeSpan: Span) => { + // eslint-disable-next-line @typescript-eslint/unbound-method + next.handle = new Proxy(next.handle, { + apply: (originalHandle, thisArgHandle, argsHandle) => { + beforeSpan.end(); + + if (parentSpan) { + return withActiveSpan(parentSpan, () => { + const handleReturnObservable = Reflect.apply(originalHandle, 
thisArgHandle, argsHandle); + + if (!SeenNestjsContextSet.has(context)) { + SeenNestjsContextSet.add(context); + afterSpan = startInactiveSpan( + getMiddlewareSpanOptions(target, 'Interceptors - After Route', 'interceptor'), + ); + } - if (parentSpan) { - return withActiveSpan(parentSpan, () => { + return handleReturnObservable; + }); + } else { const handleReturnObservable = Reflect.apply(originalHandle, thisArgHandle, argsHandle); if (!SeenNestjsContextSet.has(context)) { SeenNestjsContextSet.add(context); afterSpan = startInactiveSpan( - getMiddlewareSpanOptions(target, 'Interceptors - After Route'), + getMiddlewareSpanOptions(target, 'Interceptors - After Route', 'interceptor'), ); } return handleReturnObservable; - }); - } else { - const handleReturnObservable = Reflect.apply(originalHandle, thisArgHandle, argsHandle); - - if (!SeenNestjsContextSet.has(context)) { - SeenNestjsContextSet.add(context); - afterSpan = startInactiveSpan(getMiddlewareSpanOptions(target, 'Interceptors - After Route')); } + }, + }); - return handleReturnObservable; - } - }, - }); - - let returnedObservableInterceptMaybePromise: Observable | Promise>; + let returnedObservableInterceptMaybePromise: Observable | Promise>; - try { - returnedObservableInterceptMaybePromise = originalIntercept.apply(thisArgIntercept, argsIntercept); - } catch (e) { - beforeSpan.end(); - afterSpan?.end(); - throw e; - } + try { + returnedObservableInterceptMaybePromise = originalIntercept.apply( + thisArgIntercept, + argsIntercept, + ); + } catch (e) { + beforeSpan.end(); + afterSpan?.end(); + throw e; + } + + if (!afterSpan) { + return returnedObservableInterceptMaybePromise; + } + + // handle async interceptor + if (isThenable(returnedObservableInterceptMaybePromise)) { + return returnedObservableInterceptMaybePromise.then( + observable => { + instrumentObservable(observable, afterSpan ?? 
parentSpan); + return observable; + }, + e => { + beforeSpan.end(); + afterSpan?.end(); + throw e; + }, + ); + } + + // handle sync interceptor + if (typeof returnedObservableInterceptMaybePromise.subscribe === 'function') { + instrumentObservable(returnedObservableInterceptMaybePromise, afterSpan); + } - if (!afterSpan) { return returnedObservableInterceptMaybePromise; - } - - // handle async interceptor - if (isThenable(returnedObservableInterceptMaybePromise)) { - return returnedObservableInterceptMaybePromise.then( - observable => { - instrumentObservable(observable, afterSpan ?? parentSpan); - return observable; - }, - e => { - beforeSpan.end(); - afterSpan?.end(); - throw e; - }, - ); - } - - // handle sync interceptor - if (typeof returnedObservableInterceptMaybePromise.subscribe === 'function') { - instrumentObservable(returnedObservableInterceptMaybePromise, afterSpan); - } - - return returnedObservableInterceptMaybePromise; - }); + }, + ); }, }); } @@ -293,7 +301,7 @@ export class SentryNestInstrumentation extends InstrumentationBase { return originalCatch.apply(thisArgCatch, argsCatch); } - return startSpan(getMiddlewareSpanOptions(target), () => { + return startSpan(getMiddlewareSpanOptions(target, undefined, 'exception_filter'), () => { return originalCatch.apply(thisArgCatch, argsCatch); }); }, diff --git a/packages/nestjs/src/integrations/sentry-nest-schedule-instrumentation.ts b/packages/nestjs/src/integrations/sentry-nest-schedule-instrumentation.ts new file mode 100644 index 000000000000..ea0261164f9c --- /dev/null +++ b/packages/nestjs/src/integrations/sentry-nest-schedule-instrumentation.ts @@ -0,0 +1,176 @@ +import type { InstrumentationConfig } from '@opentelemetry/instrumentation'; +import { + InstrumentationBase, + InstrumentationNodeModuleDefinition, + InstrumentationNodeModuleFile, + isWrapped, +} from '@opentelemetry/instrumentation'; +import { captureException, isThenable, SDK_VERSION, withIsolationScope } from '@sentry/core'; +import 
type { ScheduleDecoratorTarget } from './types'; + +const supportedVersions = ['>=2.0.0']; +const COMPONENT = '@nestjs/schedule'; + +/** + * Custom instrumentation for nestjs schedule module. + * + * This hooks into the `@Cron`, `@Interval`, and `@Timeout` decorators, which are applied on scheduled task handlers. + * It forks the isolation scope for each handler invocation, preventing data leakage to subsequent HTTP requests. + */ +export class SentryNestScheduleInstrumentation extends InstrumentationBase { + public constructor(config: InstrumentationConfig = {}) { + super('sentry-nestjs-schedule', SDK_VERSION, config); + } + + /** + * Initializes the instrumentation by defining the modules to be patched. + */ + public init(): InstrumentationNodeModuleDefinition { + const moduleDef = new InstrumentationNodeModuleDefinition(COMPONENT, supportedVersions); + + moduleDef.files.push(this._getCronFileInstrumentation(supportedVersions)); + moduleDef.files.push(this._getIntervalFileInstrumentation(supportedVersions)); + moduleDef.files.push(this._getTimeoutFileInstrumentation(supportedVersions)); + return moduleDef; + } + + /** + * Wraps the @Cron decorator. + */ + private _getCronFileInstrumentation(versions: string[]): InstrumentationNodeModuleFile { + return new InstrumentationNodeModuleFile( + '@nestjs/schedule/dist/decorators/cron.decorator.js', + versions, + (moduleExports: { Cron: ScheduleDecoratorTarget }) => { + if (isWrapped(moduleExports.Cron)) { + this._unwrap(moduleExports, 'Cron'); + } + this._wrap(moduleExports, 'Cron', this._createWrapDecorator('auto.function.nestjs.cron')); + return moduleExports; + }, + (moduleExports: { Cron: ScheduleDecoratorTarget }) => { + this._unwrap(moduleExports, 'Cron'); + }, + ); + } + + /** + * Wraps the @Interval decorator. 
+ */ + private _getIntervalFileInstrumentation(versions: string[]): InstrumentationNodeModuleFile { + return new InstrumentationNodeModuleFile( + '@nestjs/schedule/dist/decorators/interval.decorator.js', + versions, + (moduleExports: { Interval: ScheduleDecoratorTarget }) => { + if (isWrapped(moduleExports.Interval)) { + this._unwrap(moduleExports, 'Interval'); + } + this._wrap(moduleExports, 'Interval', this._createWrapDecorator('auto.function.nestjs.interval')); + return moduleExports; + }, + (moduleExports: { Interval: ScheduleDecoratorTarget }) => { + this._unwrap(moduleExports, 'Interval'); + }, + ); + } + + /** + * Wraps the @Timeout decorator. + */ + private _getTimeoutFileInstrumentation(versions: string[]): InstrumentationNodeModuleFile { + return new InstrumentationNodeModuleFile( + '@nestjs/schedule/dist/decorators/timeout.decorator.js', + versions, + (moduleExports: { Timeout: ScheduleDecoratorTarget }) => { + if (isWrapped(moduleExports.Timeout)) { + this._unwrap(moduleExports, 'Timeout'); + } + this._wrap(moduleExports, 'Timeout', this._createWrapDecorator('auto.function.nestjs.timeout')); + return moduleExports; + }, + (moduleExports: { Timeout: ScheduleDecoratorTarget }) => { + this._unwrap(moduleExports, 'Timeout'); + }, + ); + } + + /** + * Creates a wrapper function for a schedule decorator (@Cron, @Interval, or @Timeout). 
+ */ + private _createWrapDecorator(mechanismType: string) { + // eslint-disable-next-line @typescript-eslint/no-explicit-any + return function wrapDecorator(original: any) { + // eslint-disable-next-line @typescript-eslint/no-explicit-any + return function wrappedDecorator(...decoratorArgs: any[]) { + // Get the original decorator result + const decoratorResult = original(...decoratorArgs); + + // Return a new decorator function that wraps the handler + return (target: ScheduleDecoratorTarget, propertyKey: string | symbol, descriptor: PropertyDescriptor) => { + if ( + !descriptor.value || + typeof descriptor.value !== 'function' || + target.__SENTRY_INTERNAL__ || + // eslint-disable-next-line @typescript-eslint/no-unsafe-member-access + descriptor.value.__SENTRY_INSTRUMENTED__ + ) { + return decoratorResult(target, propertyKey, descriptor); + } + + // eslint-disable-next-line @typescript-eslint/no-unsafe-assignment + const originalHandler: (...handlerArgs: unknown[]) => unknown = descriptor.value; + const handlerName = originalHandler.name || propertyKey; + + // Not using async/await here to avoid changing the return type of sync handlers. + // This means we need to handle sync and async errors separately. 
+ descriptor.value = function (...args: unknown[]) { + return withIsolationScope(() => { + let result; + try { + // Catches errors from sync handlers + result = originalHandler.apply(this, args); + } catch (error) { + captureException(error, { + mechanism: { + handled: false, + type: mechanismType, + }, + }); + throw error; + } + + // Catches errors from async handlers (rejected promises bypass try/catch) + if (isThenable(result)) { + return result.then(undefined, (error: unknown) => { + captureException(error, { + mechanism: { + handled: false, + type: mechanismType, + }, + }); + throw error; + }); + } + + return result; + }); + }; + + // eslint-disable-next-line @typescript-eslint/no-unsafe-member-access + descriptor.value.__SENTRY_INSTRUMENTED__ = true; + + // Preserve the original function name + Object.defineProperty(descriptor.value, 'name', { + value: handlerName, + configurable: true, + enumerable: true, + writable: true, + }); + + // Apply the original decorator + return decoratorResult(target, propertyKey, descriptor); + }; + }; + }; + } +} diff --git a/packages/nestjs/src/integrations/types.ts b/packages/nestjs/src/integrations/types.ts index 8283e652edfb..6dd00caa8cc1 100644 --- a/packages/nestjs/src/integrations/types.ts +++ b/packages/nestjs/src/integrations/types.ts @@ -91,10 +91,28 @@ export interface CatchTarget { */ export interface OnEventTarget { name: string; - sentryPatched?: boolean; __SENTRY_INTERNAL__?: boolean; } +/** + * Represents a target method in NestJS annotated with @Cron, @Interval, or @Timeout. + */ +export interface ScheduleDecoratorTarget { + name: string; + __SENTRY_INTERNAL__?: boolean; +} + +/** + * Represents a target class in NestJS annotated with @Processor (BullMQ). + */ +export interface ProcessorDecoratorTarget { + name: string; + __SENTRY_INTERNAL__?: boolean; + prototype: { + process?: ((...args: any[]) => Promise) & { __SENTRY_INSTRUMENTED__?: boolean }; + }; +} + /** * Represents an express NextFunction. 
*/ diff --git a/packages/nestjs/test/integrations/bullmq.test.ts b/packages/nestjs/test/integrations/bullmq.test.ts new file mode 100644 index 000000000000..349a0c1b8e43 --- /dev/null +++ b/packages/nestjs/test/integrations/bullmq.test.ts @@ -0,0 +1,155 @@ +import 'reflect-metadata'; +import * as core from '@sentry/core'; +import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'; +import { SentryNestBullMQInstrumentation } from '../../src/integrations/sentry-nest-bullmq-instrumentation'; + +describe('BullMQInstrumentation', () => { + let instrumentation: SentryNestBullMQInstrumentation; + + beforeEach(() => { + instrumentation = new SentryNestBullMQInstrumentation(); + vi.spyOn(core, 'captureException'); + vi.spyOn(core, 'withIsolationScope').mockImplementation(callback => { + return (callback as () => unknown)(); + }); + vi.spyOn(core, 'startSpan').mockImplementation((_, callback) => { + return (callback as () => unknown)(); + }); + }); + + afterEach(() => { + vi.restoreAllMocks(); + }); + + describe('Processor decorator wrapping', () => { + let wrappedDecorator: any; + let mockClassDecorator: vi.Mock; + let mockProcessor: any; + + beforeEach(() => { + mockClassDecorator = vi.fn().mockImplementation(() => { + return (target: any) => target; + }); + + const moduleDef = instrumentation.init(); + const file = moduleDef.files[0]; + const moduleExports = { Processor: mockClassDecorator }; + file?.patch(moduleExports); + wrappedDecorator = moduleExports.Processor; + }); + + it('should call withIsolationScope and startSpan on process execution', async () => { + const originalProcess = vi.fn().mockResolvedValue('result'); + + mockProcessor = class TestProcessor { + process = originalProcess; + }; + mockProcessor.prototype.process = originalProcess; + + const classDecoratorFn = wrappedDecorator('test-queue'); + classDecoratorFn(mockProcessor); + + await mockProcessor.prototype.process(); + + expect(core.withIsolationScope).toHaveBeenCalled(); + 
expect(core.startSpan).toHaveBeenCalledWith( + expect.objectContaining({ + name: 'test-queue process', + forceTransaction: true, + attributes: expect.objectContaining({ + 'sentry.op': 'queue.process', + 'sentry.origin': 'auto.queue.nestjs.bullmq', + 'messaging.system': 'bullmq', + 'messaging.destination.name': 'test-queue', + }), + }), + expect.any(Function), + ); + expect(originalProcess).toHaveBeenCalled(); + }); + + it('should capture async exceptions and rethrow', async () => { + const error = new Error('Test error'); + const originalProcess = vi.fn().mockRejectedValue(error); + + mockProcessor = class TestProcessor {}; + mockProcessor.prototype.process = originalProcess; + + const classDecoratorFn = wrappedDecorator('test-queue'); + classDecoratorFn(mockProcessor); + + await expect(mockProcessor.prototype.process()).rejects.toThrow(error); + expect(core.captureException).toHaveBeenCalledWith(error, { + mechanism: { + handled: false, + type: 'auto.queue.nestjs.bullmq', + }, + }); + }); + + it('should skip wrapping when __SENTRY_INTERNAL__ is set', async () => { + const originalProcess = vi.fn().mockResolvedValue('result'); + + mockProcessor = class TestProcessor {}; + mockProcessor.prototype.process = originalProcess; + mockProcessor.__SENTRY_INTERNAL__ = true; + + const classDecoratorFn = wrappedDecorator('test-queue'); + classDecoratorFn(mockProcessor); + + // process should not be wrapped + expect(mockProcessor.prototype.process).toBe(originalProcess); + }); + + it('should not double-wrap process method', async () => { + const originalProcess = vi.fn().mockResolvedValue('result'); + + mockProcessor = class TestProcessor {}; + mockProcessor.prototype.process = originalProcess; + + const classDecoratorFn = wrappedDecorator('test-queue'); + classDecoratorFn(mockProcessor); + + const wrappedProcess = mockProcessor.prototype.process; + expect(wrappedProcess).not.toBe(originalProcess); + + // Apply decorator again + const classDecoratorFn2 = 
wrappedDecorator('test-queue'); + classDecoratorFn2(mockProcessor); + + // Should still be the same wrapped function (not double-wrapped) + expect(mockProcessor.prototype.process).toBe(wrappedProcess); + }); + + it('should extract queue name from ProcessorOptions object', async () => { + const originalProcess = vi.fn().mockResolvedValue('result'); + + mockProcessor = class TestProcessor {}; + mockProcessor.prototype.process = originalProcess; + + const classDecoratorFn = wrappedDecorator({ name: 'my-queue' }); + classDecoratorFn(mockProcessor); + + await mockProcessor.prototype.process(); + + expect(core.startSpan).toHaveBeenCalledWith( + expect.objectContaining({ + name: 'my-queue process', + }), + expect.any(Function), + ); + }); + + it('should apply the original class decorator', () => { + const originalProcess = vi.fn().mockResolvedValue('result'); + + mockProcessor = class TestProcessor {}; + mockProcessor.prototype.process = originalProcess; + + const classDecoratorFn = wrappedDecorator('test-queue'); + classDecoratorFn(mockProcessor); + + expect(mockClassDecorator).toHaveBeenCalledWith('test-queue'); + }); + }); +}); diff --git a/packages/nestjs/test/integrations/nest-event.test.ts b/packages/nestjs/test/integrations/nest-event.test.ts new file mode 100644 index 000000000000..debf5bc8e34a --- /dev/null +++ b/packages/nestjs/test/integrations/nest-event.test.ts @@ -0,0 +1,166 @@ +import 'reflect-metadata'; +import * as core from '@sentry/core'; +import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'; +import { SentryNestEventInstrumentation } from '../../src/integrations/sentry-nest-event-instrumentation'; +import type { OnEventTarget } from '../../src/integrations/types'; + +describe('EventInstrumentation', () => { + let instrumentation: SentryNestEventInstrumentation; + let mockOnEvent: vi.Mock; + let mockTarget: OnEventTarget; + + beforeEach(() => { + instrumentation = new SentryNestEventInstrumentation(); + // Mock OnEvent to return a 
function that applies the descriptor + mockOnEvent = vi.fn().mockImplementation(() => { + return (target: any, propertyKey: string, descriptor: PropertyDescriptor) => { + return descriptor; + }; + }); + mockTarget = { + name: 'TestClass', + prototype: {}, + } as OnEventTarget; + vi.spyOn(core, 'startSpan'); + vi.spyOn(core, 'captureException'); + vi.spyOn(core, 'withIsolationScope'); + }); + + afterEach(() => { + vi.restoreAllMocks(); + }); + + describe('init()', () => { + it('should return module definition with correct component name', () => { + const moduleDef = instrumentation.init(); + expect(moduleDef.name).toBe('@nestjs/event-emitter'); + }); + }); + + describe('OnEvent decorator wrapping', () => { + let wrappedOnEvent: any; + let descriptor: PropertyDescriptor; + let originalHandler: vi.Mock; + + beforeEach(() => { + originalHandler = vi.fn().mockResolvedValue('result'); + descriptor = { + value: originalHandler, + }; + + const moduleDef = instrumentation.init(); + const onEventFile = moduleDef.files[0]; + const moduleExports = { OnEvent: mockOnEvent }; + onEventFile?.patch(moduleExports); + wrappedOnEvent = moduleExports.OnEvent; + }); + + it('should wrap string event handlers', async () => { + const decorated = wrappedOnEvent('test.event'); + decorated(mockTarget, 'testMethod', descriptor); + + await descriptor.value(); + + expect(core.withIsolationScope).toHaveBeenCalled(); + expect(core.startSpan).toHaveBeenCalledWith( + expect.objectContaining({ + name: 'event test.event', + }), + expect.any(Function), + ); + expect(originalHandler).toHaveBeenCalled(); + }); + + it('should wrap symbol event handlers', async () => { + const decorated = wrappedOnEvent(Symbol('test.event')); + decorated(mockTarget, 'testMethod', descriptor); + + await descriptor.value(); + + expect(core.withIsolationScope).toHaveBeenCalled(); + expect(core.startSpan).toHaveBeenCalledWith( + expect.objectContaining({ + name: 'event Symbol(test.event)', + }), + expect.any(Function), + ); + 
expect(originalHandler).toHaveBeenCalled(); + }); + + it('should wrap string array event handlers', async () => { + const decorated = wrappedOnEvent(['test.event1', 'test.event2']); + decorated(mockTarget, 'testMethod', descriptor); + + await descriptor.value(); + + expect(core.withIsolationScope).toHaveBeenCalled(); + expect(core.startSpan).toHaveBeenCalledWith( + expect.objectContaining({ + name: 'event test.event1,test.event2', + }), + expect.any(Function), + ); + expect(originalHandler).toHaveBeenCalled(); + }); + + it('should wrap symbol array event handlers', async () => { + const decorated = wrappedOnEvent([Symbol('test.event1'), Symbol('test.event2')]); + decorated(mockTarget, 'testMethod', descriptor); + + await descriptor.value(); + + expect(core.withIsolationScope).toHaveBeenCalled(); + expect(core.startSpan).toHaveBeenCalledWith( + expect.objectContaining({ + name: 'event Symbol(test.event1),Symbol(test.event2)', + }), + expect.any(Function), + ); + expect(originalHandler).toHaveBeenCalled(); + }); + + it('should wrap mixed type array event handlers', async () => { + const decorated = wrappedOnEvent([Symbol('test.event1'), 'test.event2', Symbol('test.event3')]); + decorated(mockTarget, 'testMethod', descriptor); + + await descriptor.value(); + + expect(core.withIsolationScope).toHaveBeenCalled(); + expect(core.startSpan).toHaveBeenCalledWith( + expect.objectContaining({ + name: 'event Symbol(test.event1),test.event2,Symbol(test.event3)', + }), + expect.any(Function), + ); + expect(originalHandler).toHaveBeenCalled(); + }); + + it('should capture exceptions and rethrow', async () => { + const error = new Error('Test error'); + originalHandler.mockRejectedValue(error); + + const decorated = wrappedOnEvent('test.event'); + decorated(mockTarget, 'testMethod', descriptor); + + await expect(descriptor.value()).rejects.toThrow(error); + expect(core.captureException).toHaveBeenCalledWith(error, { + mechanism: { + handled: false, + type: 'auto.event.nestjs', + 
}, + }); + }); + + it('should skip wrapping for internal Sentry handlers', () => { + const internalTarget = { + ...mockTarget, + __SENTRY_INTERNAL__: true, + }; + + const decorated = wrappedOnEvent('test.event'); + decorated(internalTarget, 'testMethod', descriptor); + + expect(descriptor.value).toBe(originalHandler); + }); + }); +}); diff --git a/packages/nestjs/test/integrations/nest.test.ts b/packages/nestjs/test/integrations/nest.test.ts index 2d1d73b4657a..6b758d44c982 100644 --- a/packages/nestjs/test/integrations/nest.test.ts +++ b/packages/nestjs/test/integrations/nest.test.ts @@ -1,9 +1,6 @@ -import 'reflect-metadata'; -import * as core from '@sentry/core'; -import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'; +import { describe, expect, it } from 'vitest'; import { isPatched } from '../../src/integrations/helpers'; -import { SentryNestEventInstrumentation } from '../../src/integrations/sentry-nest-event-instrumentation'; -import type { InjectableTarget, OnEventTarget } from '../../src/integrations/types'; +import type { InjectableTarget } from '../../src/integrations/types'; describe('Nest', () => { describe('isPatched', () => { @@ -18,159 +15,4 @@ describe('Nest', () => { expect(target.sentryPatched).toBe(true); }); }); - - describe('EventInstrumentation', () => { - let instrumentation: SentryNestEventInstrumentation; - let mockOnEvent: vi.Mock; - let mockTarget: OnEventTarget; - - beforeEach(() => { - instrumentation = new SentryNestEventInstrumentation(); - // Mock OnEvent to return a function that applies the descriptor - mockOnEvent = vi.fn().mockImplementation(() => { - return (target: any, propertyKey: string, descriptor: PropertyDescriptor) => { - return descriptor; - }; - }); - mockTarget = { - name: 'TestClass', - prototype: {}, - } as OnEventTarget; - vi.spyOn(core, 'startSpan'); - vi.spyOn(core, 'captureException'); - }); - - afterEach(() => { - vi.restoreAllMocks(); - }); - - describe('init()', () => { - it('should return 
module definition with correct component name', () => { - const moduleDef = instrumentation.init(); - expect(moduleDef.name).toBe('@nestjs/event-emitter'); - }); - }); - - describe('OnEvent decorator wrapping', () => { - let wrappedOnEvent: any; - let descriptor: PropertyDescriptor; - let originalHandler: vi.Mock; - - beforeEach(() => { - originalHandler = vi.fn().mockResolvedValue('result'); - descriptor = { - value: originalHandler, - }; - - const moduleDef = instrumentation.init(); - const onEventFile = moduleDef.files[0]; - const moduleExports = { OnEvent: mockOnEvent }; - onEventFile?.patch(moduleExports); - wrappedOnEvent = moduleExports.OnEvent; - }); - - it('should wrap string event handlers', async () => { - const decorated = wrappedOnEvent('test.event'); - decorated(mockTarget, 'testMethod', descriptor); - - await descriptor.value(); - - expect(core.startSpan).toHaveBeenCalledWith( - expect.objectContaining({ - name: 'event test.event', - }), - expect.any(Function), - ); - expect(originalHandler).toHaveBeenCalled(); - }); - - it('should wrap symbol event handlers', async () => { - const decorated = wrappedOnEvent(Symbol('test.event')); - decorated(mockTarget, 'testMethod', descriptor); - - await descriptor.value(); - - expect(core.startSpan).toHaveBeenCalledWith( - expect.objectContaining({ - name: 'event Symbol(test.event)', - }), - expect.any(Function), - ); - expect(originalHandler).toHaveBeenCalled(); - }); - - it('should wrap string array event handlers', async () => { - const decorated = wrappedOnEvent(['test.event1', 'test.event2']); - decorated(mockTarget, 'testMethod', descriptor); - - await descriptor.value(); - - expect(core.startSpan).toHaveBeenCalledWith( - expect.objectContaining({ - name: 'event test.event1,test.event2', - }), - expect.any(Function), - ); - expect(originalHandler).toHaveBeenCalled(); - }); - - it('should wrap symbol array event handlers', async () => { - const decorated = wrappedOnEvent([Symbol('test.event1'), 
Symbol('test.event2')]); - decorated(mockTarget, 'testMethod', descriptor); - - await descriptor.value(); - - expect(core.startSpan).toHaveBeenCalledWith( - expect.objectContaining({ - name: 'event Symbol(test.event1),Symbol(test.event2)', - }), - expect.any(Function), - ); - expect(originalHandler).toHaveBeenCalled(); - }); - - it('should wrap mixed type array event handlers', async () => { - const decorated = wrappedOnEvent([Symbol('test.event1'), 'test.event2', Symbol('test.event3')]); - decorated(mockTarget, 'testMethod', descriptor); - - await descriptor.value(); - - expect(core.startSpan).toHaveBeenCalledWith( - expect.objectContaining({ - name: 'event Symbol(test.event1),test.event2,Symbol(test.event3)', - }), - expect.any(Function), - ); - expect(originalHandler).toHaveBeenCalled(); - }); - - it('should capture exceptions and rethrow', async () => { - const error = new Error('Test error'); - originalHandler.mockRejectedValue(error); - - const decorated = wrappedOnEvent('test.event'); - decorated(mockTarget, 'testMethod', descriptor); - - await expect(descriptor.value()).rejects.toThrow(error); - expect(core.captureException).toHaveBeenCalledWith(error, { - mechanism: { - handled: false, - type: 'auto.event.nestjs', - }, - }); - }); - - it('should skip wrapping for internal Sentry handlers', () => { - const internalTarget = { - ...mockTarget, - __SENTRY_INTERNAL__: true, - }; - - const decorated = wrappedOnEvent('test.event'); - decorated(internalTarget, 'testMethod', descriptor); - - expect(descriptor.value).toBe(originalHandler); - }); - }); - }); }); diff --git a/packages/nestjs/test/integrations/schedule.test.ts b/packages/nestjs/test/integrations/schedule.test.ts new file mode 100644 index 000000000000..3694499c1919 --- /dev/null +++ b/packages/nestjs/test/integrations/schedule.test.ts @@ -0,0 +1,113 @@ +import 'reflect-metadata'; +import * as core from '@sentry/core'; +import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'; +import { 
SentryNestScheduleInstrumentation } from '../../src/integrations/sentry-nest-schedule-instrumentation'; +import type { ScheduleDecoratorTarget } from '../../src/integrations/types'; + +describe('ScheduleInstrumentation', () => { + let instrumentation: SentryNestScheduleInstrumentation; + let mockTarget: ScheduleDecoratorTarget; + + beforeEach(() => { + instrumentation = new SentryNestScheduleInstrumentation(); + mockTarget = { + name: 'TestClass', + } as ScheduleDecoratorTarget; + vi.spyOn(core, 'captureException'); + vi.spyOn(core, 'withIsolationScope').mockImplementation(callback => { + return (callback as () => unknown)(); + }); + }); + + afterEach(() => { + vi.restoreAllMocks(); + }); + + describe.each([ + { decoratorName: 'Cron', fileIndex: 0, mechanismType: 'auto.function.nestjs.cron' }, + { decoratorName: 'Interval', fileIndex: 1, mechanismType: 'auto.function.nestjs.interval' }, + { decoratorName: 'Timeout', fileIndex: 2, mechanismType: 'auto.function.nestjs.timeout' }, + ])('$decoratorName decorator wrapping', ({ decoratorName, fileIndex, mechanismType }) => { + let wrappedDecorator: any; + let descriptor: PropertyDescriptor; + let originalHandler: vi.Mock; + let mockDecorator: vi.Mock; + + beforeEach(() => { + originalHandler = vi.fn(function testHandler() { + return 'result'; + }); + descriptor = { + value: originalHandler, + }; + + mockDecorator = vi.fn().mockImplementation(() => { + return (_target: any, _propertyKey: string, descriptor: PropertyDescriptor) => { + return descriptor; + }; + }); + + const moduleDef = instrumentation.init(); + const file = moduleDef.files[fileIndex]; + const moduleExports = { [decoratorName]: mockDecorator }; + file?.patch(moduleExports); + wrappedDecorator = moduleExports[decoratorName]; + }); + + it('should call withIsolationScope on handler execution', () => { + const decorated = wrappedDecorator('test-arg'); + decorated(mockTarget, 'testMethod', descriptor); + + descriptor.value(); + + 
expect(core.withIsolationScope).toHaveBeenCalled(); + expect(originalHandler).toHaveBeenCalled(); + }); + + it('should capture sync exceptions and rethrow', () => { + const error = new Error('Test error'); + originalHandler.mockImplementation(() => { + throw error; + }); + + const decorated = wrappedDecorator('test-arg'); + decorated(mockTarget, 'testMethod', descriptor); + + expect(() => descriptor.value()).toThrow(error); + expect(core.captureException).toHaveBeenCalledWith(error, { + mechanism: { + handled: false, + type: mechanismType, + }, + }); + }); + + it('should capture async exceptions and rethrow', async () => { + const error = new Error('Test error'); + originalHandler.mockReturnValue(Promise.reject(error)); + + const decorated = wrappedDecorator('test-arg'); + decorated(mockTarget, 'testMethod', descriptor); + + await expect(descriptor.value()).rejects.toThrow(error); + expect(core.captureException).toHaveBeenCalledWith(error, { + mechanism: { + handled: false, + type: mechanismType, + }, + }); + }); + + it('should skip wrapping for internal Sentry handlers', () => { + const internalTarget = { + ...mockTarget, + __SENTRY_INTERNAL__: true, + }; + + const decorated = wrappedDecorator('test-arg'); + decorated(internalTarget, 'testMethod', descriptor); + + expect(descriptor.value).toBe(originalHandler); + }); + }); +}); diff --git a/packages/nextjs/package.json b/packages/nextjs/package.json index cd95937bb2e4..b88a980854ee 100644 --- a/packages/nextjs/package.json +++ b/packages/nextjs/package.json @@ -77,7 +77,7 @@ }, "dependencies": { "@opentelemetry/api": "^1.9.0", - "@opentelemetry/semantic-conventions": "^1.37.0", + "@opentelemetry/semantic-conventions": "^1.40.0", "@rollup/plugin-commonjs": "28.0.1", "@sentry-internal/browser-utils": "10.43.0", "@sentry/bundler-plugin-core": "^5.1.0", @@ -112,8 +112,8 @@ "build:tarball": "npm pack", "circularDepCheck": "madge --circular src/index.client.ts && madge --circular src/edge/index.ts && madge --circular 
src/index.server.ts && madge --circular src/index.types.ts", "clean": "rimraf build coverage sentry-nextjs-*.tgz", - "fix": "oxlint . --fix", - "lint": "oxlint .", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --fix --type-aware", + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --type-aware", "lint:es-compatibility": "es-check es2020 ./build/cjs/*.js && es-check es2020 ./build/esm/*.js --module", "test": "yarn test:unit", "test:all": "run-s test:unit", diff --git a/packages/nextjs/src/client/clientNormalizationIntegration.ts b/packages/nextjs/src/client/clientNormalizationIntegration.ts index c92147c82bbe..30c57ee8fc02 100644 --- a/packages/nextjs/src/client/clientNormalizationIntegration.ts +++ b/packages/nextjs/src/client/clientNormalizationIntegration.ts @@ -18,7 +18,7 @@ export const nextjsClientStackFrameNormalizationIntegration = defineIntegration( iteratee: frame => { if (experimentalThirdPartyOriginStackFrames) { // Not sure why but access to global WINDOW from @sentry/Browser causes hideous ci errors - // eslint-disable-next-line no-restricted-globals + // oxlint-disable-next-line typescript/prefer-optional-chain no-restricted-globals const windowOrigin = typeof window !== 'undefined' && window.location ? 
window.location.origin : ''; // A filename starting with the local origin and not ending with JS is most likely JS in HTML which we do not want to rewrite if (frame.filename?.startsWith(windowOrigin) && !frame.filename.endsWith('.js')) { diff --git a/packages/nextjs/src/common/devErrorSymbolicationEventProcessor.ts b/packages/nextjs/src/common/devErrorSymbolicationEventProcessor.ts index 3b02d92d80fb..c238e13efdf4 100644 --- a/packages/nextjs/src/common/devErrorSymbolicationEventProcessor.ts +++ b/packages/nextjs/src/common/devErrorSymbolicationEventProcessor.ts @@ -73,6 +73,7 @@ export async function devErrorSymbolicationEventProcessor(event: Event, hint: Ev // Due to changes across Next.js versions, there are a million things that can go wrong here so we just try-catch the // entire event processor. Symbolicated stack traces are just a nice to have. try { + // oxlint-disable-next-line typescript/prefer-optional-chain if (hint.originalException && hint.originalException instanceof Error && hint.originalException.stack) { const frames = stackTraceParser.parse(hint.originalException.stack); const nextJsVersion = globalWithInjectedValues._sentryNextJsVersion; diff --git a/packages/nextjs/src/common/pages-router-instrumentation/_error.ts b/packages/nextjs/src/common/pages-router-instrumentation/_error.ts index a82508d22e62..80103b42568f 100644 --- a/packages/nextjs/src/common/pages-router-instrumentation/_error.ts +++ b/packages/nextjs/src/common/pages-router-instrumentation/_error.ts @@ -48,6 +48,15 @@ export async function captureUnderscoreErrorException(contextOrProps: ContextOrP // return the existing event ID instead of capturing it again (needed for lastEventId() to work) if (err && isAlreadyCaptured(err)) { waitUntil(flushSafelyWithTimeout()); + + const storedEventId = + typeof err === 'object' ? 
(err as unknown as Record).__sentry_event_id__ : undefined; + + if (typeof storedEventId === 'string') { + getIsolationScope().setLastEventId(storedEventId); + return storedEventId; + } + return getIsolationScope().lastEventId(); } diff --git a/packages/nextjs/src/common/pages-router-instrumentation/wrapPageComponentWithSentry.ts b/packages/nextjs/src/common/pages-router-instrumentation/wrapPageComponentWithSentry.ts index 693341024726..ddbb123b458b 100644 --- a/packages/nextjs/src/common/pages-router-instrumentation/wrapPageComponentWithSentry.ts +++ b/packages/nextjs/src/common/pages-router-instrumentation/wrapPageComponentWithSentry.ts @@ -1,4 +1,10 @@ -import { captureException, extractTraceparentData, getCurrentScope, withIsolationScope } from '@sentry/core'; +import { + addNonEnumerableProperty, + captureException, + extractTraceparentData, + getCurrentScope, + withIsolationScope, +} from '@sentry/core'; interface FunctionComponent { (...args: unknown[]): unknown; @@ -11,6 +17,12 @@ interface ClassComponent { }; } +function storeCapturedEventIdOnError(error: unknown, eventId: string | undefined): void { + if (error && typeof error === 'object') { + addNonEnumerableProperty(error as Record, '__sentry_event_id__', eventId); + } +} + function isReactClassComponent(target: unknown): target is ClassComponent { // eslint-disable-next-line @typescript-eslint/no-unsafe-member-access return typeof target === 'function' && target?.prototype?.isReactComponent; @@ -45,12 +57,13 @@ export function wrapPageComponentWithSentry(pageComponent: FunctionComponent | C try { return super.render(...args); } catch (e) { - captureException(e, { + const eventId = captureException(e, { mechanism: { handled: false, type: 'auto.function.nextjs.page_class', }, }); + storeCapturedEventIdOnError(e, eventId); throw e; } }); @@ -75,12 +88,13 @@ export function wrapPageComponentWithSentry(pageComponent: FunctionComponent | C try { return target.apply(thisArg, argArray); } catch (e) { - 
captureException(e, { + const eventId = captureException(e, { mechanism: { handled: false, type: 'auto.function.nextjs.page_function', }, }); + storeCapturedEventIdOnError(e, eventId); throw e; } }); diff --git a/packages/nextjs/src/common/withServerActionInstrumentation.ts b/packages/nextjs/src/common/withServerActionInstrumentation.ts index 2096d1004e01..5a2c884a8f85 100644 --- a/packages/nextjs/src/common/withServerActionInstrumentation.ts +++ b/packages/nextjs/src/common/withServerActionInstrumentation.ts @@ -120,6 +120,7 @@ async function withServerActionInstrumentationImplementation { + // oxlint-disable-next-line typescript/await-thenable -- callback may be async at runtime const result = await handleCallbackErrors(callback, error => { if (isNotFoundNavigationError(error)) { // We don't want to report "not-found"s diff --git a/packages/nextjs/src/config/handleRunAfterProductionCompile.ts b/packages/nextjs/src/config/handleRunAfterProductionCompile.ts index 901ba9dffa6e..f6e5a3b21617 100644 --- a/packages/nextjs/src/config/handleRunAfterProductionCompile.ts +++ b/packages/nextjs/src/config/handleRunAfterProductionCompile.ts @@ -1,5 +1,7 @@ import type { createSentryBuildPluginManager as createSentryBuildPluginManagerType } from '@sentry/bundler-plugin-core'; import { loadModule } from '@sentry/core'; +import * as fs from 'fs'; +import * as path from 'path'; import { getBuildPluginOptions } from './getBuildPluginOptions'; import type { SentryBuildOptions } from './types'; @@ -60,4 +62,65 @@ export async function handleRunAfterProductionCompile( prepareArtifacts: false, }); await sentryBuildPluginManager.deleteArtifacts(); + + // After deleting source map files in turbopack builds, strip any remaining + // sourceMappingURL comments from client JS files. 
Without this, browsers request + // the deleted .map files, and in Next.js 16 (turbopack) those requests fall through + // to the app router instead of returning 404, which can break middleware-dependent + // features like Clerk auth. + const deleteSourcemapsAfterUpload = sentryBuildOptions.sourcemaps?.deleteSourcemapsAfterUpload ?? false; + if (deleteSourcemapsAfterUpload && buildTool === 'turbopack') { + await stripSourceMappingURLComments(path.join(distDir, 'static'), sentryBuildOptions.debug); + } +} + +const SOURCEMAPPING_URL_COMMENT_REGEX = /\n?\/\/[#@] sourceMappingURL=[^\n]+$/; +const CSS_SOURCEMAPPING_URL_COMMENT_REGEX = /\n?\/\*[#@] sourceMappingURL=[^\n]+\*\/$/; + +/** + * Strips sourceMappingURL comments from all JS/MJS/CJS/CSS files in the given directory. + * This prevents browsers from requesting deleted .map files. + */ +export async function stripSourceMappingURLComments(staticDir: string, debug?: boolean): Promise { + let entries: string[]; + try { + entries = await fs.promises.readdir(staticDir, { recursive: true }).then(e => e.map(f => String(f))); + } catch { + // Directory may not exist (e.g., no static output) + return; + } + + const filesToProcess = entries.filter( + f => f.endsWith('.js') || f.endsWith('.mjs') || f.endsWith('.cjs') || f.endsWith('.css'), + ); + + const results = await Promise.all( + filesToProcess.map(async file => { + const filePath = path.join(staticDir, file); + try { + const content = await fs.promises.readFile(filePath, 'utf-8'); + + const isCSS = file.endsWith('.css'); + const regex = isCSS ? 
CSS_SOURCEMAPPING_URL_COMMENT_REGEX : SOURCEMAPPING_URL_COMMENT_REGEX; + + const strippedContent = content.replace(regex, ''); + if (strippedContent !== content) { + await fs.promises.writeFile(filePath, strippedContent, 'utf-8'); + return file; + } + } catch { + // Skip files that can't be read/written + } + return undefined; + }), + ); + + const strippedCount = results.filter(Boolean).length; + + if (debug && strippedCount > 0) { + // eslint-disable-next-line no-console + console.debug( + `[@sentry/nextjs] Stripped sourceMappingURL comments from ${String(strippedCount)} file(s) to prevent requests for deleted source maps.`, + ); + } } diff --git a/packages/nextjs/src/server/handleOnSpanStart.ts b/packages/nextjs/src/server/handleOnSpanStart.ts index c8e2215b2aaf..21af973f2b2f 100644 --- a/packages/nextjs/src/server/handleOnSpanStart.ts +++ b/packages/nextjs/src/server/handleOnSpanStart.ts @@ -16,6 +16,7 @@ import { addHeadersAsAttributes } from '../common/utils/addHeadersAsAttributes'; import { dropMiddlewareTunnelRequests } from '../common/utils/dropMiddlewareTunnelRequests'; import { maybeEnhanceServerComponentSpanName } from '../common/utils/tracingUtils'; import { maybeStartCronCheckIn } from './vercelCronsMonitoring'; +import { maybeEnrichQueueConsumerSpan, maybeEnrichQueueProducerSpan } from './vercelQueuesMonitoring'; /** * Handles the on span start event for Next.js spans. 
@@ -56,6 +57,9 @@ export function handleOnSpanStart(span: Span): void { // Check if this is a Vercel cron request and start a check-in maybeStartCronCheckIn(rootSpan, route); + + // Enrich queue consumer spans (Vercel Queue push delivery via CloudEvent) + maybeEnrichQueueConsumerSpan(rootSpan); } } @@ -96,4 +100,7 @@ export function handleOnSpanStart(span: Span): void { } maybeEnhanceServerComponentSpanName(span, spanAttributes, rootSpanAttributes); + + // Enrich outgoing http.client spans targeting the Vercel Queues API (producer) + maybeEnrichQueueProducerSpan(span); } diff --git a/packages/nextjs/src/server/index.ts b/packages/nextjs/src/server/index.ts index eca9b586423f..343cfd8bb218 100644 --- a/packages/nextjs/src/server/index.ts +++ b/packages/nextjs/src/server/index.ts @@ -37,6 +37,7 @@ import { distDirRewriteFramesIntegration } from './distDirRewriteFramesIntegrati import { handleOnSpanStart } from './handleOnSpanStart'; import { prepareSafeIdGeneratorContext } from './prepareSafeIdGeneratorContext'; import { maybeCompleteCronCheckIn } from './vercelCronsMonitoring'; +import { maybeCleanupQueueSpan } from './vercelQueuesMonitoring'; export * from '@sentry/node'; @@ -193,6 +194,7 @@ export function init(options: NodeOptions): NodeClient | undefined { client?.on('spanStart', handleOnSpanStart); client?.on('spanEnd', maybeCompleteCronCheckIn); + client?.on('spanEnd', maybeCleanupQueueSpan); getGlobalScope().addEventProcessor( Object.assign( diff --git a/packages/nextjs/src/server/vercelQueuesMonitoring.ts b/packages/nextjs/src/server/vercelQueuesMonitoring.ts new file mode 100644 index 000000000000..cfe367c46470 --- /dev/null +++ b/packages/nextjs/src/server/vercelQueuesMonitoring.ts @@ -0,0 +1,124 @@ +import type { Span } from '@sentry/core'; +import { getIsolationScope, spanToJSON } from '@sentry/core'; + +// OTel Messaging semantic convention attribute keys +const ATTR_MESSAGING_SYSTEM = 'messaging.system'; +const ATTR_MESSAGING_DESTINATION_NAME = 
'messaging.destination.name'; +const ATTR_MESSAGING_MESSAGE_ID = 'messaging.message.id'; +const ATTR_MESSAGING_OPERATION_NAME = 'messaging.operation.name'; +const ATTR_MESSAGING_CONSUMER_GROUP_NAME = 'messaging.consumer.group.name'; +const ATTR_MESSAGING_MESSAGE_DELIVERY_COUNT = 'messaging.message.delivery_count'; + +// Marker attribute to track enriched spans for cleanup +const ATTR_SENTRY_QUEUE_ENRICHED = 'sentry.queue.enriched'; + +/** + * Checks if the incoming request is a Vercel Queue consumer callback (push mode) + * and enriches the http.server span with OTel messaging semantic attributes. + * + * Vercel Queues push delivery sends a CloudEvent POST with the header: + * ce-type: com.vercel.queue.v2beta + * along with ce-vqs* headers carrying queue metadata. + */ +export function maybeEnrichQueueConsumerSpan(span: Span): void { + const headers = getIsolationScope().getScopeData().sdkProcessingMetadata?.normalizedRequest?.headers as + | Record + | undefined; + + if (!headers) { + return; + } + + const ceType = Array.isArray(headers['ce-type']) ? 
headers['ce-type'][0] : headers['ce-type']; + if (ceType !== 'com.vercel.queue.v2beta') { + return; + } + + const queueName = getHeader(headers, 'ce-vqsqueuename'); + const messageId = getHeader(headers, 'ce-vqsmessageid'); + const consumerGroup = getHeader(headers, 'ce-vqsconsumergroup'); + const deliveryCount = getHeader(headers, 'ce-vqsdeliverycount'); + + span.setAttribute(ATTR_MESSAGING_SYSTEM, 'vercel.queue'); + span.setAttribute(ATTR_MESSAGING_OPERATION_NAME, 'process'); + + if (queueName) { + span.setAttribute(ATTR_MESSAGING_DESTINATION_NAME, queueName); + } + + if (messageId) { + span.setAttribute(ATTR_MESSAGING_MESSAGE_ID, messageId); + } + + if (consumerGroup) { + span.setAttribute(ATTR_MESSAGING_CONSUMER_GROUP_NAME, consumerGroup); + } + + if (deliveryCount) { + const count = parseInt(deliveryCount, 10); + if (!isNaN(count)) { + span.setAttribute(ATTR_MESSAGING_MESSAGE_DELIVERY_COUNT, count); + } + } + + // Mark span so we can clean up marker on spanEnd + span.setAttribute(ATTR_SENTRY_QUEUE_ENRICHED, true); +} + +/** + * Checks if an outgoing http.client span targets the Vercel Queues API + * and enriches it with OTel messaging semantic attributes (producer side). + * + * The Vercel Queues API lives at *.vercel-queue.com/api/v3/topic/. + * We use domain-based detection to avoid false positives from user routes. 
+ */ +export function maybeEnrichQueueProducerSpan(span: Span): void { + const spanData = spanToJSON(span).data; + + // http.client spans have url.full attribute + const urlFull = spanData?.['url.full'] as string | undefined; + if (!urlFull) { + return; + } + + let parsed: URL; + try { + parsed = new URL(urlFull); + } catch { + return; + } + + if (parsed.hostname !== 'vercel-queue.com' && !parsed.hostname.endsWith('.vercel-queue.com')) { + return; + } + + // Extract topic from path: /api/v3/topic/[/] + const topicMatch = parsed.pathname.match(/^\/api\/v3\/topic\/([^/]+)/); + if (!topicMatch) { + return; + } + + const topic = decodeURIComponent(topicMatch[1]!); + + span.setAttribute(ATTR_MESSAGING_SYSTEM, 'vercel.queue'); + span.setAttribute(ATTR_MESSAGING_DESTINATION_NAME, topic); + span.setAttribute(ATTR_MESSAGING_OPERATION_NAME, 'send'); + + // Mark span so we can clean up marker on spanEnd + span.setAttribute(ATTR_SENTRY_QUEUE_ENRICHED, true); +} + +/** + * Cleans up the internal marker attribute from enriched queue spans on end. + */ +export function maybeCleanupQueueSpan(span: Span): void { + const spanData = spanToJSON(span).data; + if (spanData?.[ATTR_SENTRY_QUEUE_ENRICHED]) { + span.setAttribute(ATTR_SENTRY_QUEUE_ENRICHED, undefined); + } +} + +function getHeader(headers: Record, name: string): string | undefined { + const value = headers[name]; + return Array.isArray(value) ? 
value[0] : value; +} diff --git a/packages/nextjs/test/common/pages-router-instrumentation/captureUnderscoreErrorException.test.ts b/packages/nextjs/test/common/pages-router-instrumentation/captureUnderscoreErrorException.test.ts index 250052d3c991..796f4b2be663 100644 --- a/packages/nextjs/test/common/pages-router-instrumentation/captureUnderscoreErrorException.test.ts +++ b/packages/nextjs/test/common/pages-router-instrumentation/captureUnderscoreErrorException.test.ts @@ -3,7 +3,7 @@ import { captureUnderscoreErrorException } from '../../../src/common/pages-route let storedLastEventId: string | undefined = undefined; -const mockCaptureException = vi.fn(() => 'test-event-id'); +const mockCaptureException = vi.fn((_exception?: unknown, _hint?: unknown) => 'test-event-id'); const mockWithScope = vi.fn((callback: (scope: any) => any) => { const mockScope = { setSDKProcessingMetadata: vi.fn(), @@ -21,7 +21,7 @@ vi.mock('@sentry/core', async () => { const actual = await vi.importActual('@sentry/core'); return { ...actual, - captureException: (...args: unknown[]) => mockCaptureException(...args), + captureException: (exception: unknown, hint?: unknown) => mockCaptureException(exception, hint), withScope: (callback: (scope: any) => any) => mockWithScope(callback), httpRequestToRequestData: vi.fn(() => ({ url: 'http://test.com' })), lastEventId: () => mockGetIsolationScope().lastEventId(), @@ -146,6 +146,23 @@ describe('captureUnderscoreErrorException', () => { expect(mockCaptureException).not.toHaveBeenCalled(); }); + it('should prefer the stored event ID on already captured errors', async () => { + storedLastEventId = 'scope-event-id'; + + const error = new Error('Already captured render error'); + (error as any).__sentry_captured__ = true; + (error as any).__sentry_event_id__ = 'stored-event-id'; + + const eventId = await captureUnderscoreErrorException({ + err: error, + pathname: '/test', + res: { statusCode: 500 } as any, + }); + + 
expect(eventId).toBe('stored-event-id'); + expect(mockCaptureException).not.toHaveBeenCalled(); + }); + it('should capture string errors even if they were marked as captured', async () => { // String errors can't have __sentry_captured__ property, so they should always be captured const errorString = 'String error'; diff --git a/packages/nextjs/test/config/conflictingDebugOptions.test.ts b/packages/nextjs/test/config/conflictingDebugOptions.test.ts index 8c0920382c4a..5e7a46997b2b 100644 --- a/packages/nextjs/test/config/conflictingDebugOptions.test.ts +++ b/packages/nextjs/test/config/conflictingDebugOptions.test.ts @@ -20,7 +20,15 @@ describe('debug: true + removeDebugLogging warning', () => { let originalLocation: unknown; let originalAddEventListener: unknown; - beforeAll(() => { + beforeAll(async () => { + // Pre-warm V8 compilation cache for the large SDK module graphs. + // Without this, the first dynamic import after vi.resetModules() can hang + // because vitest needs to compile the entire module graph from scratch. 
+ await import('../../src/client/index.js'); + await import('../../src/server/index.js'); + await import('../../src/edge/index.js'); + vi.resetModules(); + dom = new JSDOM('', { url: 'https://example.com/' }); originalDocument = (globalThis as any).document; diff --git a/packages/nextjs/test/config/handleRunAfterProductionCompile.test.ts b/packages/nextjs/test/config/handleRunAfterProductionCompile.test.ts index 2d1769986158..3d551dfb6c40 100644 --- a/packages/nextjs/test/config/handleRunAfterProductionCompile.test.ts +++ b/packages/nextjs/test/config/handleRunAfterProductionCompile.test.ts @@ -1,6 +1,12 @@ import { loadModule } from '@sentry/core'; -import { beforeEach, describe, expect, it, vi } from 'vitest'; -import { handleRunAfterProductionCompile } from '../../src/config/handleRunAfterProductionCompile'; +import * as fs from 'fs'; +import * as os from 'os'; +import * as path from 'path'; +import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'; +import { + handleRunAfterProductionCompile, + stripSourceMappingURLComments, +} from '../../src/config/handleRunAfterProductionCompile'; import type { SentryBuildOptions } from '../../src/config/types'; vi.mock('@sentry/core', () => ({ @@ -305,6 +311,85 @@ describe('handleRunAfterProductionCompile', () => { }); }); + describe('sourceMappingURL stripping', () => { + let readdirSpy: ReturnType; + + beforeEach(() => { + // Spy on fs.promises.readdir to detect whether stripping was attempted. + // The actual readdir will fail (dir doesn't exist), which is fine — we just + // need to know if it was called. 
+ readdirSpy = vi.spyOn(fs.promises, 'readdir').mockRejectedValue(new Error('ENOENT')); + }); + + afterEach(() => { + readdirSpy.mockRestore(); + }); + + it('strips sourceMappingURL comments for turbopack builds with deleteSourcemapsAfterUpload', async () => { + await handleRunAfterProductionCompile( + { + releaseName: 'test-release', + distDir: '/path/to/.next', + buildTool: 'turbopack', + }, + { + ...mockSentryBuildOptions, + sourcemaps: { deleteSourcemapsAfterUpload: true }, + }, + ); + + expect(readdirSpy).toHaveBeenCalledWith( + path.join('/path/to/.next', 'static'), + expect.objectContaining({ recursive: true }), + ); + }); + + it('does NOT strip sourceMappingURL comments for webpack builds even with deleteSourcemapsAfterUpload', async () => { + await handleRunAfterProductionCompile( + { + releaseName: 'test-release', + distDir: '/path/to/.next', + buildTool: 'webpack', + }, + { + ...mockSentryBuildOptions, + sourcemaps: { deleteSourcemapsAfterUpload: true }, + }, + ); + + expect(readdirSpy).not.toHaveBeenCalled(); + }); + + it('does NOT strip sourceMappingURL comments when deleteSourcemapsAfterUpload is false', async () => { + await handleRunAfterProductionCompile( + { + releaseName: 'test-release', + distDir: '/path/to/.next', + buildTool: 'turbopack', + }, + { + ...mockSentryBuildOptions, + sourcemaps: { deleteSourcemapsAfterUpload: false }, + }, + ); + + expect(readdirSpy).not.toHaveBeenCalled(); + }); + + it('does NOT strip sourceMappingURL comments when deleteSourcemapsAfterUpload is undefined', async () => { + await handleRunAfterProductionCompile( + { + releaseName: 'test-release', + distDir: '/path/to/.next', + buildTool: 'turbopack', + }, + mockSentryBuildOptions, + ); + + expect(readdirSpy).not.toHaveBeenCalled(); + }); + }); + describe('path handling', () => { it('correctly passes distDir to debug ID injection', async () => { const customDistDir = '/custom/dist/path'; @@ -343,3 +428,112 @@ describe('handleRunAfterProductionCompile', () => { }); 
}); }); + +describe('stripSourceMappingURLComments', () => { + let tmpDir: string; + + beforeEach(async () => { + tmpDir = await fs.promises.mkdtemp(path.join(os.tmpdir(), 'sentry-test-')); + await fs.promises.mkdir(path.join(tmpDir, 'chunks'), { recursive: true }); + }); + + afterEach(async () => { + await fs.promises.rm(tmpDir, { recursive: true, force: true }); + }); + + it('strips sourceMappingURL comment from JS files', async () => { + const filePath = path.join(tmpDir, 'chunks', 'abc123.js'); + await fs.promises.writeFile(filePath, 'console.log("hello");\n//# sourceMappingURL=abc123.js.map'); + + await stripSourceMappingURLComments(tmpDir); + + const content = await fs.promises.readFile(filePath, 'utf-8'); + expect(content).toBe('console.log("hello");'); + expect(content).not.toContain('sourceMappingURL'); + }); + + it('strips sourceMappingURL comment from MJS files', async () => { + const filePath = path.join(tmpDir, 'chunks', 'module.mjs'); + await fs.promises.writeFile(filePath, 'export default 42;\n//# sourceMappingURL=module.mjs.map'); + + await stripSourceMappingURLComments(tmpDir); + + const content = await fs.promises.readFile(filePath, 'utf-8'); + expect(content).toBe('export default 42;'); + }); + + it('strips sourceMappingURL comment from CSS files', async () => { + const filePath = path.join(tmpDir, 'chunks', 'styles.css'); + await fs.promises.writeFile(filePath, '.foo { color: red; }\n/*# sourceMappingURL=styles.css.map */'); + + await stripSourceMappingURLComments(tmpDir); + + const content = await fs.promises.readFile(filePath, 'utf-8'); + expect(content).toBe('.foo { color: red; }'); + }); + + it('does not modify files without sourceMappingURL comments', async () => { + const filePath = path.join(tmpDir, 'chunks', 'clean.js'); + const originalContent = 'console.log("no source map ref");'; + await fs.promises.writeFile(filePath, originalContent); + + await stripSourceMappingURLComments(tmpDir); + + const content = await 
fs.promises.readFile(filePath, 'utf-8'); + expect(content).toBe(originalContent); + }); + + it('handles files in nested subdirectories', async () => { + const nestedDir = path.join(tmpDir, 'chunks', 'app', 'page'); + await fs.promises.mkdir(nestedDir, { recursive: true }); + const filePath = path.join(nestedDir, 'layout.js'); + await fs.promises.writeFile(filePath, 'var x = 1;\n//# sourceMappingURL=layout.js.map'); + + await stripSourceMappingURLComments(tmpDir); + + const content = await fs.promises.readFile(filePath, 'utf-8'); + expect(content).toBe('var x = 1;'); + }); + + it('handles non-existent directory gracefully', async () => { + await expect(stripSourceMappingURLComments('/nonexistent/path')).resolves.toBeUndefined(); + }); + + it('handles sourceMappingURL with @-style comment', async () => { + const filePath = path.join(tmpDir, 'chunks', 'legacy.js'); + await fs.promises.writeFile(filePath, 'var y = 2;\n//@ sourceMappingURL=legacy.js.map'); + + await stripSourceMappingURLComments(tmpDir); + + const content = await fs.promises.readFile(filePath, 'utf-8'); + expect(content).toBe('var y = 2;'); + }); + + it('ignores non-JS/CSS files', async () => { + const filePath = path.join(tmpDir, 'chunks', 'data.json'); + const originalContent = '{"key": "value"}\n//# sourceMappingURL=data.json.map'; + await fs.promises.writeFile(filePath, originalContent); + + await stripSourceMappingURLComments(tmpDir); + + const content = await fs.promises.readFile(filePath, 'utf-8'); + expect(content).toBe(originalContent); + }); + + it('processes multiple files concurrently', async () => { + const files = ['a.js', 'b.mjs', 'c.cjs', 'd.css']; + for (const file of files) { + const ext = path.extname(file); + const comment = ext === '.css' ? 
`/*# sourceMappingURL=${file}.map */` : `//# sourceMappingURL=${file}.map`; + await fs.promises.writeFile(path.join(tmpDir, file), `content_${file}\n${comment}`); + } + + await stripSourceMappingURLComments(tmpDir); + + for (const file of files) { + const content = await fs.promises.readFile(path.join(tmpDir, file), 'utf-8'); + expect(content).toBe(`content_${file}`); + expect(content).not.toContain('sourceMappingURL'); + } + }); +}); diff --git a/packages/nextjs/test/server/vercelQueuesMonitoring.test.ts b/packages/nextjs/test/server/vercelQueuesMonitoring.test.ts new file mode 100644 index 000000000000..397e49943edf --- /dev/null +++ b/packages/nextjs/test/server/vercelQueuesMonitoring.test.ts @@ -0,0 +1,194 @@ +import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'; + +let mockHeaders: Record | undefined; + +vi.mock('@sentry/core', () => ({ + getIsolationScope: () => ({ + getScopeData: () => ({ + sdkProcessingMetadata: { + normalizedRequest: mockHeaders !== undefined ? 
{ headers: mockHeaders } : undefined, + }, + }), + }), + spanToJSON: (span: { _data: Record }) => ({ + data: span._data, + }), +})); + +import { + maybeCleanupQueueSpan, + maybeEnrichQueueConsumerSpan, + maybeEnrichQueueProducerSpan, +} from '../../src/server/vercelQueuesMonitoring'; + +function createMockSpan(data: Record = {}): { + _data: Record; + setAttribute: (key: string, value: unknown) => void; +} { + const _data = { ...data }; + return { + _data, + setAttribute: (key: string, value: unknown) => { + if (value === undefined) { + // eslint-disable-next-line @typescript-eslint/no-dynamic-delete + delete _data[key]; + } else { + _data[key] = value; + } + }, + }; +} + +describe('vercelQueuesMonitoring', () => { + beforeEach(() => { + mockHeaders = undefined; + }); + + afterEach(() => { + vi.clearAllMocks(); + }); + + describe('maybeEnrichQueueConsumerSpan', () => { + it('does nothing when there are no headers', () => { + mockHeaders = undefined; + const span = createMockSpan(); + maybeEnrichQueueConsumerSpan(span as any); + expect(span._data).toEqual({}); + }); + + it('does nothing when ce-type header is missing', () => { + mockHeaders = { 'content-type': 'application/json' }; + const span = createMockSpan(); + maybeEnrichQueueConsumerSpan(span as any); + expect(span._data).toEqual({}); + }); + + it('does nothing when ce-type is not com.vercel.queue.v2beta', () => { + mockHeaders = { 'ce-type': 'com.other.event' }; + const span = createMockSpan(); + maybeEnrichQueueConsumerSpan(span as any); + expect(span._data).toEqual({}); + }); + + it('enriches span with messaging attributes when ce-type matches', () => { + mockHeaders = { + 'ce-type': 'com.vercel.queue.v2beta', + 'ce-vqsqueuename': 'orders', + 'ce-vqsmessageid': 'msg-123', + 'ce-vqsconsumergroup': 'default', + 'ce-vqsdeliverycount': '3', + }; + const span = createMockSpan(); + maybeEnrichQueueConsumerSpan(span as any); + + expect(span._data['messaging.system']).toBe('vercel.queue'); + 
expect(span._data['messaging.operation.name']).toBe('process'); + expect(span._data['messaging.destination.name']).toBe('orders'); + expect(span._data['messaging.message.id']).toBe('msg-123'); + expect(span._data['messaging.consumer.group.name']).toBe('default'); + expect(span._data['messaging.message.delivery_count']).toBe(3); + expect(span._data['sentry.queue.enriched']).toBe(true); + }); + + it('handles missing optional headers gracefully', () => { + mockHeaders = { 'ce-type': 'com.vercel.queue.v2beta' }; + const span = createMockSpan(); + maybeEnrichQueueConsumerSpan(span as any); + + expect(span._data['messaging.system']).toBe('vercel.queue'); + expect(span._data['messaging.operation.name']).toBe('process'); + expect(span._data['messaging.destination.name']).toBeUndefined(); + expect(span._data['messaging.message.id']).toBeUndefined(); + }); + + it('ignores non-numeric delivery count', () => { + mockHeaders = { + 'ce-type': 'com.vercel.queue.v2beta', + 'ce-vqsdeliverycount': 'not-a-number', + }; + const span = createMockSpan(); + maybeEnrichQueueConsumerSpan(span as any); + + expect(span._data['messaging.message.delivery_count']).toBeUndefined(); + }); + }); + + describe('maybeEnrichQueueProducerSpan', () => { + it('does nothing when url.full is missing', () => { + const span = createMockSpan(); + maybeEnrichQueueProducerSpan(span as any); + expect(span._data).toEqual({}); + }); + + it('does nothing for non-vercel-queue URLs', () => { + const span = createMockSpan({ 'url.full': 'https://example.com/api/v3/topic/orders' }); + maybeEnrichQueueProducerSpan(span as any); + expect(span._data['messaging.system']).toBeUndefined(); + }); + + it('does nothing for hostname that is a suffix match but not a subdomain', () => { + const span = createMockSpan({ 'url.full': 'https://evil-vercel-queue.com/api/v3/topic/orders' }); + maybeEnrichQueueProducerSpan(span as any); + expect(span._data['messaging.system']).toBeUndefined(); + }); + + it('does nothing for 
vercel-queue.com URLs without topic path', () => { + const span = createMockSpan({ 'url.full': 'https://queue.vercel-queue.com/api/v3/other' }); + maybeEnrichQueueProducerSpan(span as any); + expect(span._data['messaging.system']).toBeUndefined(); + }); + + it('enriches span for vercel-queue.com topic URLs', () => { + const span = createMockSpan({ 'url.full': 'https://queue.vercel-queue.com/api/v3/topic/orders' }); + maybeEnrichQueueProducerSpan(span as any); + + expect(span._data['messaging.system']).toBe('vercel.queue'); + expect(span._data['messaging.destination.name']).toBe('orders'); + expect(span._data['messaging.operation.name']).toBe('send'); + expect(span._data['sentry.queue.enriched']).toBe(true); + }); + + it('handles URL-encoded topic names', () => { + const span = createMockSpan({ + 'url.full': 'https://queue.vercel-queue.com/api/v3/topic/my%20topic', + }); + maybeEnrichQueueProducerSpan(span as any); + + expect(span._data['messaging.destination.name']).toBe('my topic'); + }); + + it('extracts topic when URL has additional path segments', () => { + const span = createMockSpan({ + 'url.full': 'https://queue.vercel-queue.com/api/v3/topic/orders/msg-123', + }); + maybeEnrichQueueProducerSpan(span as any); + + expect(span._data['messaging.destination.name']).toBe('orders'); + }); + + it('handles invalid URLs gracefully', () => { + const span = createMockSpan({ 'url.full': 'not-a-url' }); + maybeEnrichQueueProducerSpan(span as any); + expect(span._data['messaging.system']).toBeUndefined(); + }); + }); + + describe('maybeCleanupQueueSpan', () => { + it('removes the enriched marker attribute', () => { + const span = createMockSpan({ + 'messaging.system': 'vercel.queue', + 'sentry.queue.enriched': true, + }); + maybeCleanupQueueSpan(span as any); + + expect(span._data['sentry.queue.enriched']).toBeUndefined(); + expect(span._data['messaging.system']).toBe('vercel.queue'); + }); + + it('does nothing for non-enriched spans', () => { + const span = 
createMockSpan({ 'some.attribute': 'value' }); + maybeCleanupQueueSpan(span as any); + expect(span._data).toEqual({ 'some.attribute': 'value' }); + }); + }); +}); diff --git a/packages/node-core/package.json b/packages/node-core/package.json index d68b587a8993..f49f700535e8 100644 --- a/packages/node-core/package.json +++ b/packages/node-core/package.json @@ -101,16 +101,16 @@ "dependencies": { "@sentry/core": "10.43.0", "@sentry/opentelemetry": "10.43.0", - "import-in-the-middle": "^2.0.6" + "import-in-the-middle": "^3.0.0" }, "devDependencies": { "@opentelemetry/api": "^1.9.0", - "@opentelemetry/context-async-hooks": "^2.5.1", - "@opentelemetry/core": "^2.5.1", - "@opentelemetry/instrumentation": "^0.211.0", - "@opentelemetry/resources": "^2.5.1", - "@opentelemetry/sdk-trace-base": "^2.5.1", - "@opentelemetry/semantic-conventions": "^1.39.0", + "@opentelemetry/context-async-hooks": "^2.6.0", + "@opentelemetry/core": "^2.6.0", + "@opentelemetry/instrumentation": "^0.213.0", + "@opentelemetry/resources": "^2.6.0", + "@opentelemetry/sdk-trace-base": "^2.6.0", + "@opentelemetry/semantic-conventions": "^1.40.0", "@types/node": "^18.19.1" }, "scripts": { @@ -126,8 +126,8 @@ "build:tarball": "npm pack", "circularDepCheck": "madge --circular src/index.ts", "clean": "rimraf build coverage sentry-node-*.tgz", - "fix": "oxlint . --fix", - "lint": "oxlint .", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --fix --type-aware", + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . 
--type-aware", "lint:es-compatibility": "es-check es2022 ./build/cjs/*.js && es-check es2022 ./build/esm/*.js --module", "test": "yarn test:unit", "test:unit": "vitest run", diff --git a/packages/node-core/src/cron/node-cron.ts b/packages/node-core/src/cron/node-cron.ts index 763b260353cf..71bf2d913313 100644 --- a/packages/node-core/src/cron/node-cron.ts +++ b/packages/node-core/src/cron/node-cron.ts @@ -58,6 +58,7 @@ export function instrumentNodeCron( // We have to manually catch here and capture the exception because node-cron swallows errors // https://github.com/node-cron/node-cron/issues/399 try { + // oxlint-disable-next-line typescript/await-thenable -- callback may be async at runtime return await callback(...args); } catch (e) { captureException(e, { diff --git a/packages/node-core/src/cron/node-schedule.ts b/packages/node-core/src/cron/node-schedule.ts index 35db51618b9a..f174ef7c904b 100644 --- a/packages/node-core/src/cron/node-schedule.ts +++ b/packages/node-core/src/cron/node-schedule.ts @@ -49,6 +49,7 @@ export function instrumentNodeSchedule(lib: T & NodeSchedule): T { return withMonitor( monitorSlug, async () => { + // oxlint-disable-next-line typescript/await-thenable -- callback may be async at runtime await callback?.(); }, { diff --git a/packages/node-core/src/integrations/http/SentryHttpInstrumentation.ts b/packages/node-core/src/integrations/http/SentryHttpInstrumentation.ts index f8a10b0a1f8b..b25d32138aa9 100644 --- a/packages/node-core/src/integrations/http/SentryHttpInstrumentation.ts +++ b/packages/node-core/src/integrations/http/SentryHttpInstrumentation.ts @@ -1,24 +1,48 @@ +/* eslint-disable max-lines */ import type { ChannelListener } from 'node:diagnostics_channel'; import { subscribe, unsubscribe } from 'node:diagnostics_channel'; +import { errorMonitor } from 'node:events'; import type * as http from 'node:http'; import type * as https from 'node:https'; -import { context } from '@opentelemetry/api'; +import { context, 
SpanStatusCode, trace } from '@opentelemetry/api'; import { isTracingSuppressed } from '@opentelemetry/core'; import type { InstrumentationConfig } from '@opentelemetry/instrumentation'; import { InstrumentationBase, InstrumentationNodeModuleDefinition } from '@opentelemetry/instrumentation'; -import type { Span } from '@sentry/core'; -import { debug, LRUMap, SDK_VERSION } from '@sentry/core'; +import { + ATTR_HTTP_RESPONSE_STATUS_CODE, + ATTR_NETWORK_PEER_ADDRESS, + ATTR_NETWORK_PEER_PORT, + ATTR_NETWORK_PROTOCOL_VERSION, + ATTR_NETWORK_TRANSPORT, + ATTR_URL_FULL, + ATTR_USER_AGENT_ORIGINAL, + SEMATTRS_HTTP_RESPONSE_CONTENT_LENGTH, + SEMATTRS_HTTP_RESPONSE_CONTENT_LENGTH_UNCOMPRESSED, +} from '@opentelemetry/semantic-conventions'; +import type { Span, SpanAttributes, SpanStatus } from '@sentry/core'; +import { + debug, + getHttpSpanDetailsFromUrlObject, + getSpanStatusFromHttpCode, + LRUMap, + parseStringToURLObject, + SDK_VERSION, + SEMANTIC_ATTRIBUTE_SENTRY_OP, + startInactiveSpan, +} from '@sentry/core'; import { DEBUG_BUILD } from '../../debug-build'; -import { getRequestUrl } from '../../utils/getRequestUrl'; import { INSTRUMENTATION_NAME } from './constants'; import { addRequestBreadcrumb, addTracePropagationHeadersToOutgoingRequest, + getClientRequestUrl, getRequestOptions, } from './outgoing-requests'; type Http = typeof http; type Https = typeof https; +type IncomingHttpHeaders = http.IncomingHttpHeaders; +type OutgoingHttpHeaders = http.OutgoingHttpHeaders; export type SentryHttpInstrumentationOptions = InstrumentationConfig & { /** @@ -37,6 +61,26 @@ export type SentryHttpInstrumentationOptions = InstrumentationConfig & { */ propagateTraceInOutgoingRequests?: boolean; + /** + * Whether to enable the capability to create spans for outgoing requests via diagnostic channels. + * If enabled, spans will only be created if the `spans` option is also enabled (default: true). 
+ * + * This is a feature flag that should be enabled by SDKs when the runtime supports it (Node 22.12+). + * Individual users should not need to configure this directly. + * + * @default `false` + */ + createSpansForOutgoingRequests?: boolean; + + /** + * Whether to create spans for outgoing requests (user preference). + * This only takes effect if `createSpansForOutgoingRequests` is also enabled. + * If `createSpansForOutgoingRequests` is not enabled, this option is ignored. + * + * @default `true` + */ + spans?: boolean; + /** * Do not capture breadcrumbs for outgoing HTTP requests to URLs where the given callback returns `true`. * For the scope of this instrumentation, this callback only controls breadcrumb creation. @@ -48,13 +92,20 @@ export type SentryHttpInstrumentationOptions = InstrumentationConfig & { */ ignoreOutgoingRequests?: (url: string, request: http.RequestOptions) => boolean; - // All options below do not do anything anymore in this instrumentation, and will be removed in the future. - // They are only kept here for backwards compatibility - the respective functionality is now handled by the httpServerIntegration/httpServerSpansIntegration. - /** - * @deprecated This no longer does anything. + * Hooks for outgoing request spans, called when `createSpansForOutgoingRequests` is enabled. + * These mirror the OTEL HttpInstrumentation hooks for backwards compatibility. */ - spans?: boolean; + outgoingRequestHook?: (span: Span, request: http.ClientRequest) => void; + outgoingResponseHook?: (span: Span, response: http.IncomingMessage) => void; + outgoingRequestApplyCustomAttributes?: ( + span: Span, + request: http.ClientRequest, + response: http.IncomingMessage, + ) => void; + + // All options below do not do anything anymore in this instrumentation, and will be removed in the future. + // They are only kept here for backwards compatibility - the respective functionality is now handled by the httpServerIntegration/httpServerSpansIntegration. 
/** * @depreacted This no longer does anything. @@ -111,14 +162,17 @@ export type SentryHttpInstrumentationOptions = InstrumentationConfig & { }; /** - * This custom HTTP instrumentation is used to isolate incoming requests and annotate them with additional information. - * It does not emit any spans. + * This custom HTTP instrumentation handles outgoing HTTP requests. * - * The reason this is isolated from the OpenTelemetry instrumentation is that users may overwrite this, - * which would lead to Sentry not working as expected. + * It provides: + * - Breadcrumbs for all outgoing requests + * - Trace propagation headers (when enabled) + * - Span creation for outgoing requests (when createSpansForOutgoingRequests is enabled) + * + * Span creation requires Node 22+ and uses diagnostic channels to avoid monkey-patching. + * By default, this is only enabled in the node SDK, not in node-core or other runtime SDKs. * * Important note: Contrary to other OTEL instrumentation, this one cannot be unwrapped. - * It only does minimal things though and does not emit any spans. * * This is heavily inspired & adapted from: * https://github.com/open-telemetry/opentelemetry-js/blob/f8ab5592ddea5cba0a3b33bf8d74f27872c0367f/experimental/packages/opentelemetry-instrumentation-http/src/http.ts @@ -168,13 +222,12 @@ export class SentryHttpInstrumentation extends InstrumentationBase) { + const [event] = args; + if (event !== 'response') { + return target.apply(thisArg, args); + } + + const parentContext = context.active(); + const requestContext = trace.setSpan(parentContext, span); + + return context.with(requestContext, () => { + return target.apply(thisArg, args); + }); + }, + }); + + // eslint-disable-next-line deprecation/deprecation + request.once = newOnce; + + /** + * Determines if the request has errored or the response has ended/errored. 
+ */ + let responseFinished = false; + + const endSpan = (status: SpanStatus): void => { + if (responseFinished) { + return; + } + responseFinished = true; + + span.setStatus(status); + span.end(); + }; + + request.prependListener('response', response => { + if (request.listenerCount('response') <= 1) { + response.resume(); + } + + context.bind(context.active(), response); + + const additionalAttributes = _getOutgoingRequestEndedSpanData(response); + span.setAttributes(additionalAttributes); + + this.getConfig().outgoingResponseHook?.(span, response); + this.getConfig().outgoingRequestApplyCustomAttributes?.(span, request, response); + + const endHandler = (forceError: boolean = false): void => { + this._diag.debug('outgoingRequest on end()'); + + const status = + // eslint-disable-next-line deprecation/deprecation + forceError || typeof response.statusCode !== 'number' || (response.aborted && !response.complete) + ? { code: SpanStatusCode.ERROR } + : getSpanStatusFromHttpCode(response.statusCode); + + endSpan(status); + }; + + response.on('end', () => { + endHandler(); + }); + response.on(errorMonitor, error => { + this._diag.debug('outgoingRequest on response error()', error); + endHandler(true); + }); + }); + + // Fallback if proper response end handling above fails + request.on('close', () => { + endSpan({ code: SpanStatusCode.UNSET }); + }); + request.on(errorMonitor, error => { + this._diag.debug('outgoingRequest on request error()', error); + endSpan({ code: SpanStatusCode.ERROR }); + }); + + return span; + } + /** * This is triggered when an outgoing request finishes. * It has access to the final request and response objects. 
@@ -219,9 +371,12 @@ export class SentryHttpInstrumentation extends InstrumentationBase { + addTracePropagationHeadersToOutgoingRequest(request, this._propagationDecisionMap); + }); + } else if (shouldPropagate) { + addTracePropagationHeadersToOutgoingRequest(request, this._propagationDecisionMap); + } + } else if (shouldPropagate) { + addTracePropagationHeadersToOutgoingRequest(request, this._propagationDecisionMap); + } } /** @@ -247,7 +422,102 @@ export class SentryHttpInstrumentation extends InstrumentationBase { // eslint-disable-next-line no-console console.warn( diff --git a/packages/node-core/src/utils/baggage.ts b/packages/node-core/src/utils/baggage.ts index be8e62b9497b..d236851559db 100644 --- a/packages/node-core/src/utils/baggage.ts +++ b/packages/node-core/src/utils/baggage.ts @@ -1,8 +1,13 @@ import { objectToBaggageHeader, parseBaggageHeader } from '@sentry/core'; /** - * Merge two baggage headers into one, where the existing one takes precedence. + * Merge two baggage headers into one. + * - Sentry-specific entries (keys starting with "sentry-") from the new baggage take precedence + * - Non-Sentry entries from existing baggage take precedence * The order of the existing baggage will be preserved, and new entries will be added to the end. + * + * This matches the behavior of OTEL's propagation.inject() which uses baggage.setEntry() + * to overwrite existing entries with the same key. 
*/ export function mergeBaggageHeaders( existing: Existing, @@ -19,10 +24,12 @@ export function mergeBaggageHeaders { - if (!mergedBaggageEntries[key]) { + // Sentry-specific keys always take precedence from new baggage + // Non-Sentry keys only added if not already present + if (key.startsWith('sentry-') || !mergedBaggageEntries[key]) { mergedBaggageEntries[key] = value; } }); diff --git a/packages/node-core/src/utils/captureRequestBody.ts b/packages/node-core/src/utils/captureRequestBody.ts index 3382409e0991..023209223f82 100644 --- a/packages/node-core/src/utils/captureRequestBody.ts +++ b/packages/node-core/src/utils/captureRequestBody.ts @@ -60,7 +60,7 @@ export function patchRequestToCaptureBody( `Dropping request body chunk because maximum body length of ${maxBodySize}b is exceeded.`, ); } - } catch (err) { + } catch (_err) { DEBUG_BUILD && debug.error(integrationName, 'Encountered error while storing body chunk.'); } diff --git a/packages/node-core/src/utils/detection.ts b/packages/node-core/src/utils/detection.ts index f7ae9a792c27..435f3b2a6686 100644 --- a/packages/node-core/src/utils/detection.ts +++ b/packages/node-core/src/utils/detection.ts @@ -4,6 +4,7 @@ import { NODE_MAJOR, NODE_MINOR } from '../nodeVersion'; /** Detect CommonJS. */ export function isCjs(): boolean { try { + // oxlint-disable-next-line typescript/prefer-optional-chain return typeof module !== 'undefined' && typeof module.exports !== 'undefined'; } catch { return false; diff --git a/packages/node-core/src/utils/getRequestUrl.ts b/packages/node-core/src/utils/getRequestUrl.ts index 5005224f59e0..73ddd33b447b 100644 --- a/packages/node-core/src/utils/getRequestUrl.ts +++ b/packages/node-core/src/utils/getRequestUrl.ts @@ -1,7 +1,11 @@ -import type { RequestOptions } from 'node:http'; - -/** Build a full URL from request options. */ -export function getRequestUrl(requestOptions: RequestOptions): string { +/** Build a full URL from request options or a ClientRequest. 
*/ +export function getRequestUrl(requestOptions: { + protocol?: string | null; + hostname?: string | null; + host?: string | null; + port?: string | number | null; + path?: string | null; +}): string { const protocol = requestOptions.protocol || ''; const hostname = requestOptions.hostname || requestOptions.host || ''; // Don't log standard :80 (http) and :443 (https) ports to reduce the noise diff --git a/packages/node-core/src/utils/outgoingHttpRequest.ts b/packages/node-core/src/utils/outgoingHttpRequest.ts index 5292018e31ef..7eafa941286a 100644 --- a/packages/node-core/src/utils/outgoingHttpRequest.ts +++ b/packages/node-core/src/utils/outgoingHttpRequest.ts @@ -50,7 +50,7 @@ export function addTracePropagationHeadersToOutgoingRequest( request: ClientRequest, propagationDecisionMap: LRUMap, ): void { - const url = getRequestUrl(request); + const url = getClientRequestUrl(request); const { tracePropagationTargets, propagateTraceparent } = getClient()?.getOptions() || {}; const headersToAdd = shouldPropagateTraceForUrl(url, tracePropagationTargets, propagationDecisionMap) @@ -146,7 +146,10 @@ export function getRequestOptions(request: ClientRequest): RequestOptions { }; } -function getRequestUrl(request: ClientRequest): string { +/** + * + */ +export function getClientRequestUrl(request: ClientRequest): string { const hostname = request.getHeader('host') || request.host; const protocol = request.protocol; const path = request.path; diff --git a/packages/node-core/test/utils/baggage.test.ts b/packages/node-core/test/utils/baggage.test.ts new file mode 100644 index 000000000000..aae5c48d6068 --- /dev/null +++ b/packages/node-core/test/utils/baggage.test.ts @@ -0,0 +1,131 @@ +import { describe, expect, it } from 'vitest'; +import { mergeBaggageHeaders } from '../../src/utils/baggage'; + +describe('mergeBaggageHeaders', () => { + it('returns new baggage when existing is undefined', () => { + const result = mergeBaggageHeaders(undefined, 'foo=bar'); + 
expect(result).toBe('foo=bar'); + }); + + it('returns existing baggage when new baggage is empty', () => { + const result = mergeBaggageHeaders('foo=bar', ''); + expect(result).toBe('foo=bar'); + }); + + it('returns existing baggage when new baggage is invalid', () => { + const result = mergeBaggageHeaders('foo=bar', 'invalid'); + expect(result).toBe('foo=bar'); + }); + + it('handles empty existing baggage', () => { + const result = mergeBaggageHeaders('', 'foo=bar,sentry-release=1.0.0'); + expect(result).toBe('foo=bar,sentry-release=1.0.0'); + }); + + it('preserves existing non-Sentry entries', () => { + const result = mergeBaggageHeaders('foo=bar,other=vendor', 'foo=newvalue,third=party'); + + const entries = result?.split(','); + expect(entries).toContain('foo=bar'); + expect(entries).toContain('other=vendor'); + expect(entries).toContain('third=party'); + expect(entries).not.toContain('foo=newvalue'); + }); + + it('overwrites existing Sentry entries with new ones', () => { + const result = mergeBaggageHeaders( + 'sentry-release=1.0.0,sentry-environment=prod', + 'sentry-release=2.0.0,sentry-environment=staging', + ); + + const entries = result?.split(','); + expect(entries).toContain('sentry-release=2.0.0'); + expect(entries).toContain('sentry-environment=staging'); + expect(entries).not.toContain('sentry-release=1.0.0'); + expect(entries).not.toContain('sentry-environment=prod'); + }); + + it('merges Sentry and non-Sentry entries correctly', () => { + const result = mergeBaggageHeaders('foo=bar,sentry-release=1.0.0,other=vendor', 'sentry-release=2.0.0,third=party'); + + const entries = result?.split(','); + expect(entries).toContain('foo=bar'); + expect(entries).toContain('other=vendor'); + expect(entries).toContain('third=party'); + expect(entries).toContain('sentry-release=2.0.0'); + expect(entries).not.toContain('sentry-release=1.0.0'); + }); + + it('handles third-party baggage with Sentry entries', () => { + const result = mergeBaggageHeaders( + 
'other=vendor,foo=bar,third=party,sentry-release=9.9.9,sentry-environment=staging,sentry-sample_rate=0.54,last=item', + 'sentry-release=2.1.0,sentry-environment=myEnv', + ); + + const entries = result?.split(','); + expect(entries).toContain('foo=bar'); + expect(entries).toContain('last=item'); + expect(entries).toContain('other=vendor'); + expect(entries).toContain('third=party'); + expect(entries).toContain('sentry-environment=myEnv'); + expect(entries).toContain('sentry-release=2.1.0'); + expect(entries).toContain('sentry-sample_rate=0.54'); + expect(entries).not.toContain('sentry-environment=staging'); + expect(entries).not.toContain('sentry-release=9.9.9'); + }); + + it('adds new Sentry entries when they do not exist', () => { + const result = mergeBaggageHeaders('foo=bar,other=vendor', 'sentry-release=1.0.0,sentry-environment=prod'); + + const entries = result?.split(','); + expect(entries).toContain('foo=bar'); + expect(entries).toContain('other=vendor'); + expect(entries).toContain('sentry-release=1.0.0'); + expect(entries).toContain('sentry-environment=prod'); + }); + + it('handles array-type existing baggage', () => { + const result = mergeBaggageHeaders(['foo=bar', 'other=vendor'], 'sentry-release=1.0.0'); + + const entries = result?.split(','); + expect(entries).toContain('foo=bar'); + expect(entries).toContain('other=vendor'); + expect(entries).toContain('sentry-release=1.0.0'); + }); + + it('preserves order of existing entries', () => { + const result = mergeBaggageHeaders('first=1,second=2,third=3', 'fourth=4'); + expect(result).toBe('first=1,second=2,third=3,fourth=4'); + }); + + it('handles complex scenario with multiple Sentry keys', () => { + const result = mergeBaggageHeaders( + 'foo=bar,sentry-release=old,sentry-environment=old,other=vendor', + 'sentry-release=new,sentry-environment=new,sentry-transaction=test,new=entry', + ); + + const entries = result?.split(','); + expect(entries).toContain('foo=bar'); + 
expect(entries).toContain('other=vendor'); + expect(entries).toContain('sentry-release=new'); + expect(entries).toContain('sentry-environment=new'); + expect(entries).toContain('sentry-transaction=test'); + expect(entries).toContain('new=entry'); + expect(entries).not.toContain('sentry-release=old'); + expect(entries).not.toContain('sentry-environment=old'); + }); + + it('matches OTEL propagation.inject() behavior for Sentry keys', () => { + const result = mergeBaggageHeaders( + 'sentry-trace_id=abc123,sentry-sampled=false,non-sentry=keep', + 'sentry-trace_id=xyz789,sentry-sampled=true', + ); + + const entries = result?.split(','); + expect(entries).toContain('sentry-trace_id=xyz789'); + expect(entries).toContain('sentry-sampled=true'); + expect(entries).toContain('non-sentry=keep'); + expect(entries).not.toContain('sentry-trace_id=abc123'); + expect(entries).not.toContain('sentry-sampled=false'); + }); +}); diff --git a/packages/node-native/package.json b/packages/node-native/package.json index 1d3650f3a05d..67f9a7f0c0ff 100644 --- a/packages/node-native/package.json +++ b/packages/node-native/package.json @@ -49,9 +49,9 @@ ], "scripts": { "clean": "rm -rf build", - "lint": "oxlint .", + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --type-aware", "lint:es-compatibility": "es-check es2022 ./build/cjs/*.js && es-check es2022 ./build/esm/*.js --module", - "fix": "oxlint . --fix", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . 
--fix --type-aware", "build": "yarn build:types && yarn build:transpile", "build:transpile": "yarn rollup -c rollup.npm.config.mjs", "build:types:downlevel": "yarn downlevel-dts build/types build/types-ts3.8 --to ts3.8", diff --git a/packages/node/package.json b/packages/node/package.json index 86a5c9803f4c..151bc42c6275 100644 --- a/packages/node/package.json +++ b/packages/node/package.json @@ -66,40 +66,40 @@ }, "dependencies": { "@opentelemetry/api": "^1.9.0", - "@opentelemetry/context-async-hooks": "^2.5.1", - "@opentelemetry/core": "^2.5.1", - "@opentelemetry/instrumentation": "^0.211.0", - "@opentelemetry/instrumentation-amqplib": "0.58.0", - "@opentelemetry/instrumentation-connect": "0.54.0", - "@opentelemetry/instrumentation-dataloader": "0.28.0", - "@opentelemetry/instrumentation-express": "0.59.0", - "@opentelemetry/instrumentation-fs": "0.30.0", - "@opentelemetry/instrumentation-generic-pool": "0.54.0", - "@opentelemetry/instrumentation-graphql": "0.58.0", - "@opentelemetry/instrumentation-hapi": "0.57.0", - "@opentelemetry/instrumentation-http": "0.211.0", - "@opentelemetry/instrumentation-ioredis": "0.59.0", - "@opentelemetry/instrumentation-kafkajs": "0.20.0", - "@opentelemetry/instrumentation-knex": "0.55.0", - "@opentelemetry/instrumentation-koa": "0.59.0", - "@opentelemetry/instrumentation-lru-memoizer": "0.55.0", - "@opentelemetry/instrumentation-mongodb": "0.64.0", - "@opentelemetry/instrumentation-mongoose": "0.57.0", - "@opentelemetry/instrumentation-mysql": "0.57.0", - "@opentelemetry/instrumentation-mysql2": "0.57.0", - "@opentelemetry/instrumentation-pg": "0.63.0", - "@opentelemetry/instrumentation-redis": "0.59.0", - "@opentelemetry/instrumentation-tedious": "0.30.0", - "@opentelemetry/instrumentation-undici": "0.21.0", - "@opentelemetry/resources": "^2.5.1", - "@opentelemetry/sdk-trace-base": "^2.5.1", - "@opentelemetry/semantic-conventions": "^1.39.0", - "@prisma/instrumentation": "7.2.0", - "@fastify/otel": "0.16.0", + 
"@opentelemetry/context-async-hooks": "^2.6.0", + "@opentelemetry/core": "^2.6.0", + "@opentelemetry/instrumentation": "^0.213.0", + "@opentelemetry/instrumentation-amqplib": "0.60.0", + "@opentelemetry/instrumentation-connect": "0.56.0", + "@opentelemetry/instrumentation-dataloader": "0.30.0", + "@opentelemetry/instrumentation-express": "0.61.0", + "@opentelemetry/instrumentation-fs": "0.32.0", + "@opentelemetry/instrumentation-generic-pool": "0.56.0", + "@opentelemetry/instrumentation-graphql": "0.61.0", + "@opentelemetry/instrumentation-hapi": "0.59.0", + "@opentelemetry/instrumentation-http": "0.213.0", + "@opentelemetry/instrumentation-ioredis": "0.61.0", + "@opentelemetry/instrumentation-kafkajs": "0.22.0", + "@opentelemetry/instrumentation-knex": "0.57.0", + "@opentelemetry/instrumentation-koa": "0.61.0", + "@opentelemetry/instrumentation-lru-memoizer": "0.57.0", + "@opentelemetry/instrumentation-mongodb": "0.66.0", + "@opentelemetry/instrumentation-mongoose": "0.59.0", + "@opentelemetry/instrumentation-mysql": "0.59.0", + "@opentelemetry/instrumentation-mysql2": "0.59.0", + "@opentelemetry/instrumentation-pg": "0.65.0", + "@opentelemetry/instrumentation-redis": "0.61.0", + "@opentelemetry/instrumentation-tedious": "0.32.0", + "@opentelemetry/instrumentation-undici": "0.23.0", + "@opentelemetry/resources": "^2.6.0", + "@opentelemetry/sdk-trace-base": "^2.6.0", + "@opentelemetry/semantic-conventions": "^1.40.0", + "@prisma/instrumentation": "7.4.2", + "@fastify/otel": "0.17.1", "@sentry/core": "10.43.0", "@sentry/node-core": "10.43.0", "@sentry/opentelemetry": "10.43.0", - "import-in-the-middle": "^2.0.6" + "import-in-the-middle": "^3.0.0" }, "devDependencies": { "@types/node": "^18.19.1" @@ -117,8 +117,8 @@ "build:tarball": "npm pack", "circularDepCheck": "madge --circular src/index.ts", "clean": "rimraf build coverage sentry-node-*.tgz", - "fix": "oxlint . 
--fix", - "lint": "oxlint .", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --fix --type-aware", + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --type-aware", "lint:es-compatibility": "es-check es2022 ./build/cjs/*.js && es-check es2022 ./build/esm/*.js --module", "test": "yarn test:unit", "test:unit": "vitest run", diff --git a/packages/node/src/integrations/http.ts b/packages/node/src/integrations/http.ts index 7bde0ae40a21..3e38c12f0c4b 100644 --- a/packages/node/src/integrations/http.ts +++ b/packages/node/src/integrations/http.ts @@ -26,6 +26,13 @@ const INTEGRATION_NAME = 'Http'; const INSTRUMENTATION_NAME = '@opentelemetry_sentry-patched/instrumentation-http'; +// The `http.client.request.created` diagnostics channel, needed for trace propagation, +// was added in Node 22.12.0 (backported from 23.2.0). Earlier 22.x versions don't have it. +const FULLY_SUPPORTS_HTTP_DIAGNOSTICS_CHANNEL = + (NODE_VERSION.major === 22 && NODE_VERSION.minor >= 12) || + (NODE_VERSION.major === 23 && NODE_VERSION.minor >= 2) || + NODE_VERSION.major >= 24; + interface HttpOptions { /** * Whether breadcrumbs should be recorded for outgoing requests. @@ -185,6 +192,20 @@ export const instrumentOtelHttp = generateInstrumentOnce=0.213.0) has a guard (`_httpPatched`/`_httpsPatched`) + // that prevents patching `http`/`https` when loaded by both CJS `require()` and ESM `import`. + // In environments like AWS Lambda, the runtime loads `http` via CJS first (for the Runtime API), + // and then the user's ESM handler imports `node:http`. The guard blocks ESM patching after CJS, + // which breaks HTTP spans for ESM handlers. We disable this guard to allow both to be patched. + // TODO(andrei): Remove once https://github.com/open-telemetry/opentelemetry-js/issues/6489 is fixed. 
+ try { + const noopDescriptor = { get: () => false, set: () => {} }; + Object.defineProperty(instrumentation, '_httpPatched', noopDescriptor); + Object.defineProperty(instrumentation, '_httpsPatched', noopDescriptor); + } catch { + // ignore errors here... + } + return instrumentation; }); @@ -203,9 +224,9 @@ export function _shouldUseOtelHttpInstrumentation( return false; } - // IMPORTANT: We only disable span instrumentation when spans are not enabled _and_ we are on Node 22+, - // as otherwise the necessary diagnostics channel is not available yet - if (!hasSpansEnabled(clientOptions) && NODE_VERSION.major >= 22) { + // IMPORTANT: We only disable span instrumentation when spans are not enabled _and_ we are on a Node version + // that fully supports the necessary diagnostics channels for trace propagation + if (!hasSpansEnabled(clientOptions) && FULLY_SUPPORTS_HTTP_DIAGNOSTICS_CHANNEL) { return false; } @@ -258,8 +279,26 @@ export const httpIntegration = defineIntegration((options: HttpOptions = {}) => const sentryHttpInstrumentationOptions = { breadcrumbs: options.breadcrumbs, propagateTraceInOutgoingRequests: - typeof options.tracePropagation === 'boolean' ? options.tracePropagation : !useOtelHttpInstrumentation, + typeof options.tracePropagation === 'boolean' + ? 
options.tracePropagation + : FULLY_SUPPORTS_HTTP_DIAGNOSTICS_CHANNEL || !useOtelHttpInstrumentation, + createSpansForOutgoingRequests: FULLY_SUPPORTS_HTTP_DIAGNOSTICS_CHANNEL, + spans: options.spans, ignoreOutgoingRequests: options.ignoreOutgoingRequests, + outgoingRequestHook: (span: Span, request: ClientRequest) => { + // Sanitize data URLs to prevent long base64 strings in span attributes + const url = getRequestUrl(request); + if (url.startsWith('data:')) { + const sanitizedUrl = stripDataUrlContent(url); + span.setAttribute('http.url', sanitizedUrl); + span.setAttribute(SEMANTIC_ATTRIBUTE_URL_FULL, sanitizedUrl); + span.updateName(`${request.method || 'GET'} ${sanitizedUrl}`); + } + + options.instrumentation?.requestHook?.(span, request); + }, + outgoingResponseHook: options.instrumentation?.responseHook, + outgoingRequestApplyCustomAttributes: options.instrumentation?.applyCustomAttributesOnSpan, } satisfies SentryHttpInstrumentationOptions; // This is Sentry-specific instrumentation for outgoing request breadcrumbs & trace propagation @@ -281,6 +320,9 @@ export const httpIntegration = defineIntegration((options: HttpOptions = {}) => function getConfigWithDefaults(options: Partial = {}): HttpInstrumentationConfig { const instrumentationConfig = { + // This is handled by the SentryHttpInstrumentation on Node 22+ + disableOutgoingRequestInstrumentation: FULLY_SUPPORTS_HTTP_DIAGNOSTICS_CHANNEL, + ignoreOutgoingRequestHook: request => { const url = getRequestUrl(request); diff --git a/packages/node/src/integrations/node-fetch.ts b/packages/node/src/integrations/node-fetch.ts index 2a1e1cac9098..74bfff2dab47 100644 --- a/packages/node/src/integrations/node-fetch.ts +++ b/packages/node/src/integrations/node-fetch.ts @@ -16,7 +16,10 @@ import type { NodeClientOptions } from '../types'; const INTEGRATION_NAME = 'NodeFetch'; -interface NodeFetchOptions extends Pick { +interface NodeFetchOptions extends Pick< + UndiciInstrumentationConfig, + 'requestHook' | 
'responseHook' | 'headersToSpanAttributes' +> { /** * Whether breadcrumbs should be recorded for requests. * Defaults to true @@ -54,7 +57,7 @@ const instrumentOtelNodeFetch = generateInstrumentOnce( INTEGRATION_NAME, UndiciInstrumentation, (options: NodeFetchOptions) => { - return getConfigWithDefaults(options); + return _getConfigWithDefaults(options); }, ); @@ -110,7 +113,8 @@ function _shouldInstrumentSpans(options: NodeFetchOptions, clientOptions: Partia : !clientOptions.skipOpenTelemetrySetup && hasSpansEnabled(clientOptions); } -function getConfigWithDefaults(options: Partial = {}): UndiciInstrumentationConfig { +/** Exported only for tests. */ +export function _getConfigWithDefaults(options: Partial = {}): UndiciInstrumentationConfig { const instrumentationConfig = { requireParentforSpans: false, ignoreRequestHook: request => { @@ -140,6 +144,7 @@ function getConfigWithDefaults(options: Partial = {}): UndiciI }, requestHook: options.requestHook, responseHook: options.responseHook, + headersToSpanAttributes: options.headersToSpanAttributes, } satisfies UndiciInstrumentationConfig; return instrumentationConfig; diff --git a/packages/node/src/integrations/tracing/anthropic-ai/instrumentation.ts b/packages/node/src/integrations/tracing/anthropic-ai/instrumentation.ts index 4fc96aa5ea92..b01b34f2f8ad 100644 --- a/packages/node/src/integrations/tracing/anthropic-ai/instrumentation.ts +++ b/packages/node/src/integrations/tracing/anthropic-ai/instrumentation.ts @@ -8,7 +8,6 @@ import type { AnthropicAiClient, AnthropicAiOptions } from '@sentry/core'; import { _INTERNAL_shouldSkipAiProviderWrapping, ANTHROPIC_AI_INTEGRATION_NAME, - getClient, instrumentAnthropicAiClient, SDK_VERSION, } from '@sentry/core'; @@ -60,16 +59,8 @@ export class SentryAnthropicAiInstrumentation extends InstrumentationBase AnthropicAiClient; // Preserve static and prototype chains @@ -89,7 +80,7 @@ export class SentryAnthropicAiInstrumentation extends InstrumentationBase=0.0.0 <2.0.0']; @@ 
-59,27 +59,11 @@ export class SentryLangGraphInstrumentation extends InstrumentationBase; - }; - - StateGraph.prototype.compile = instrumentStateGraphCompile( - StateGraph.prototype.compile as (...args: unknown[]) => CompiledGraph, - options, + instrumentLangGraph( + exports.StateGraph.prototype as { compile: (...args: unknown[]) => unknown }, + this.getConfig(), ); } diff --git a/packages/node/src/integrations/tracing/openai/instrumentation.ts b/packages/node/src/integrations/tracing/openai/instrumentation.ts index 42ed7faf71e3..0d44a056838a 100644 --- a/packages/node/src/integrations/tracing/openai/instrumentation.ts +++ b/packages/node/src/integrations/tracing/openai/instrumentation.ts @@ -7,7 +7,6 @@ import { import type { Integration, OpenAiClient, OpenAiOptions } from '@sentry/core'; import { _INTERNAL_shouldSkipAiProviderWrapping, - getClient, instrumentOpenAiClient, OPENAI_INTEGRATION_NAME, SDK_VERSION, @@ -74,16 +73,8 @@ export class SentryOpenAiInstrumentation extends InstrumentationBase OpenAiClient; // Preserve static and prototype chains @@ -103,7 +94,7 @@ export class SentryOpenAiInstrumentation extends InstrumentationBase { expect(actual).toBe(expected); }); - conditionalTest({ min: 22 })('returns false without tracesSampleRate on Node >=22', () => { + conditionalTest({ min: 22 })('returns false without tracesSampleRate on Node >=22.12', () => { const actual = _shouldUseOtelHttpInstrumentation({}, {}); expect(actual).toBe(false); }); diff --git a/packages/node/test/integrations/node-fetch.test.ts b/packages/node/test/integrations/node-fetch.test.ts new file mode 100644 index 000000000000..a627d48dc6c0 --- /dev/null +++ b/packages/node/test/integrations/node-fetch.test.ts @@ -0,0 +1,25 @@ +import { describe, expect, it } from 'vitest'; +import { _getConfigWithDefaults } from '../../src/integrations/node-fetch'; + +describe('nativeNodeFetchIntegration', () => { + describe('_getConfigWithDefaults', () => { + it('passes headersToSpanAttributes through to 
the config', () => { + const config = _getConfigWithDefaults({ + headersToSpanAttributes: { + requestHeaders: ['x-custom-header'], + responseHeaders: ['content-length', 'content-type'], + }, + }); + + expect(config.headersToSpanAttributes).toEqual({ + requestHeaders: ['x-custom-header'], + responseHeaders: ['content-length', 'content-type'], + }); + }); + + it('does not set headersToSpanAttributes when not provided', () => { + const config = _getConfigWithDefaults({}); + expect(config.headersToSpanAttributes).toBeUndefined(); + }); + }); +}); diff --git a/packages/nuxt/package.json b/packages/nuxt/package.json index d943e651334b..ccdc56c2a113 100644 --- a/packages/nuxt/package.json +++ b/packages/nuxt/package.json @@ -77,8 +77,8 @@ "build:tarball": "npm pack", "circularDepCheck": "madge --circular src/index.client.ts && madge --circular src/index.server.ts && madge --circular src/index.types.ts", "clean": "rimraf build coverage sentry-nuxt-*.tgz", - "fix": "oxlint . --fix", - "lint": "oxlint .", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --fix --type-aware", + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . 
--type-aware", "lint:es-compatibility": "es-check es2020 ./build/cjs/*.js && es-check es2020 ./build/esm/*.js --module && es-check es2020 ./build/module/*.cjs && es-check es2020 ./build/module/*.mjs --module", "test": "yarn test:unit", "test:unit": "vitest run", diff --git a/packages/nuxt/src/runtime/plugins/storage.server.ts b/packages/nuxt/src/runtime/plugins/storage.server.ts index 14d9ea8aa17a..1c5a3fd678d4 100644 --- a/packages/nuxt/src/runtime/plugins/storage.server.ts +++ b/packages/nuxt/src/runtime/plugins/storage.server.ts @@ -262,7 +262,7 @@ function isCacheHit(key: string, value: unknown): boolean { } return validateCacheEntry(key, JSON.parse(String(value)) as CacheEntry); - } catch (error) { + } catch { // this is a best effort, so we return false if we can't validate the cache entry return false; } diff --git a/packages/nuxt/src/runtime/utils.ts b/packages/nuxt/src/runtime/utils.ts index ce8069c58cdb..2c3526b951c9 100644 --- a/packages/nuxt/src/runtime/utils.ts +++ b/packages/nuxt/src/runtime/utils.ts @@ -72,6 +72,7 @@ export function reportNuxtError(options: { const sentryOptions = sentryClient ? (sentryClient.getOptions() as ClientOptions & VueOptions) : null; // `attachProps` is enabled by default and props should only not be attached if explicitly disabled (see DEFAULT_CONFIG in `vueIntegration`). + // oxlint-disable-next-line typescript/no-unsafe-member-access if (sentryOptions?.attachProps && instance.$props !== false) { metadata.propsData = instance.$props; } diff --git a/packages/nuxt/src/vite/sentryVitePlugin.ts b/packages/nuxt/src/vite/sentryVitePlugin.ts index 78c11110bf72..f301a17c9423 100644 --- a/packages/nuxt/src/vite/sentryVitePlugin.ts +++ b/packages/nuxt/src/vite/sentryVitePlugin.ts @@ -7,17 +7,16 @@ import { extractNuxtSourceMapSetting, getPluginOptions, validateDifferentSourceM /** * Creates a Vite plugin that adds the Sentry Vite plugin and validates source map settings. 
*/ -export function createSentryViteConfigPlugin(options: { +export function validateSourceMapsOptionsPlugin(options: { nuxt: Nuxt; moduleOptions: SentryNuxtModuleOptions; sourceMapsEnabled: boolean; - shouldDeleteFilesFallback: { client: boolean; server: boolean }; }): Plugin { - const { nuxt, moduleOptions, sourceMapsEnabled, shouldDeleteFilesFallback } = options; + const { nuxt, moduleOptions, sourceMapsEnabled } = options; const isDebug = moduleOptions.debug; return { - name: 'sentry-nuxt-vite-config', + name: 'sentry-nuxt-source-map-validation', config(viteConfig: UserConfig, env: ConfigEnv) { // Only run in production builds if (!sourceMapsEnabled || env.mode === 'development' || nuxt.options?._prepare) { @@ -34,6 +33,11 @@ export function createSentryViteConfigPlugin(options: { viteConfig.build = viteConfig.build || {}; const viteSourceMap = viteConfig.build.sourcemap; + if (isDebug) { + // eslint-disable-next-line no-console + console.log(`[Sentry] Validating Vite config for the ${runtime} runtime.`); + } + // Vite source map options are the same as the Nuxt source map config options (unless overwritten) validateDifferentSourceMapSettings({ nuxtSettingKey: `sourcemap.${runtime}`, @@ -41,17 +45,6 @@ export function createSentryViteConfigPlugin(options: { otherSettingKey: 'viteConfig.build.sourcemap', otherSettingValue: viteSourceMap, }); - - if (isDebug) { - // eslint-disable-next-line no-console - console.log(`[Sentry] Adding Sentry Vite plugin to the ${runtime} runtime.`); - } - - // Add Sentry plugin by mutating the config - // Vite plugin is added on the client and server side (plugin runs for both builds) - // Nuxt client source map is 'false' by default. Warning about this will be shown already in an earlier step, and it's also documented that `nuxt.sourcemap.client` needs to be enabled. 
- viteConfig.plugins = viteConfig.plugins || []; - viteConfig.plugins.push(sentryVitePlugin(getPluginOptions(moduleOptions, shouldDeleteFilesFallback))); }, }; } diff --git a/packages/nuxt/src/vite/sourceMaps.ts b/packages/nuxt/src/vite/sourceMaps.ts index b270a34a50b5..c13126074871 100644 --- a/packages/nuxt/src/vite/sourceMaps.ts +++ b/packages/nuxt/src/vite/sourceMaps.ts @@ -1,10 +1,10 @@ import type { Nuxt } from '@nuxt/schema'; import { sentryRollupPlugin, type SentryRollupPluginOptions } from '@sentry/rollup-plugin'; -import type { SentryVitePluginOptions } from '@sentry/vite-plugin'; +import { sentryVitePlugin, type SentryVitePluginOptions } from '@sentry/vite-plugin'; import type { NitroConfig } from 'nitropack'; import type { Plugin } from 'vite'; import type { SentryNuxtModuleOptions } from '../common/types'; -import { createSentryViteConfigPlugin } from './sentryVitePlugin'; +import { validateSourceMapsOptionsPlugin } from './sentryVitePlugin'; /** * Whether the user enabled (true, 'hidden', 'inline') or disabled (false) source maps @@ -20,7 +20,7 @@ export type SourceMapSetting = boolean | 'hidden' | 'inline'; export function setupSourceMaps( moduleOptions: SentryNuxtModuleOptions, nuxt: Nuxt, - addVitePlugin: (plugin: Plugin | (() => Plugin), options?: { dev?: boolean; build?: boolean }) => void, + addVitePlugin: (plugin: Plugin[], options?: { dev?: boolean; build?: boolean }) => void, ): void { // TODO(v11): remove deprecated options (also from SentryNuxtModuleOptions type) @@ -81,16 +81,16 @@ export function setupSourceMaps( } }); - addVitePlugin( - createSentryViteConfigPlugin({ - nuxt, - moduleOptions, - sourceMapsEnabled, - shouldDeleteFilesFallback, - }), - // Only add source map plugin during build - { dev: false, build: true }, - ); + if (sourceMapsEnabled && !nuxt.options.dev && !nuxt.options?._prepare) { + addVitePlugin( + [ + validateSourceMapsOptionsPlugin({ nuxt, moduleOptions, sourceMapsEnabled }), + // Vite plugin is added on the client 
and server side (plugin runs for both builds) + ...sentryVitePlugin(getPluginOptions(moduleOptions, shouldDeleteFilesFallback)), + ], + { dev: false, build: true }, // Only add source map plugin during build + ); + } nuxt.hook('nitro:config', (nitroConfig: NitroConfig) => { if (sourceMapsEnabled && !nitroConfig.dev && !nuxt.options?._prepare) { diff --git a/packages/nuxt/test/vite/sourceMaps-nuxtHooks.test.ts b/packages/nuxt/test/vite/sourceMaps-nuxtHooks.test.ts index 4a881583ac93..be33cf500397 100644 --- a/packages/nuxt/test/vite/sourceMaps-nuxtHooks.test.ts +++ b/packages/nuxt/test/vite/sourceMaps-nuxtHooks.test.ts @@ -4,15 +4,16 @@ import { afterAll, beforeAll, beforeEach, describe, expect, it, vi } from 'vites import type { SourceMapSetting } from '../../src/vite/sourceMaps'; function createMockAddVitePlugin() { - let capturedPlugin: Plugin | null = null; + let capturedPlugins: Plugin[] | null = null; - const mockAddVitePlugin = vi.fn((plugin: Plugin | (() => Plugin)) => { - capturedPlugin = typeof plugin === 'function' ? plugin() : plugin; + const mockAddVitePlugin = vi.fn((plugins: Plugin[]) => { + capturedPlugins = plugins; }); return { mockAddVitePlugin, - getCapturedPlugin: () => capturedPlugin, + getCapturedPlugin: () => capturedPlugins?.[0] ?? null, + getCapturedPlugins: () => capturedPlugins, }; } @@ -46,7 +47,7 @@ function createMockNuxt(options: { } describe('setupSourceMaps hooks', () => { - const mockSentryVitePlugin = vi.fn(() => ({ name: 'sentry-vite-plugin' })); + const mockSentryVitePlugin = vi.fn(() => [{ name: 'sentry-vite-plugin' }]); const mockSentryRollupPlugin = vi.fn(() => ({ name: 'sentry-rollup-plugin' })); const consoleLogSpy = vi.spyOn(console, 'log'); @@ -85,39 +86,27 @@ describe('setupSourceMaps hooks', () => { const plugin = getCapturedPlugin(); expect(plugin).not.toBeNull(); - expect(plugin?.name).toBe('sentry-nuxt-vite-config'); - // modules:done is called afterward. 
Later, the plugin is actually added + expect(plugin?.name).toBe('sentry-nuxt-source-map-validation'); }); it.each([ { label: 'prepare mode', nuxtOptions: { _prepare: true }, - viteOptions: { mode: 'production', command: 'build' as const }, - buildConfig: { build: {}, plugins: [] }, }, { label: 'dev mode', nuxtOptions: { dev: true }, - viteOptions: { mode: 'development', command: 'build' as const }, - buildConfig: { build: {}, plugins: [] }, }, - ])('does not add plugins to vite config in $label', async ({ nuxtOptions, viteOptions, buildConfig }) => { + ])('does not add plugins to vite config in $label', async ({ nuxtOptions }) => { const { setupSourceMaps } = await import('../../src/vite/sourceMaps'); const mockNuxt = createMockNuxt(nuxtOptions); - const { mockAddVitePlugin, getCapturedPlugin } = createMockAddVitePlugin(); + const { mockAddVitePlugin } = createMockAddVitePlugin(); setupSourceMaps({ debug: true }, mockNuxt as unknown as Nuxt, mockAddVitePlugin); await mockNuxt.triggerHook('modules:done'); - const plugin = getCapturedPlugin(); - expect(plugin).not.toBeNull(); - - if (plugin && typeof plugin.config === 'function') { - const viteConfig: UserConfig = buildConfig; - plugin.config(viteConfig, viteOptions); - expect(viteConfig.plugins?.length).toBe(0); - } + expect(mockAddVitePlugin).not.toHaveBeenCalled(); }); it.each([ @@ -126,19 +115,14 @@ describe('setupSourceMaps hooks', () => { ])('adds sentry vite plugin to vite config for $label in production', async ({ buildConfig }) => { const { setupSourceMaps } = await import('../../src/vite/sourceMaps'); const mockNuxt = createMockNuxt({ _prepare: false, dev: false }); - const { mockAddVitePlugin, getCapturedPlugin } = createMockAddVitePlugin(); + const { mockAddVitePlugin, getCapturedPlugins } = createMockAddVitePlugin(); setupSourceMaps({ debug: true }, mockNuxt as unknown as Nuxt, mockAddVitePlugin); - await mockNuxt.triggerHook('modules:done'); - - const plugin = getCapturedPlugin(); - 
expect(plugin).not.toBeNull(); - if (plugin && typeof plugin.config === 'function') { - const viteConfig: UserConfig = buildConfig; - plugin.config(viteConfig, { mode: 'production', command: 'build' }); - expect(viteConfig.plugins?.length).toBeGreaterThan(0); - } + const plugins = getCapturedPlugins(); + expect(plugins).not.toBeNull(); + expect(plugins?.length).toBeGreaterThan(0); + expect(mockSentryVitePlugin).toHaveBeenCalled(); }); }); @@ -146,15 +130,9 @@ describe('setupSourceMaps hooks', () => { it('calls sentryVitePlugin in production mode', async () => { const { setupSourceMaps } = await import('../../src/vite/sourceMaps'); const mockNuxt = createMockNuxt({ _prepare: false, dev: false }); - const { mockAddVitePlugin, getCapturedPlugin } = createMockAddVitePlugin(); + const { mockAddVitePlugin } = createMockAddVitePlugin(); setupSourceMaps({ debug: true }, mockNuxt as unknown as Nuxt, mockAddVitePlugin); - await mockNuxt.triggerHook('modules:done'); - - const plugin = getCapturedPlugin(); - if (plugin && typeof plugin.config === 'function') { - plugin.config({ build: { ssr: false }, plugins: [] }, { mode: 'production', command: 'build' }); - } expect(mockSentryVitePlugin).toHaveBeenCalled(); }); @@ -162,18 +140,12 @@ describe('setupSourceMaps hooks', () => { it.each([ { label: 'prepare mode', nuxtOptions: { _prepare: true }, viteMode: 'production' as const }, { label: 'dev mode', nuxtOptions: { dev: true }, viteMode: 'development' as const }, - ])('does not call sentryVitePlugin in $label', async ({ nuxtOptions, viteMode }) => { + ])('does not call sentryVitePlugin in $label', async ({ nuxtOptions }) => { const { setupSourceMaps } = await import('../../src/vite/sourceMaps'); const mockNuxt = createMockNuxt(nuxtOptions); - const { mockAddVitePlugin, getCapturedPlugin } = createMockAddVitePlugin(); + const { mockAddVitePlugin } = createMockAddVitePlugin(); setupSourceMaps({ debug: true }, mockNuxt as unknown as Nuxt, mockAddVitePlugin); - await 
mockNuxt.triggerHook('modules:done'); - - const plugin = getCapturedPlugin(); - if (plugin && typeof plugin.config === 'function') { - plugin.config({ build: {}, plugins: [] }, { mode: viteMode, command: 'build' }); - } expect(mockSentryVitePlugin).not.toHaveBeenCalled(); }); @@ -187,23 +159,16 @@ describe('setupSourceMaps hooks', () => { '.*/**/function/**/*.map', ]; - it('uses mutated shouldDeleteFilesFallback (unset → true): plugin.config() after modules:done gets fallback filesToDeleteAfterUpload', async () => { + it('sentryVitePlugin is called with fallback filesToDeleteAfterUpload when source maps are unset', async () => { const { setupSourceMaps } = await import('../../src/vite/sourceMaps'); const mockNuxt = createMockNuxt({ _prepare: false, dev: false, sourcemap: { client: undefined, server: undefined }, }); - const { mockAddVitePlugin, getCapturedPlugin } = createMockAddVitePlugin(); + const { mockAddVitePlugin } = createMockAddVitePlugin(); setupSourceMaps({ debug: false }, mockNuxt as unknown as Nuxt, mockAddVitePlugin); - await mockNuxt.triggerHook('modules:done'); - - const plugin = getCapturedPlugin(); - expect(plugin).not.toBeNull(); - if (plugin && typeof plugin.config === 'function') { - plugin.config({ build: { ssr: false }, plugins: [] }, { mode: 'production', command: 'build' }); - } expect(mockSentryVitePlugin).toHaveBeenCalledWith( expect.objectContaining({ @@ -214,25 +179,22 @@ describe('setupSourceMaps hooks', () => { ); }); - it('uses mutated shouldDeleteFilesFallback (explicitly enabled → false): plugin.config() after modules:done gets no filesToDeleteAfterUpload', async () => { + it('sentryRollupPlugin is called without filesToDeleteAfterUpload when source maps are explicitly enabled', async () => { const { setupSourceMaps } = await import('../../src/vite/sourceMaps'); const mockNuxt = createMockNuxt({ _prepare: false, dev: false, sourcemap: { client: true, server: true }, }); - const { mockAddVitePlugin, getCapturedPlugin } = 
createMockAddVitePlugin(); + const { mockAddVitePlugin } = createMockAddVitePlugin(); setupSourceMaps({ debug: false }, mockNuxt as unknown as Nuxt, mockAddVitePlugin); await mockNuxt.triggerHook('modules:done'); - const plugin = getCapturedPlugin(); - expect(plugin).not.toBeNull(); - if (plugin && typeof plugin.config === 'function') { - plugin.config({ build: { ssr: false }, plugins: [] }, { mode: 'production', command: 'build' }); - } + const nitroConfig = { rollupConfig: { plugins: [] as unknown[], output: {} }, dev: false }; + await mockNuxt.triggerHook('nitro:config', nitroConfig); - const pluginOptions = (mockSentryVitePlugin?.mock?.calls?.[0] as unknown[])?.[0] as { + const pluginOptions = (mockSentryRollupPlugin?.mock?.calls?.[0] as unknown[])?.[0] as { sourcemaps?: { filesToDeleteAfterUpload?: string[] }; }; expect(pluginOptions?.sourcemaps?.filesToDeleteAfterUpload).toBeUndefined(); @@ -286,14 +248,14 @@ describe('setupSourceMaps hooks', () => { const plugin = getCapturedPlugin(); if (plugin && typeof plugin.config === 'function') { - plugin.config({ build: { ssr: false }, plugins: [] }, { mode: 'production', command: 'build' }); + plugin.config({ build: { ssr: false }, plugins: [] } as UserConfig, { mode: 'production', command: 'build' }); } const nitroConfig = { rollupConfig: { plugins: [] as unknown[], output: {} }, dev: false }; await mockNuxt.triggerHook('nitro:config', nitroConfig); expect(consoleLogSpy).toHaveBeenCalledWith( - expect.stringContaining('[Sentry] Adding Sentry Vite plugin to the client runtime.'), + expect.stringContaining('[Sentry] Validating Vite config for the client runtime.'), ); expect(consoleLogSpy).toHaveBeenCalledWith( expect.stringContaining('[Sentry] Adding Sentry Rollup plugin to the server runtime.'), @@ -310,7 +272,7 @@ describe('setupSourceMaps hooks', () => { const plugin = getCapturedPlugin(); if (plugin && typeof plugin.config === 'function') { - plugin.config({ build: {}, plugins: [] }, { mode: 'production', 
command: 'build' }); + plugin.config({ build: {}, plugins: [] } as UserConfig, { mode: 'production', command: 'build' }); } await mockNuxt.triggerHook('nitro:config', { rollupConfig: { plugins: [] }, dev: false }); diff --git a/packages/opentelemetry/package.json b/packages/opentelemetry/package.json index e6df263bbc3b..89fd38e2d558 100644 --- a/packages/opentelemetry/package.json +++ b/packages/opentelemetry/package.json @@ -50,10 +50,10 @@ }, "devDependencies": { "@opentelemetry/api": "^1.9.0", - "@opentelemetry/context-async-hooks": "^2.5.1", - "@opentelemetry/core": "^2.5.1", - "@opentelemetry/sdk-trace-base": "^2.5.1", - "@opentelemetry/semantic-conventions": "^1.39.0" + "@opentelemetry/context-async-hooks": "^2.6.0", + "@opentelemetry/core": "^2.6.0", + "@opentelemetry/sdk-trace-base": "^2.6.0", + "@opentelemetry/semantic-conventions": "^1.40.0" }, "scripts": { "build": "run-p build:transpile build:types", @@ -68,8 +68,8 @@ "build:tarball": "npm pack", "circularDepCheck": "madge --circular src/index.ts", "clean": "rimraf build coverage sentry-opentelemetry-*.tgz", - "fix": "oxlint . --fix", - "lint": "oxlint .", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --fix --type-aware", + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . 
--type-aware", "lint:es-compatibility": "es-check es2022 ./build/cjs/*.js && es-check es2022 ./build/esm/*.js --module", "test": "yarn test:unit", "test:unit": "vitest run", diff --git a/packages/opentelemetry/src/utils/parseSpanDescription.ts b/packages/opentelemetry/src/utils/parseSpanDescription.ts index bb9c5f59acf7..fc0f92143516 100644 --- a/packages/opentelemetry/src/utils/parseSpanDescription.ts +++ b/packages/opentelemetry/src/utils/parseSpanDescription.ts @@ -220,6 +220,7 @@ export function descriptionForHttpMethod( function getGraphqlOperationNamesFromAttribute(attr: AttributeValue): string { if (Array.isArray(attr)) { + // oxlint-disable-next-line typescript/require-array-sort-compare const sorted = attr.slice().sort(); // Up to 5 items, we just add all of them diff --git a/packages/profiling-node/package.json b/packages/profiling-node/package.json index 909870deb062..a284f5901978 100644 --- a/packages/profiling-node/package.json +++ b/packages/profiling-node/package.json @@ -45,9 +45,9 @@ ], "scripts": { "clean": "rm -rf build", - "lint": "oxlint .", + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --type-aware", "lint:es-compatibility": "es-check es2022 ./build/cjs/*.js && es-check es2022 ./build/esm/*.js --module", - "fix": "oxlint . --fix", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . 
--fix --type-aware", "build": "yarn build:types && yarn build:transpile", "build:transpile": "yarn rollup -c rollup.npm.config.mjs", "build:types:downlevel": "yarn downlevel-dts build/types build/types-ts3.8 --to ts3.8", diff --git a/packages/profiling-node/src/integration.ts b/packages/profiling-node/src/integration.ts index 17ce6f702639..9b4fd1601420 100644 --- a/packages/profiling-node/src/integration.ts +++ b/packages/profiling-node/src/integration.ts @@ -15,7 +15,7 @@ import { import type { NodeClient, NodeOptions } from '@sentry/node'; import { CpuProfilerBindings, ProfileFormat, type RawThreadCpuProfile } from '@sentry-internal/node-cpu-profiler'; import { DEBUG_BUILD } from './debug-build'; -import { NODE_MAJOR, NODE_VERSION } from './nodeVersion'; +import { NODE_MAJOR } from './nodeVersion'; import { MAX_PROFILE_DURATION_MS, maybeProfileSpan, stopSpanProfile } from './spanProfileUtils'; import { addProfilesToEnvelope, @@ -634,7 +634,7 @@ export const _nodeProfilingIntegration = ((): ProfilingIntegration = consoleSandbox(() => { // eslint-disable-next-line no-console console.warn( - `[Sentry Profiling] You are using a Node.js version that does not have prebuilt binaries (${NODE_VERSION}).`, + `[Sentry Profiling] You are using a Node.js version that does not have prebuilt binaries (${NODE_MAJOR}).`, 'The @sentry/profiling-node package only has prebuilt support for the following LTS versions of Node.js: 16, 18, 20, 22, 24.', 'To use the @sentry/profiling-node package with this version of Node.js, you will need to compile the native addon from source.', 'See: https://github.com/getsentry/sentry-javascript/tree/develop/packages/profiling-node#building-the-package-from-source', diff --git a/packages/profiling-node/test/integration.test.ts b/packages/profiling-node/test/integration.test.ts index a0ff14d4602e..41f195b8b3c6 100644 --- a/packages/profiling-node/test/integration.test.ts +++ b/packages/profiling-node/test/integration.test.ts @@ -5,6 +5,7 @@ import 
type { NodeClientOptions } from '@sentry/node/build/types/types'; import { CpuProfilerBindings } from '@sentry-internal/node-cpu-profiler'; import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'; import { _nodeProfilingIntegration } from '../src/integration'; +import { NODE_VERSION } from '../src/nodeVersion'; function makeLegacySpanProfilingClient(): [Sentry.NodeClient, Transport] { const integration = _nodeProfilingIntegration(); @@ -984,6 +985,16 @@ describe('ProfilingIntegration', () => { }); }); +describe('NODE_VERSION', () => { + it('is a plain object without a custom toString', () => { + // NODE_VERSION is a SemVer object from parseSemver — it has no custom toString(). + // Code should never interpolate it directly in a template literal. + // Use process.versions.node or format the components manually instead. + expect(`${NODE_VERSION}`).toBe('[object Object]'); + expect(`${NODE_VERSION.major}.${NODE_VERSION.minor}.${NODE_VERSION.patch}`).toMatch(/^\d+\.\d+\.\d+$/); + }); +}); + describe('Legacy vs Current API compat', () => { describe('legacy', () => { describe('span profiling', () => { diff --git a/packages/react-router/package.json b/packages/react-router/package.json index a61347702825..eb6b595dc391 100644 --- a/packages/react-router/package.json +++ b/packages/react-router/package.json @@ -46,9 +46,9 @@ }, "dependencies": { "@opentelemetry/api": "^1.9.0", - "@opentelemetry/core": "^2.5.1", - "@opentelemetry/instrumentation": "^0.211.0", - "@opentelemetry/semantic-conventions": "^1.39.0", + "@opentelemetry/core": "^2.6.0", + "@opentelemetry/instrumentation": "^0.213.0", + "@opentelemetry/semantic-conventions": "^1.40.0", "@sentry/browser": "10.43.0", "@sentry/cli": "^2.58.5", "@sentry/core": "10.43.0", @@ -81,8 +81,8 @@ "build:tarball": "npm pack", "circularDepCheck": "madge --circular src/index.client.ts && madge --circular src/index.server.ts && madge --circular src/index.types.ts", "clean": "rimraf build coverage 
sentry-react-router-*.tgz", - "fix": "oxlint . --fix", - "lint": "oxlint .", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --fix --type-aware", + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --type-aware", "lint:es-compatibility": "es-check es2020 ./build/cjs/*.js && es-check es2020 ./build/esm/*.js --module", "test": "yarn test:unit", "test:unit": "vitest run", diff --git a/packages/react/package.json b/packages/react/package.json index caef73916ae1..e10f06066131 100644 --- a/packages/react/package.json +++ b/packages/react/package.json @@ -79,8 +79,8 @@ "build:tarball": "npm pack", "circularDepCheck": "madge --circular src/index.ts", "clean": "rimraf build coverage sentry-react-*.tgz", - "fix": "oxlint . --fix", - "lint": "oxlint .", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --fix --type-aware", + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . 
--type-aware", "lint:es-compatibility": "es-check es2020 ./build/cjs/*.js && es-check es2020 ./build/esm/*.js --module", "test": "vitest run", "test:watch": "vitest --watch", diff --git a/packages/react/src/hoist-non-react-statics.ts b/packages/react/src/hoist-non-react-statics.ts index 792e05584e38..1b9fa7f01e75 100644 --- a/packages/react/src/hoist-non-react-statics.ts +++ b/packages/react/src/hoist-non-react-statics.ts @@ -146,7 +146,7 @@ export function hoistNonReactStatics< // Use key directly - String(key) throws for Symbols if minified to '' + key (#18966) if ( !KNOWN_STATICS[key as keyof typeof KNOWN_STATICS] && - !(excludelist && excludelist[key as keyof C]) && + !excludelist?.[key as keyof C] && !sourceStatics?.[key as string] && !targetStatics?.[key as string] && !getOwnPropertyDescriptor(targetComponent, key) // Don't overwrite existing properties @@ -157,7 +157,7 @@ export function hoistNonReactStatics< try { // Avoid failures from read-only properties defineProperty(targetComponent, key, descriptor); - } catch (e) { + } catch (_e) { // Silently ignore errors } } diff --git a/packages/react/src/reactrouter-compat-utils/instrumentation.tsx b/packages/react/src/reactrouter-compat-utils/instrumentation.tsx index 5725309ff12b..54c344956948 100644 --- a/packages/react/src/reactrouter-compat-utils/instrumentation.tsx +++ b/packages/react/src/reactrouter-compat-utils/instrumentation.tsx @@ -249,6 +249,7 @@ function trackLazyRouteLoad(span: Span, promise: Promise): void { promises.add(promise); // Clean up when promise resolves/rejects + // oxlint-disable-next-line typescript/no-floating-promises promise.finally(() => { const currentPromises = pendingLazyRouteLoads.get(span); if (currentPromises) { @@ -613,8 +614,8 @@ export function createV6CompatibleWrapCreateMemoryRouter< const initialEntries = opts?.initialEntries; const initialIndex = opts?.initialIndex; - const hasOnlyOneInitialEntry = initialEntries && initialEntries.length === 1; - const 
hasIndexedEntry = initialIndex !== undefined && initialEntries && initialEntries[initialIndex]; + const hasOnlyOneInitialEntry = initialEntries?.length === 1; + const hasIndexedEntry = initialIndex !== undefined && initialEntries?.[initialIndex]; initialEntry = hasOnlyOneInitialEntry ? initialEntries[0] diff --git a/packages/react/src/reactrouter-compat-utils/route-manifest.ts b/packages/react/src/reactrouter-compat-utils/route-manifest.ts index 6160cad657c3..bdc49f76705e 100644 --- a/packages/react/src/reactrouter-compat-utils/route-manifest.ts +++ b/packages/react/src/reactrouter-compat-utils/route-manifest.ts @@ -36,7 +36,7 @@ const SORTED_MANIFEST_CACHE = new WeakMap(); * Optionally strips a basename prefix before matching. */ export function matchRouteManifest(pathname: string, manifest: string[], basename?: string): string | null { - if (!pathname || !manifest || !manifest.length) { + if (!pathname || !manifest?.length) { return null; } diff --git a/packages/remix/package.json b/packages/remix/package.json index ebb95e71ee39..2c494592d7a1 100644 --- a/packages/remix/package.json +++ b/packages/remix/package.json @@ -65,8 +65,8 @@ }, "dependencies": { "@opentelemetry/api": "^1.9.0", - "@opentelemetry/instrumentation": "^0.211.0", - "@opentelemetry/semantic-conventions": "^1.39.0", + "@opentelemetry/instrumentation": "^0.213.0", + "@opentelemetry/semantic-conventions": "^1.40.0", "@remix-run/router": "^1.23.2", "@sentry/cli": "^2.58.5", "@sentry/core": "10.43.0", @@ -103,8 +103,8 @@ "build:tarball": "npm pack", "circularDepCheck": "madge --circular src/index.server.ts && madge --circular src/index.client.ts", "clean": "rimraf build coverage sentry-remix-*.tgz", - "fix": "oxlint . --fix", - "lint": "oxlint .", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --fix --type-aware", + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . 
--type-aware", "lint:es-compatibility": "es-check es2020 ./build/cjs/*.js && es-check es2020 ./build/esm/*.js --module", "test": "yarn test:unit", "test:integration": "run-s test:integration:clean test:integration:prepare test:integration:client test:integration:server", diff --git a/packages/remix/src/client/performance.tsx b/packages/remix/src/client/performance.tsx index 213f4eb43176..b3cde64d72de 100644 --- a/packages/remix/src/client/performance.tsx +++ b/packages/remix/src/client/performance.tsx @@ -169,7 +169,7 @@ export function withSentry

, R extends React.Co const matches = _useMatches(); _useEffect(() => { - const lastMatch = matches && matches[matches.length - 1]; + const lastMatch = matches?.[matches.length - 1]; if (lastMatch) { const { name, source } = getTransactionNameAndSource(location.pathname, lastMatch.id); diff --git a/packages/remix/src/client/remixRouteParameterization.ts b/packages/remix/src/client/remixRouteParameterization.ts index 5f20c3c81c79..6a587afffdd9 100644 --- a/packages/remix/src/client/remixRouteParameterization.ts +++ b/packages/remix/src/client/remixRouteParameterization.ts @@ -92,7 +92,7 @@ function getManifest(): RouteManifest | null { cachedManifest = manifest; cachedManifestString = currentManifestString; return manifest; - } catch (error) { + } catch { DEBUG_BUILD && debug.warn('Could not extract route manifest'); return null; } diff --git a/packages/remix/src/utils/utils.ts b/packages/remix/src/utils/utils.ts index 83dda0b816a3..c179bc43f61f 100644 --- a/packages/remix/src/utils/utils.ts +++ b/packages/remix/src/utils/utils.ts @@ -13,7 +13,7 @@ type ServerRouteManifest = ServerBuild['routes']; export async function storeFormDataKeys( args: LoaderFunctionArgs | ActionFunctionArgs, span: Span, - formDataKeys?: Record | undefined, + formDataKeys?: Record, ): Promise { try { // We clone the request for Remix be able to read the FormData later. diff --git a/packages/replay-canvas/package.json b/packages/replay-canvas/package.json index 0e278ae2c0ab..502efad3c028 100644 --- a/packages/replay-canvas/package.json +++ b/packages/replay-canvas/package.json @@ -44,8 +44,8 @@ "build:tarball": "npm pack", "circularDepCheck": "madge --circular src/index.ts", "clean": "rimraf build sentry-replay-*.tgz", - "fix": "oxlint . --fix", - "lint": "oxlint .", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --fix --type-aware", + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . 
--type-aware", "lint:es-compatibility": "es-check es2020 ./build/{bundles,npm/cjs}/*.js && es-check es2020 ./build/npm/esm/*.js --module", "test": "vitest run", "test:watch": "vitest --watch", diff --git a/packages/replay-internal/package.json b/packages/replay-internal/package.json index 118ca8890d87..27724a217d67 100644 --- a/packages/replay-internal/package.json +++ b/packages/replay-internal/package.json @@ -57,12 +57,10 @@ "build:tarball": "npm pack", "circularDepCheck": "madge --circular src/index.ts", "clean": "rimraf build sentry-replay-*.tgz", - "fix": "run-s fix:oxfmt fix:oxlint", - "fix:oxlint": "oxlint . --fix", - "fix:oxfmt": "oxfmt \"src/**/*.ts\" \"test/**/*.ts\" --write", - "lint": "run-s lint:oxfmt lint:oxlint", - "lint:oxlint": "oxlint .", - "lint:oxfmt": "oxfmt \"src/**/*.ts\" \"test/**/*.ts\" --check", + "format": "oxfmt \"src/**/*.ts\" \"test/**/*.ts\" --write", + "format:check": "oxfmt \"src/**/*.ts\" \"test/**/*.ts\" --check", + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --type-aware", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . 
--fix --type-aware", "lint:es-compatibility": "es-check es2020 ./build/{bundles,npm/cjs}/*.js && es-check es2020 ./build/npm/esm/*.js --module", "test": "vitest run", "test:watch": "vitest --watch", diff --git a/packages/replay-internal/src/coreHandlers/handleAfterSendEvent.ts b/packages/replay-internal/src/coreHandlers/handleAfterSendEvent.ts index 4df1b62532ac..ae0f6ad86be4 100644 --- a/packages/replay-internal/src/coreHandlers/handleAfterSendEvent.ts +++ b/packages/replay-internal/src/coreHandlers/handleAfterSendEvent.ts @@ -57,7 +57,7 @@ function handleErrorEvent(replay: ReplayContainer, event: ErrorEvent): void { // If error event is tagged with replay id it means it was sampled (when in buffer mode) // Need to be very careful that this does not cause an infinite loop - if (replay.recordingMode !== 'buffer' || !event.tags || !event.tags.replayId) { + if (replay.recordingMode !== 'buffer' || !event.tags?.replayId) { return; } diff --git a/packages/replay-internal/src/coreHandlers/handleHistory.ts b/packages/replay-internal/src/coreHandlers/handleHistory.ts index e12c65745914..aa85757e3a3e 100644 --- a/packages/replay-internal/src/coreHandlers/handleHistory.ts +++ b/packages/replay-internal/src/coreHandlers/handleHistory.ts @@ -38,6 +38,7 @@ export function handleHistorySpanListener(replay: ReplayContainer): (handlerData replay.triggerUserActivity(); replay.addUpdate(() => { + // oxlint-disable-next-line typescript/no-floating-promises createPerformanceSpans(replay, [result]); // Returning false to flush return false; diff --git a/packages/replay-internal/src/coreHandlers/util/addNetworkBreadcrumb.ts b/packages/replay-internal/src/coreHandlers/util/addNetworkBreadcrumb.ts index b67b27e6ab7f..38a19d1030f9 100644 --- a/packages/replay-internal/src/coreHandlers/util/addNetworkBreadcrumb.ts +++ b/packages/replay-internal/src/coreHandlers/util/addNetworkBreadcrumb.ts @@ -20,6 +20,7 @@ export function addNetworkBreadcrumb( } replay.addUpdate(() => { + // 
oxlint-disable-next-line typescript/no-floating-promises createPerformanceSpans(replay, [result]); // Returning true will cause `addUpdate` to not flush // We do not want network requests to cause a flush. This will prevent diff --git a/packages/replay-internal/src/replay.ts b/packages/replay-internal/src/replay.ts index 10dba8758d8a..cab408ca9d5d 100644 --- a/packages/replay-internal/src/replay.ts +++ b/packages/replay-internal/src/replay.ts @@ -748,8 +748,7 @@ export class ReplayContainer implements ReplayContainerInterface { if ( this._lastActivity && isExpired(this._lastActivity, this.timeouts.sessionIdlePause) && - this.session && - this.session.sampled === 'session' + this.session?.sampled === 'session' ) { // Pause recording only for session-based replays. Otherwise, resuming // will create a new replay and will conflict with users who only choose diff --git a/packages/replay-internal/src/util/handleRecordingEmit.ts b/packages/replay-internal/src/util/handleRecordingEmit.ts index aeb49f0cd259..215f94daa1db 100644 --- a/packages/replay-internal/src/util/handleRecordingEmit.ts +++ b/packages/replay-internal/src/util/handleRecordingEmit.ts @@ -146,7 +146,7 @@ export function createOptionsEvent(replay: ReplayContainer): ReplayOptionFrameEv */ function addSettingsEvent(replay: ReplayContainer, isCheckout?: boolean): void { // Only need to add this event when sending the first segment - if (!isCheckout || !replay.session || replay.session.segmentId !== 0) { + if (!isCheckout || replay.session?.segmentId !== 0) { return; } diff --git a/packages/replay-worker/package.json b/packages/replay-worker/package.json index 7f41be6dfc46..a38566d64587 100644 --- a/packages/replay-worker/package.json +++ b/packages/replay-worker/package.json @@ -46,8 +46,8 @@ "build:dev:watch": "yarn build:watch", "build:transpile:watch": "yarn build:transpile --watch", "clean": "rimraf build", - "fix": "oxlint . 
--fix", - "lint": "oxlint .", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --fix --type-aware", + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --type-aware", "lint:es-compatibility": "es-check es2020 ./build/esm/*.js --module", "test": "vitest run", "test:watch": "vitest --watch" diff --git a/packages/replay-worker/test/unit/Compressor.test.ts b/packages/replay-worker/test/unit/Compressor.test.ts index 74f01ef42168..2d4181c2ec28 100644 --- a/packages/replay-worker/test/unit/Compressor.test.ts +++ b/packages/replay-worker/test/unit/Compressor.test.ts @@ -30,7 +30,7 @@ describe('Compressor', () => { const compressor = new Compressor(); // @ts-expect-error ignoring type for test - expect(() => void compressor.addEvent(undefined)).toThrow(); + expect(() => compressor.addEvent(undefined)).toThrow(); const compressed = compressor.finish(); diff --git a/packages/solid/package.json b/packages/solid/package.json index afe5dbe97ce3..4d0b8c358205 100644 --- a/packages/solid/package.json +++ b/packages/solid/package.json @@ -94,8 +94,8 @@ "build:tarball": "npm pack", "circularDepCheck": "madge --circular src/index.ts && madge --circular src/solidrouter.ts && madge --circular src/tanstackrouter.ts", "clean": "rimraf build coverage sentry-solid-*.tgz ./*.d.ts ./*.d.ts.map", - "fix": "oxlint . --fix", - "lint": "oxlint .", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --fix --type-aware", + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . 
--type-aware", "lint:es-compatibility": "es-check es2020 ./build/cjs/*.js && es-check es2020 ./build/esm/*.js --module", "test": "vitest run", "test:watch": "vitest --watch", diff --git a/packages/solidstart/package.json b/packages/solidstart/package.json index ad148a4725eb..b5f20974225d 100644 --- a/packages/solidstart/package.json +++ b/packages/solidstart/package.json @@ -95,8 +95,8 @@ "build:tarball": "npm pack", "circularDepCheck": "madge --circular src/index.client.ts && madge --circular src/index.server.ts && madge --circular src/index.types.ts && madge --circular src/solidrouter.client.ts && madge --circular src/solidrouter.server.ts && madge --circular src/solidrouter.ts", "clean": "rimraf build coverage sentry-solidstart-*.tgz ./*.d.ts ./*.d.ts.map ./client ./server", - "fix": "oxlint . --fix", - "lint": "oxlint .", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --fix --type-aware", + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . 
--type-aware", "lint:es-compatibility": "es-check es2020 ./build/cjs/*.js && es-check es2020 ./build/esm/*.js --module", "test": "yarn test:unit", "test:unit": "vitest run", diff --git a/packages/solidstart/src/index.types.ts b/packages/solidstart/src/index.types.ts index da45898b3915..4c5ff491c740 100644 --- a/packages/solidstart/src/index.types.ts +++ b/packages/solidstart/src/index.types.ts @@ -22,8 +22,8 @@ export declare const contextLinesIntegration: typeof clientSdk.contextLinesInteg export declare const getDefaultIntegrations: (options: Options) => Integration[]; export declare const defaultStackParser: StackParser; -export declare function close(timeout?: number | undefined): PromiseLike; -export declare function flush(timeout?: number | undefined): PromiseLike; +export declare function close(timeout?: number): PromiseLike; +export declare function flush(timeout?: number): PromiseLike; export declare function lastEventId(): string | undefined; export declare const logger: typeof clientSdk.logger | typeof serverSdk.logger; diff --git a/packages/solidstart/src/server/withServerActionInstrumentation.ts b/packages/solidstart/src/server/withServerActionInstrumentation.ts index 753188f805c3..bcd9389a6bf4 100644 --- a/packages/solidstart/src/server/withServerActionInstrumentation.ts +++ b/packages/solidstart/src/server/withServerActionInstrumentation.ts @@ -42,6 +42,7 @@ export async function withServerActionInstrumentation { + // oxlint-disable-next-line typescript/await-thenable -- callback may be async at runtime const result = await handleCallbackErrors(callback, error => { if (!isRedirect(error)) { span.setStatus({ code: SPAN_STATUS_ERROR, message: 'internal_error' }); diff --git a/packages/svelte/package.json b/packages/svelte/package.json index dc5983497ffc..a13140f6ce03 100644 --- a/packages/svelte/package.json +++ b/packages/svelte/package.json @@ -65,8 +65,8 @@ "build:tarball": "npm pack", "circularDepCheck": "madge --circular src/index.ts", "clean": 
"rimraf build coverage sentry-svelte-*.tgz", - "fix": "oxlint . --fix", - "lint": "oxlint .", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --fix --type-aware", + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --type-aware", "lint:es-compatibility": "es-check es2020 ./build/cjs/*.js && es-check es2020 ./build/esm/*.js --module", "test": "vitest run", "test:watch": "vitest --watch", diff --git a/packages/svelte/test/components/Dummy.svelte b/packages/svelte/test/components/Dummy.svelte index ef814473f6cf..12c066fc2c1b 100644 --- a/packages/svelte/test/components/Dummy.svelte +++ b/packages/svelte/test/components/Dummy.svelte @@ -3,6 +3,7 @@ import * as Sentry from '../../src/index'; // Pass options to trackComponent as props of this component + // oxlint-disable-next-line no-unassigned-vars export let options; Sentry.trackComponent(options); diff --git a/packages/sveltekit/package.json b/packages/sveltekit/package.json index cd317176cfe8..c777e4136055 100644 --- a/packages/sveltekit/package.json +++ b/packages/sveltekit/package.json @@ -75,8 +75,8 @@ "build:tarball": "npm pack", "circularDepCheck": "madge --circular src/index.client.ts && madge --circular src/index.server.ts && madge --circular src/index.types.ts", "clean": "rimraf build coverage sentry-sveltekit-*.tgz", - "fix": "oxlint . --fix", - "lint": "oxlint .", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --fix --type-aware", + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . 
--type-aware", "lint:es-compatibility": "es-check es2020 ./build/cjs/*.js && es-check es2020 ./build/esm/*.js --module", "test": "yarn test:unit", "test:unit": "vitest run", diff --git a/packages/sveltekit/src/index.types.ts b/packages/sveltekit/src/index.types.ts index f6bdfef640c7..d46e88e720ed 100644 --- a/packages/sveltekit/src/index.types.ts +++ b/packages/sveltekit/src/index.types.ts @@ -54,8 +54,8 @@ export declare const vercelAIIntegration: typeof serverSdk.vercelAIIntegration; export declare const getDefaultIntegrations: (options: Options) => Integration[]; export declare const defaultStackParser: StackParser; -export declare function close(timeout?: number | undefined): PromiseLike; -export declare function flush(timeout?: number | undefined): PromiseLike; +export declare function close(timeout?: number): PromiseLike; +export declare function flush(timeout?: number): PromiseLike; export declare function lastEventId(): string | undefined; export declare function trackComponent(options: clientSdk.TrackingOptions): ReturnType; diff --git a/packages/sveltekit/src/vite/sourceMaps.ts b/packages/sveltekit/src/vite/sourceMaps.ts index 52f72bac3e52..ac29d436c43d 100644 --- a/packages/sveltekit/src/vite/sourceMaps.ts +++ b/packages/sveltekit/src/vite/sourceMaps.ts @@ -82,6 +82,7 @@ export async function makeCustomSentryVitePlugins( const { debug } = mergedOptions; + // oxlint-disable-next-line typescript/await-thenable -- sentryVitePlugin may return a Promise in some versions const sentryPlugins: Plugin[] = await sentryVitePlugin(mergedOptions); // In @sentry/vite-plugin v5, all functionality is consolidated into a single 'sentry-vite-plugin'. 
diff --git a/packages/sveltekit/test/server-common/handle.test.ts b/packages/sveltekit/test/server-common/handle.test.ts index 7ecd222ad780..286bf7254fdb 100644 --- a/packages/sveltekit/test/server-common/handle.test.ts +++ b/packages/sveltekit/test/server-common/handle.test.ts @@ -314,7 +314,7 @@ describe('sentryHandle', () => { it('send errors to Sentry', async () => { try { await sentryHandle()({ event: mockEvent(), resolve: resolve(type, isError) }); - } catch (e) { + } catch (_e) { expect(mockCaptureException).toBeCalledTimes(1); expect(mockCaptureException).toBeCalledWith(expect.any(Error), { mechanism: { handled: false, type: 'auto.function.sveltekit.handle' }, diff --git a/packages/tanstackstart-react/package.json b/packages/tanstackstart-react/package.json index a4894621fe29..f3979d664831 100644 --- a/packages/tanstackstart-react/package.json +++ b/packages/tanstackstart-react/package.json @@ -64,7 +64,7 @@ }, "dependencies": { "@opentelemetry/api": "^1.9.0", - "@opentelemetry/semantic-conventions": "^1.37.0", + "@opentelemetry/semantic-conventions": "^1.40.0", "@sentry-internal/browser-utils": "10.43.0", "@sentry/core": "10.43.0", "@sentry/node": "10.43.0", @@ -87,8 +87,8 @@ "build:tarball": "npm pack", "circularDepCheck": "madge --circular src/index.client.ts && madge --circular src/index.server.ts && madge --circular src/index.types.ts", "clean": "rimraf build coverage sentry-tanstackstart-react-*.tgz", - "fix": "oxlint . --fix", - "lint": "oxlint .", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --fix --type-aware", + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . 
--type-aware", "lint:es-compatibility": "es-check es2020 ./build/cjs/*.js && es-check es2020 ./build/esm/*.js --module", "test": "yarn test:unit", "test:unit": "vitest run", diff --git a/packages/tanstackstart/package.json b/packages/tanstackstart/package.json index 6634be81d3e8..77f9a352d93f 100644 --- a/packages/tanstackstart/package.json +++ b/packages/tanstackstart/package.json @@ -52,8 +52,8 @@ "build:tarball": "npm pack", "circularDepCheck": "madge --circular src/index.client.ts && madge --circular src/index.server.ts && madge --circular src/index.types.ts", "clean": "rimraf build coverage sentry-tanstackstart-*.tgz", - "fix": "oxlint . --fix", - "lint": "oxlint .", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --fix --type-aware", + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --type-aware", "lint:es-compatibility": "es-check es2020 ./build/cjs/*.js && es-check es2020 ./build/esm/*.js --module", "test": "yarn test:unit", "test:unit": "vitest run", diff --git a/packages/types/package.json b/packages/types/package.json index 5dfce02adcd2..4ffdb5dc3cf1 100644 --- a/packages/types/package.json +++ b/packages/types/package.json @@ -50,9 +50,9 @@ "build:transpile:watch": "rollup -c rollup.npm.config.mjs --watch", "build:tarball": "npm pack", "clean": "rimraf build sentry-types-*.tgz", - "lint": "oxlint .", + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --type-aware", "lint:es-compatibility": "es-check es2020 ./build/cjs/*.js && es-check es2020 ./build/esm/*.js --module", - "fix": "oxlint . --fix", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . 
--fix --type-aware", "yalc:publish": "yalc publish --push --sig" }, "dependencies": { diff --git a/packages/vercel-edge/package.json b/packages/vercel-edge/package.json index 2419a83233a2..9cd0de37b0c3 100644 --- a/packages/vercel-edge/package.json +++ b/packages/vercel-edge/package.json @@ -40,14 +40,14 @@ }, "dependencies": { "@opentelemetry/api": "^1.9.0", - "@opentelemetry/resources": "^2.5.1", + "@opentelemetry/resources": "^2.6.0", "@sentry/core": "10.43.0" }, "devDependencies": { "@edge-runtime/types": "4.0.0", - "@opentelemetry/core": "^2.5.1", - "@opentelemetry/sdk-trace-base": "^2.5.1", - "@opentelemetry/semantic-conventions": "^1.39.0", + "@opentelemetry/core": "^2.6.0", + "@opentelemetry/sdk-trace-base": "^2.6.0", + "@opentelemetry/semantic-conventions": "^1.40.0", "@sentry/opentelemetry": "10.43.0" }, "scripts": { @@ -63,8 +63,8 @@ "build:tarball": "npm pack", "circularDepCheck": "madge --circular src/index.ts", "clean": "rimraf build coverage sentry-vercel-edge-*.tgz", - "fix": "oxlint . --fix", - "lint": "oxlint .", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --fix --type-aware", + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --type-aware", "lint:es-compatibility": "es-check es2020 ./build/cjs/*.js && es-check es2020 ./build/esm/*.js --module", "test": "vitest run", "test:watch": "vitest --watch", diff --git a/packages/vue/package.json b/packages/vue/package.json index 4441883c13ea..8bfc61d11b87 100644 --- a/packages/vue/package.json +++ b/packages/vue/package.json @@ -85,8 +85,8 @@ "build:tarball": "npm pack", "circularDepCheck": "madge --circular src/index.ts && madge --circular src/tanstackrouter.ts", "clean": "rimraf build coverage sentry-vue-*.tgz", - "fix": "oxlint . --fix", - "lint": "oxlint .", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . 
--fix --type-aware", + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --type-aware", "lint:es-compatibility": "es-check es2020 ./build/cjs/*.js && es-check es2020 ./build/esm/*.js --module", "test": "vitest run", "test:watch": "vitest --watch", diff --git a/packages/wasm/package.json b/packages/wasm/package.json index d477cec82fda..1cc1b320ffa9 100644 --- a/packages/wasm/package.json +++ b/packages/wasm/package.json @@ -59,8 +59,8 @@ "test:watch": "vitest --watch", "circularDepCheck": "madge --circular src/index.ts", "clean": "rimraf build coverage sentry-wasm-*.tgz", - "fix": "oxlint . --fix", - "lint": "oxlint .", + "lint:fix": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --fix --type-aware", + "lint": "OXLINT_TSGOLINT_DANGEROUSLY_SUPPRESS_PROGRAM_DIAGNOSTICS=true oxlint . --type-aware", "lint:es-compatibility": "es-check es2020 ./build/{bundles,npm/cjs}/*.js && es-check es2020 ./build/npm/esm/*.js --module", "yalc:publish": "yalc publish --push --sig" }, diff --git a/yarn.lock b/yarn.lock index c65cb34105f6..8b037c404496 100644 --- a/yarn.lock +++ b/yarn.lock @@ -2999,6 +2999,11 @@ resolved "https://registry.yarnpkg.com/@bomb.sh/tab/-/tab-0.0.12.tgz#68d9babce5d49df8c201fa993f1157ba3f61c2f0" integrity sha512-dYRwg4MqfHR5/BcTy285XOGRhjQFmNpaJBZ0tl2oU+RY595MQ5ApTF6j3OvauPAooHL6cfoOZMySQrOQztT8RQ== +"@borewit/text-codec@^0.2.1": + version "0.2.1" + resolved "https://registry.yarnpkg.com/@borewit/text-codec/-/text-codec-0.2.1.tgz#5d171538907a8cb395fdc2eb5e8f7947d96c7f2f" + integrity sha512-k7vvKPbf7J2fZ5klGRD9AeKfUvojuZIQ3BT5u7Jfv+puwXkUBUT5PVyMDfJZpy30CBDXGMgw7fguK/lpOMBvgw== + "@cfworker/json-schema@^4.0.2": version "4.1.1" resolved "https://registry.yarnpkg.com/@cfworker/json-schema/-/json-schema-4.1.1.tgz#4a2a3947ee9fa7b7c24be981422831b8674c3be6" @@ -3269,6 +3274,11 @@ dependencies: "@edge-runtime/primitives" "6.0.0" +"@effect/vitest@^0.23.9": + version "0.23.13" + resolved 
"https://registry.yarnpkg.com/@effect/vitest/-/vitest-0.23.13.tgz#17edf9d8e3443f080ff8fe93bd37b023612a07a4" + integrity sha512-F3x2phMXuVzqWexdcYp8v0z1qQHkKxp2UaHNbqZaEjPEp8FBz/iMwbi6iS/oIWzLfGF8XqdP8BGJptvGIJONNw== + "@ember-data/rfc395-data@^0.0.4": version "0.0.4" resolved "https://registry.yarnpkg.com/@ember-data/rfc395-data/-/rfc395-data-0.0.4.tgz#ecb86efdf5d7733a76ff14ea651a1b0ed1f8a843" @@ -4414,15 +4424,15 @@ resolved "https://registry.yarnpkg.com/@fastify/busboy/-/busboy-2.0.0.tgz#f22824caff3ae506b18207bad4126dbc6ccdb6b8" integrity sha512-JUFJad5lv7jxj926GPgymrWQxxjPYuJNiNjNMzqT+HiuP6Vl3dk5xzG+8sTX96np0ZAluvaMzPsjhHZ5rNuNQQ== -"@fastify/otel@0.16.0": - version "0.16.0" - resolved "https://registry.yarnpkg.com/@fastify/otel/-/otel-0.16.0.tgz#e003c9b81039490af9141a7f1397de6b05baa768" - integrity sha512-2304BdM5Q/kUvQC9qJO1KZq3Zn1WWsw+WWkVmFEaj1UE2hEIiuFqrPeglQOwEtw/ftngisqfQ3v70TWMmwhhHA== +"@fastify/otel@0.17.1": + version "0.17.1" + resolved "https://registry.yarnpkg.com/@fastify/otel/-/otel-0.17.1.tgz#a7f13edc40dbc2e0c2a59d54e388f11e4d2235ce" + integrity sha512-K4wyxfUZx2ux5o+b6BtTqouYFVILohLZmSbA2tKUueJstNcBnoGPVhllCaOvbQ3ZrXdUxUC/fyrSWSCqHhdOPg== dependencies: "@opentelemetry/core" "^2.0.0" - "@opentelemetry/instrumentation" "^0.208.0" + "@opentelemetry/instrumentation" "^0.212.0" "@opentelemetry/semantic-conventions" "^1.28.0" - minimatch "^10.0.3" + minimatch "^10.2.4" "@gar/promisify@^1.1.3": version "1.1.3" @@ -5197,10 +5207,10 @@ "@jridgewell/resolve-uri" "^3.1.0" "@jridgewell/sourcemap-codec" "^1.4.14" -"@js-joda/core@^5.6.1": - version "5.6.3" - resolved "https://registry.yarnpkg.com/@js-joda/core/-/core-5.6.3.tgz#41ae1c07de1ebe0f6dde1abcbc9700a09b9c6056" - integrity sha512-T1rRxzdqkEXcou0ZprN1q9yDRlvzCPLqmlNt5IIsGBzoEVgLCCYrKEwc84+TvsXuAc95VAZwtWD2zVsKPY4bcA== +"@js-joda/core@^5.6.5": + version "5.7.0" + resolved "https://registry.yarnpkg.com/@js-joda/core/-/core-5.7.0.tgz#526d437b07cbb41e28df34d487cbfccbe730185b" + integrity 
sha512-WBu4ULVVxySLLzK1Ppq+OdfP+adRS4ntmDQT915rzDJ++i95gc2jZkM5B6LWEAwN3lGXpfie3yPABozdD3K3Vg== "@kwsites/file-exists@^1.1.1": version "1.1.1" @@ -6182,17 +6192,17 @@ dependencies: "@opentelemetry/api" "^1.3.0" -"@opentelemetry/api-logs@0.208.0": - version "0.208.0" - resolved "https://registry.yarnpkg.com/@opentelemetry/api-logs/-/api-logs-0.208.0.tgz#56d3891010a1fa1cf600ba8899ed61b43ace511c" - integrity sha512-CjruKY9V6NMssL/T1kAFgzosF1v9o6oeN+aX5JB/C/xPNtmgIJqcXHG7fA82Ou1zCpWGl4lROQUKwUNE1pMCyg== +"@opentelemetry/api-logs@0.212.0": + version "0.212.0" + resolved "https://registry.yarnpkg.com/@opentelemetry/api-logs/-/api-logs-0.212.0.tgz#ec66a0951b84b1f082e13fd8a027b9f9d65a3f7a" + integrity sha512-TEEVrLbNROUkYY51sBJGk7lO/OLjuepch8+hmpM6ffMJQ2z/KVCjdHuCFX6fJj8OkJP2zckPjrJzQtXU3IAsFg== dependencies: "@opentelemetry/api" "^1.3.0" -"@opentelemetry/api-logs@0.211.0": - version "0.211.0" - resolved "https://registry.yarnpkg.com/@opentelemetry/api-logs/-/api-logs-0.211.0.tgz#32d9ed98939956a84d4e2ff5e01598cb9d28d744" - integrity sha512-swFdZq8MCdmdR22jTVGQDhwqDzcI4M10nhjXkLr1EsIzXgZBqm4ZlmmcWsg3TSNf+3mzgOiqveXmBLZuDi2Lgg== +"@opentelemetry/api-logs@0.213.0": + version "0.213.0" + resolved "https://registry.yarnpkg.com/@opentelemetry/api-logs/-/api-logs-0.213.0.tgz#c7abc7d3c4586cfbfd737c0a2fcfb2323a9def75" + integrity sha512-zRM5/Qj6G84Ej3F1yt33xBVY/3tnMxtL1fiDIxYbDWYaZ/eudVw3/PBiZ8G7JwUxXxjW8gU4g6LnOyfGKYHYgw== dependencies: "@opentelemetry/api" "^1.3.0" @@ -6201,240 +6211,233 @@ resolved "https://registry.yarnpkg.com/@opentelemetry/api/-/api-1.9.0.tgz#d03eba68273dc0f7509e2a3d5cba21eae10379fe" integrity sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg== -"@opentelemetry/context-async-hooks@^2.5.1": - version "2.5.1" - resolved "https://registry.yarnpkg.com/@opentelemetry/context-async-hooks/-/context-async-hooks-2.5.1.tgz#457b8f9c1e219bf6e22b549d90f773db0a38fe06" - integrity 
sha512-MHbu8XxCHcBn6RwvCt2Vpn1WnLMNECfNKYB14LI5XypcgH4IE0/DiVifVR9tAkwPMyLXN8dOoPJfya3IryLQVw== - -"@opentelemetry/core@2.5.0": - version "2.5.0" - resolved "https://registry.yarnpkg.com/@opentelemetry/core/-/core-2.5.0.tgz#3b2ac6cf471ed9a85eea836048a4de77a2e549d3" - integrity sha512-ka4H8OM6+DlUhSAZpONu0cPBtPPTQKxbxVzC4CzVx5+K4JnroJVBtDzLAMx4/3CDTJXRvVFhpFjtl4SaiTNoyQ== - dependencies: - "@opentelemetry/semantic-conventions" "^1.29.0" +"@opentelemetry/context-async-hooks@^2.6.0": + version "2.6.0" + resolved "https://registry.yarnpkg.com/@opentelemetry/context-async-hooks/-/context-async-hooks-2.6.0.tgz#6c824e900630b378233c1a78ca7f0dc5a3b460b2" + integrity sha512-L8UyDwqpTcbkIK5cgwDRDYDoEhQoj8wp8BwsO19w3LB1Z41yEQm2VJyNfAi9DrLP/YTqXqWpKHyZfR9/tFYo1Q== -"@opentelemetry/core@2.5.1", "@opentelemetry/core@^2.0.0", "@opentelemetry/core@^2.5.1": - version "2.5.1" - resolved "https://registry.yarnpkg.com/@opentelemetry/core/-/core-2.5.1.tgz#b5d830ab499bc13e29f6efa88a165630f25d2ad2" - integrity sha512-Dwlc+3HAZqpgTYq0MUyZABjFkcrKTePwuiFVLjahGD8cx3enqihmpAmdgNFO1R4m/sIe5afjJrA25Prqy4NXlA== +"@opentelemetry/core@2.6.0", "@opentelemetry/core@^2.0.0", "@opentelemetry/core@^2.6.0": + version "2.6.0" + resolved "https://registry.yarnpkg.com/@opentelemetry/core/-/core-2.6.0.tgz#719c829ed98bd7af808a2d2c83374df1fd1f3c66" + integrity sha512-HLM1v2cbZ4TgYN6KEOj+Bbj8rAKriOdkF9Ed3tG25FoprSiQl7kYc+RRT6fUZGOvx0oMi5U67GoFdT+XUn8zEg== dependencies: "@opentelemetry/semantic-conventions" "^1.29.0" -"@opentelemetry/instrumentation-amqplib@0.58.0": - version "0.58.0" - resolved "https://registry.yarnpkg.com/@opentelemetry/instrumentation-amqplib/-/instrumentation-amqplib-0.58.0.tgz#e3dc86ebfa7d72fe861a63b1c24a062faeb64a8c" - integrity sha512-fjpQtH18J6GxzUZ+cwNhWUpb71u+DzT7rFkg5pLssDGaEber91Y2WNGdpVpwGivfEluMlNMZumzjEqfg8DeKXQ== +"@opentelemetry/instrumentation-amqplib@0.60.0": + version "0.60.0" + resolved 
"https://registry.yarnpkg.com/@opentelemetry/instrumentation-amqplib/-/instrumentation-amqplib-0.60.0.tgz#a2b2abe3cf433bea166c18a703c8ddf6accf83da" + integrity sha512-q/B2IvoVXRm1M00MvhnzpMN6rKYOszPXVsALi6u0ss4AYHe+TidZEtLW9N1ZhrobI1dSriHnBqqtAOZVAv07sg== dependencies: "@opentelemetry/core" "^2.0.0" - "@opentelemetry/instrumentation" "^0.211.0" + "@opentelemetry/instrumentation" "^0.213.0" "@opentelemetry/semantic-conventions" "^1.33.0" -"@opentelemetry/instrumentation-aws-sdk@0.66.0": - version "0.66.0" - resolved "https://registry.yarnpkg.com/@opentelemetry/instrumentation-aws-sdk/-/instrumentation-aws-sdk-0.66.0.tgz#f81fbcf8b4efc3ed227fa4ac6235a61ddb451a3f" - integrity sha512-K+vFDsD0RsjxjCOWGOKgaqOoE5wxIPMA8wnGJ0no3m7MjVdpkS/dNOGUx2nYegpqZzU/jZ0qvc+JrfkvkzcUyg== +"@opentelemetry/instrumentation-aws-sdk@0.68.0": + version "0.68.0" + resolved "https://registry.yarnpkg.com/@opentelemetry/instrumentation-aws-sdk/-/instrumentation-aws-sdk-0.68.0.tgz#436353e94d32c7cdb5b6bb4ed28bdd16bd4f39a4" + integrity sha512-nHXSRX3iYSE9MaiPE+jIovuNA8dTmleeg0vdLHkk5nvWCYFf/I9kMdqA3KcfKCPonVc5+NtSTft6OVtuGtawIA== dependencies: "@opentelemetry/core" "^2.0.0" - "@opentelemetry/instrumentation" "^0.211.0" + "@opentelemetry/instrumentation" "^0.213.0" "@opentelemetry/semantic-conventions" "^1.34.0" -"@opentelemetry/instrumentation-connect@0.54.0": - version "0.54.0" - resolved "https://registry.yarnpkg.com/@opentelemetry/instrumentation-connect/-/instrumentation-connect-0.54.0.tgz#87312850844b6c57976d00bd3256d55650543772" - integrity sha512-43RmbhUhqt3uuPnc16cX6NsxEASEtn8z/cYV8Zpt6EP4p2h9s4FNuJ4Q9BbEQ2C0YlCCB/2crO1ruVz/hWt8fA== +"@opentelemetry/instrumentation-connect@0.56.0": + version "0.56.0" + resolved "https://registry.yarnpkg.com/@opentelemetry/instrumentation-connect/-/instrumentation-connect-0.56.0.tgz#8d846d2f7cf1f6b2723e5b0ff5595e8d31cb7446" + integrity sha512-PKp+sSZ7AfzMvGgO3VCyo1inwNu+q7A1k9X88WK4PQ+S6Hp7eFk8pie+sWHDTaARovmqq5V2osav3lQej2B0nw== dependencies: 
"@opentelemetry/core" "^2.0.0" - "@opentelemetry/instrumentation" "^0.211.0" + "@opentelemetry/instrumentation" "^0.213.0" "@opentelemetry/semantic-conventions" "^1.27.0" "@types/connect" "3.4.38" -"@opentelemetry/instrumentation-dataloader@0.28.0": - version "0.28.0" - resolved "https://registry.yarnpkg.com/@opentelemetry/instrumentation-dataloader/-/instrumentation-dataloader-0.28.0.tgz#b857bb038e4a2a3b7278f3da89a1e210bb15339e" - integrity sha512-ExXGBp0sUj8yhm6Znhf9jmuOaGDsYfDES3gswZnKr4MCqoBWQdEFn6EoDdt5u+RdbxQER+t43FoUihEfTSqsjA== +"@opentelemetry/instrumentation-dataloader@0.30.0": + version "0.30.0" + resolved "https://registry.yarnpkg.com/@opentelemetry/instrumentation-dataloader/-/instrumentation-dataloader-0.30.0.tgz#7fbea57b27165324092639abf090ca3697eb7a80" + integrity sha512-MXHP2Q38cd2OhzEBKAIXUi9uBlPEYzF6BNJbyjUXBQ6kLaf93kRC41vNMIz0Nl5mnuwK7fDvKT+/lpx7BXRwdg== dependencies: - "@opentelemetry/instrumentation" "^0.211.0" + "@opentelemetry/instrumentation" "^0.213.0" -"@opentelemetry/instrumentation-express@0.59.0": - version "0.59.0" - resolved "https://registry.yarnpkg.com/@opentelemetry/instrumentation-express/-/instrumentation-express-0.59.0.tgz#c2ac7dcb4f9904926518408cdf4efb046e724382" - integrity sha512-pMKV/qnHiW/Q6pmbKkxt0eIhuNEtvJ7sUAyee192HErlr+a1Jx+FZ3WjfmzhQL1geewyGEiPGkmjjAgNY8TgDA== +"@opentelemetry/instrumentation-express@0.61.0": + version "0.61.0" + resolved "https://registry.yarnpkg.com/@opentelemetry/instrumentation-express/-/instrumentation-express-0.61.0.tgz#49b4d144ab6e9d6e035941a51f5e573e84e3647f" + integrity sha512-Xdmqo9RZuZlL29Flg8QdwrrX7eW1CZ7wFQPKHyXljNymgKhN1MCsYuqQ/7uxavhSKwAl7WxkTzKhnqpUApLMvQ== dependencies: "@opentelemetry/core" "^2.0.0" - "@opentelemetry/instrumentation" "^0.211.0" + "@opentelemetry/instrumentation" "^0.213.0" "@opentelemetry/semantic-conventions" "^1.27.0" -"@opentelemetry/instrumentation-fs@0.30.0": - version "0.30.0" - resolved 
"https://registry.yarnpkg.com/@opentelemetry/instrumentation-fs/-/instrumentation-fs-0.30.0.tgz#5e28edde0591dc4ffa471a86a68f91e737fe31fb" - integrity sha512-n3Cf8YhG7reaj5dncGlRIU7iT40bxPOjsBEA5Bc1a1g6e9Qvb+JFJ7SEiMlPbUw4PBmxE3h40ltE8LZ3zVt6OA== +"@opentelemetry/instrumentation-fs@0.32.0": + version "0.32.0" + resolved "https://registry.yarnpkg.com/@opentelemetry/instrumentation-fs/-/instrumentation-fs-0.32.0.tgz#2010d86da8ab3d543f8e44c8fff81b94f904d91d" + integrity sha512-koR6apx0g0wX6RRiPpjA4AFQUQUbXrK16kq4/SZjVp7u5cffJhNkY4TnITxcGA4acGSPYAfx3NHRIv4Khn1axQ== dependencies: "@opentelemetry/core" "^2.0.0" - "@opentelemetry/instrumentation" "^0.211.0" + "@opentelemetry/instrumentation" "^0.213.0" -"@opentelemetry/instrumentation-generic-pool@0.54.0": - version "0.54.0" - resolved "https://registry.yarnpkg.com/@opentelemetry/instrumentation-generic-pool/-/instrumentation-generic-pool-0.54.0.tgz#9f3ad0cedbfe5011efe4ebdc76c85a73a0b967a6" - integrity sha512-8dXMBzzmEdXfH/wjuRvcJnUFeWzZHUnExkmFJ2uPfa31wmpyBCMxO59yr8f/OXXgSogNgi/uPo9KW9H7LMIZ+g== +"@opentelemetry/instrumentation-generic-pool@0.56.0": + version "0.56.0" + resolved "https://registry.yarnpkg.com/@opentelemetry/instrumentation-generic-pool/-/instrumentation-generic-pool-0.56.0.tgz#01560f52d5bac6fb6312a1f0bc74bf0939119894" + integrity sha512-fg+Jffs6fqrf0uQS0hom7qBFKsbtpBiBl8+Vkc63Gx8xh6pVh+FhagmiO6oM0m3vyb683t1lP7yGYq22SiDnqg== dependencies: - "@opentelemetry/instrumentation" "^0.211.0" + "@opentelemetry/instrumentation" "^0.213.0" -"@opentelemetry/instrumentation-graphql@0.58.0": - version "0.58.0" - resolved "https://registry.yarnpkg.com/@opentelemetry/instrumentation-graphql/-/instrumentation-graphql-0.58.0.tgz#3ca294ba410e04c920dc82ab4caa23ec1c2e1a2e" - integrity sha512-+yWVVY7fxOs3j2RixCbvue8vUuJ1inHxN2q1sduqDB0Wnkr4vOzVKRYl/Zy7B31/dcPS72D9lo/kltdOTBM3bQ== +"@opentelemetry/instrumentation-graphql@0.61.0": + version "0.61.0" + resolved 
"https://registry.yarnpkg.com/@opentelemetry/instrumentation-graphql/-/instrumentation-graphql-0.61.0.tgz#d1f896095a891c9576967645e7fcba935da82a94" + integrity sha512-pUiVASv6nh2XrerTvlbVHh7vKFzscpgwiQ/xvnZuAIzQ5lRjWVdRPUuXbvZJ/Yq79QsE81TZdJ7z9YsXiss1ew== dependencies: - "@opentelemetry/instrumentation" "^0.211.0" + "@opentelemetry/instrumentation" "^0.213.0" -"@opentelemetry/instrumentation-hapi@0.57.0": - version "0.57.0" - resolved "https://registry.yarnpkg.com/@opentelemetry/instrumentation-hapi/-/instrumentation-hapi-0.57.0.tgz#27b3a44a51444af3100a321f2e40623e89e5bb75" - integrity sha512-Os4THbvls8cTQTVA8ApLfZZztuuqGEeqog0XUnyRW7QVF0d/vOVBEcBCk1pazPFmllXGEdNbbat8e2fYIWdFbw== +"@opentelemetry/instrumentation-hapi@0.59.0": + version "0.59.0" + resolved "https://registry.yarnpkg.com/@opentelemetry/instrumentation-hapi/-/instrumentation-hapi-0.59.0.tgz#412ea19e97ead684c5737e1f1aaa19ff940512d3" + integrity sha512-33wa4mEr+9+ztwdgLor1SeBu4Opz4IsmpcLETXAd3VmBrOjez8uQtrsOhPCa5Vhbm5gzDlMYTgFRLQzf8/YHFA== dependencies: "@opentelemetry/core" "^2.0.0" - "@opentelemetry/instrumentation" "^0.211.0" + "@opentelemetry/instrumentation" "^0.213.0" "@opentelemetry/semantic-conventions" "^1.27.0" -"@opentelemetry/instrumentation-http@0.211.0": - version "0.211.0" - resolved "https://registry.yarnpkg.com/@opentelemetry/instrumentation-http/-/instrumentation-http-0.211.0.tgz#2f12f83f0c21d37917fd9710fb5b755f28858cf6" - integrity sha512-n0IaQ6oVll9PP84SjbOCwDjaJasWRHi6BLsbMLiT6tNj7QbVOkuA5sk/EfZczwI0j5uTKl1awQPivO/ldVtsqA== +"@opentelemetry/instrumentation-http@0.213.0": + version "0.213.0" + resolved "https://registry.yarnpkg.com/@opentelemetry/instrumentation-http/-/instrumentation-http-0.213.0.tgz#b379d6bcbae43a7d6d54070f3794527021f176c9" + integrity sha512-B978Xsm5XEPGhm1P07grDoaOFLHapJPkOG9h016cJsyWWxmiLnPu2M/4Nrm7UCkHSiLnkXgC+zVGUAIahy8EEA== dependencies: - "@opentelemetry/core" "2.5.0" - "@opentelemetry/instrumentation" "0.211.0" + "@opentelemetry/core" "2.6.0" + 
"@opentelemetry/instrumentation" "0.213.0" "@opentelemetry/semantic-conventions" "^1.29.0" forwarded-parse "2.1.2" -"@opentelemetry/instrumentation-ioredis@0.59.0": - version "0.59.0" - resolved "https://registry.yarnpkg.com/@opentelemetry/instrumentation-ioredis/-/instrumentation-ioredis-0.59.0.tgz#530d06aa67b73ea732414557adebe1dde7de430f" - integrity sha512-875UxzBHWkW+P4Y45SoFM2AR8f8TzBMD8eO7QXGCyFSCUMP5s9vtt/BS8b/r2kqLyaRPK6mLbdnZznK3XzQWvw== +"@opentelemetry/instrumentation-ioredis@0.61.0": + version "0.61.0" + resolved "https://registry.yarnpkg.com/@opentelemetry/instrumentation-ioredis/-/instrumentation-ioredis-0.61.0.tgz#e862540cbf188d0ca368d3a75020d165cb8beefb" + integrity sha512-hsHDadUtAFbws1YSDc1XW0svGFKiUbqv2td1Cby+UAiwvojm1NyBo/taifH0t8CuFZ0x/2SDm0iuTwrM5pnVOg== dependencies: - "@opentelemetry/instrumentation" "^0.211.0" + "@opentelemetry/instrumentation" "^0.213.0" "@opentelemetry/redis-common" "^0.38.2" "@opentelemetry/semantic-conventions" "^1.33.0" -"@opentelemetry/instrumentation-kafkajs@0.20.0": - version "0.20.0" - resolved "https://registry.yarnpkg.com/@opentelemetry/instrumentation-kafkajs/-/instrumentation-kafkajs-0.20.0.tgz#521db06d10d39f42e842ce336e5c1e48b3da2956" - integrity sha512-yJXOuWZROzj7WmYCUiyT27tIfqBrVtl1/TwVbQyWPz7rL0r1Lu7kWjD0PiVeTCIL6CrIZ7M2s8eBxsTAOxbNvw== +"@opentelemetry/instrumentation-kafkajs@0.22.0": + version "0.22.0" + resolved "https://registry.yarnpkg.com/@opentelemetry/instrumentation-kafkajs/-/instrumentation-kafkajs-0.22.0.tgz#a3cf7aca003f96211e514a348b7568799efdfba1" + integrity sha512-wJU4IBQMUikdJAcTChLFqK5lo+flo7pahqd8DSLv7uMxsdOdAHj6RzKYAm8pPfUS6ItKYutYyuicwKaFwQKsoA== dependencies: - "@opentelemetry/instrumentation" "^0.211.0" + "@opentelemetry/instrumentation" "^0.213.0" "@opentelemetry/semantic-conventions" "^1.30.0" -"@opentelemetry/instrumentation-knex@0.55.0": - version "0.55.0" - resolved 
"https://registry.yarnpkg.com/@opentelemetry/instrumentation-knex/-/instrumentation-knex-0.55.0.tgz#fefc17d854a107d99ab0dbc8933d5897efce1abd" - integrity sha512-FtTL5DUx5Ka/8VK6P1VwnlUXPa3nrb7REvm5ddLUIeXXq4tb9pKd+/ThB1xM/IjefkRSN3z8a5t7epYw1JLBJQ== +"@opentelemetry/instrumentation-knex@0.57.0": + version "0.57.0" + resolved "https://registry.yarnpkg.com/@opentelemetry/instrumentation-knex/-/instrumentation-knex-0.57.0.tgz#d46622a3f82f3df2ba29c64498d6ef828a40457e" + integrity sha512-vMCSh8kolEm5rRsc+FZeTZymWmIJwc40hjIKnXH4O0Dv/gAkJJIRXCsPX5cPbe0c0j/34+PsENd0HqKruwhVYw== dependencies: - "@opentelemetry/instrumentation" "^0.211.0" + "@opentelemetry/instrumentation" "^0.213.0" "@opentelemetry/semantic-conventions" "^1.33.1" -"@opentelemetry/instrumentation-koa@0.59.0": - version "0.59.0" - resolved "https://registry.yarnpkg.com/@opentelemetry/instrumentation-koa/-/instrumentation-koa-0.59.0.tgz#7df8850fa193a8f590e3fbcab00016e25db27041" - integrity sha512-K9o2skADV20Skdu5tG2bogPKiSpXh4KxfLjz6FuqIVvDJNibwSdu5UvyyBzRVp1rQMV6UmoIk6d3PyPtJbaGSg== +"@opentelemetry/instrumentation-koa@0.61.0": + version "0.61.0" + resolved "https://registry.yarnpkg.com/@opentelemetry/instrumentation-koa/-/instrumentation-koa-0.61.0.tgz#c12f57b023834afb1c142c11746d560bcc288b5b" + integrity sha512-lvrfWe9ShK/D2X4brmx8ZqqeWPfRl8xekU0FCn7C1dHm5k6+rTOOi36+4fnaHAP8lig9Ux6XQ1D4RNIpPCt1WQ== dependencies: "@opentelemetry/core" "^2.0.0" - "@opentelemetry/instrumentation" "^0.211.0" + "@opentelemetry/instrumentation" "^0.213.0" "@opentelemetry/semantic-conventions" "^1.36.0" -"@opentelemetry/instrumentation-lru-memoizer@0.55.0": - version "0.55.0" - resolved "https://registry.yarnpkg.com/@opentelemetry/instrumentation-lru-memoizer/-/instrumentation-lru-memoizer-0.55.0.tgz#776d5f10178adfbda7286b4f31adde8bb518d55a" - integrity sha512-FDBfT7yDGcspN0Cxbu/k8A0Pp1Jhv/m7BMTzXGpcb8ENl3tDj/51U65R5lWzUH15GaZA15HQ5A5wtafklxYj7g== +"@opentelemetry/instrumentation-lru-memoizer@0.57.0": + version "0.57.0" + resolved 
"https://registry.yarnpkg.com/@opentelemetry/instrumentation-lru-memoizer/-/instrumentation-lru-memoizer-0.57.0.tgz#4da92ecd1bc5d5e9c7de28ea14ed57c9f29cfefd" + integrity sha512-cEqpUocSKJfwDtLYTTJehRLWzkZ2eoePCxfVIgGkGkb83fMB71O+y4MvRHJPbeV2bdoWdOVrl8uO0+EynWhTEA== dependencies: - "@opentelemetry/instrumentation" "^0.211.0" + "@opentelemetry/instrumentation" "^0.213.0" -"@opentelemetry/instrumentation-mongodb@0.64.0": - version "0.64.0" - resolved "https://registry.yarnpkg.com/@opentelemetry/instrumentation-mongodb/-/instrumentation-mongodb-0.64.0.tgz#0027c13fdd7506eb1f618998245edd244cc23cc7" - integrity sha512-pFlCJjweTqVp7B220mCvCld1c1eYKZfQt1p3bxSbcReypKLJTwat+wbL2YZoX9jPi5X2O8tTKFEOahO5ehQGsA== +"@opentelemetry/instrumentation-mongodb@0.66.0": + version "0.66.0" + resolved "https://registry.yarnpkg.com/@opentelemetry/instrumentation-mongodb/-/instrumentation-mongodb-0.66.0.tgz#990bf4571382d3b02a9584927411c92c375d2fd4" + integrity sha512-d7m9QnAY+4TCWI4q1QRkfrc6fo/92VwssaB1DzQfXNRvu51b78P+HJlWP7Qg6N6nkwdb9faMZNBCZJfftmszkw== dependencies: - "@opentelemetry/instrumentation" "^0.211.0" + "@opentelemetry/instrumentation" "^0.213.0" "@opentelemetry/semantic-conventions" "^1.33.0" -"@opentelemetry/instrumentation-mongoose@0.57.0": - version "0.57.0" - resolved "https://registry.yarnpkg.com/@opentelemetry/instrumentation-mongoose/-/instrumentation-mongoose-0.57.0.tgz#2ce3f3bbf66a255958c3a112a92079898d69f624" - integrity sha512-MthiekrU/BAJc5JZoZeJmo0OTX6ycJMiP6sMOSRTkvz5BrPMYDqaJos0OgsLPL/HpcgHP7eo5pduETuLguOqcg== +"@opentelemetry/instrumentation-mongoose@0.59.0": + version "0.59.0" + resolved "https://registry.yarnpkg.com/@opentelemetry/instrumentation-mongoose/-/instrumentation-mongoose-0.59.0.tgz#8446ece86df59f09c630e7df6d794c8cd08f58d8" + integrity sha512-6/jWU+c1NgznkVLDU/2y0bXV2nJo3o9FWZ9mZ9nN6T/JBNRoMnVXZl2FdBmgH+a5MwaWLs5kmRJTP5oUVGIkPw== dependencies: "@opentelemetry/core" "^2.0.0" - "@opentelemetry/instrumentation" "^0.211.0" + 
"@opentelemetry/instrumentation" "^0.213.0" "@opentelemetry/semantic-conventions" "^1.33.0" -"@opentelemetry/instrumentation-mysql2@0.57.0": - version "0.57.0" - resolved "https://registry.yarnpkg.com/@opentelemetry/instrumentation-mysql2/-/instrumentation-mysql2-0.57.0.tgz#928eda47c6f4ab193d3363fcab01d81a70adc46b" - integrity sha512-nHSrYAwF7+aV1E1V9yOOP9TchOodb6fjn4gFvdrdQXiRE7cMuffyLLbCZlZd4wsspBzVwOXX8mpURdRserAhNA== +"@opentelemetry/instrumentation-mysql2@0.59.0": + version "0.59.0" + resolved "https://registry.yarnpkg.com/@opentelemetry/instrumentation-mysql2/-/instrumentation-mysql2-0.59.0.tgz#938cd4a294b7e4a6e8c3855b8cfe267c8d2e5493" + integrity sha512-n9/xrVCRBfG9egVbffnlU1uhr+HX0vF4GgtAB/Bvm48wpFgRidqD8msBMiym1kRYzmpWvJqTxNT47u1MkgBEdw== dependencies: - "@opentelemetry/instrumentation" "^0.211.0" + "@opentelemetry/instrumentation" "^0.213.0" "@opentelemetry/semantic-conventions" "^1.33.0" "@opentelemetry/sql-common" "^0.41.2" -"@opentelemetry/instrumentation-mysql@0.57.0": - version "0.57.0" - resolved "https://registry.yarnpkg.com/@opentelemetry/instrumentation-mysql/-/instrumentation-mysql-0.57.0.tgz#74d42a1c6d20aee93996f8b6f6b7b69469748754" - integrity sha512-HFS/+FcZ6Q7piM7Il7CzQ4VHhJvGMJWjx7EgCkP5AnTntSN5rb5Xi3TkYJHBKeR27A0QqPlGaCITi93fUDs++Q== +"@opentelemetry/instrumentation-mysql@0.59.0": + version "0.59.0" + resolved "https://registry.yarnpkg.com/@opentelemetry/instrumentation-mysql/-/instrumentation-mysql-0.59.0.tgz#bf43cafbac5928236ea53704a52c718349c22e38" + integrity sha512-r+V/Fh0sm7Ga8/zk/TI5H5FQRAjwr0RrpfPf8kNIehlsKf12XnvIaZi8ViZkpX0gyPEpLXqzqWD6QHlgObgzZw== dependencies: - "@opentelemetry/instrumentation" "^0.211.0" + "@opentelemetry/instrumentation" "^0.213.0" "@opentelemetry/semantic-conventions" "^1.33.0" "@types/mysql" "2.15.27" -"@opentelemetry/instrumentation-nestjs-core@0.57.0": - version "0.57.0" - resolved 
"https://registry.yarnpkg.com/@opentelemetry/instrumentation-nestjs-core/-/instrumentation-nestjs-core-0.57.0.tgz#7d42f690b8b78c08d9003425084911665c73deb8" - integrity sha512-mzTjjethjuk70o/vWUeV12QwMG9EAFJpkn13/q8zi++sNosf2hoGXTplIdbs81U8S3PJ4GxHKsBjM0bj1CGZ0g== +"@opentelemetry/instrumentation-nestjs-core@0.59.0": + version "0.59.0" + resolved "https://registry.yarnpkg.com/@opentelemetry/instrumentation-nestjs-core/-/instrumentation-nestjs-core-0.59.0.tgz#858e7514e0842ceec1356cb0ba55cb3c60dbace6" + integrity sha512-tt2cFTENV8XB3D3xjhOz0q4hLc1eqkMZS5UyT9nnHF5FfYH94S2vAGdssvsMv+pFtA6/PmhPUZd4onUN1O7STg== dependencies: - "@opentelemetry/instrumentation" "^0.211.0" + "@opentelemetry/instrumentation" "^0.213.0" "@opentelemetry/semantic-conventions" "^1.30.0" -"@opentelemetry/instrumentation-pg@0.63.0": - version "0.63.0" - resolved "https://registry.yarnpkg.com/@opentelemetry/instrumentation-pg/-/instrumentation-pg-0.63.0.tgz#852ca5519d756c613bb9f3153a5e70c2b805e5cf" - integrity sha512-dKm/ODNN3GgIQVlbD6ZPxwRc3kleLf95hrRWXM+l8wYo+vSeXtEpQPT53afEf6VFWDVzJK55VGn8KMLtSve/cg== +"@opentelemetry/instrumentation-pg@0.65.0": + version "0.65.0" + resolved "https://registry.yarnpkg.com/@opentelemetry/instrumentation-pg/-/instrumentation-pg-0.65.0.tgz#f1f76f8c57c5c6fec68c77ce6ee104fee5de13e1" + integrity sha512-W0zpHEIEuyZ8zvb3njaX9AAbHgPYOsSWVOoWmv1sjVRSF6ZpBqtlxBWbU+6hhq1TFWBeWJOXZ8nZS/PUFpLJYQ== dependencies: "@opentelemetry/core" "^2.0.0" - "@opentelemetry/instrumentation" "^0.211.0" + "@opentelemetry/instrumentation" "^0.213.0" "@opentelemetry/semantic-conventions" "^1.34.0" "@opentelemetry/sql-common" "^0.41.2" "@types/pg" "8.15.6" "@types/pg-pool" "2.0.7" -"@opentelemetry/instrumentation-redis@0.59.0": - version "0.59.0" - resolved "https://registry.yarnpkg.com/@opentelemetry/instrumentation-redis/-/instrumentation-redis-0.59.0.tgz#44c1bd7852cdadbe77c1bdfa94185528012558cf" - integrity 
sha512-JKv1KDDYA2chJ1PC3pLP+Q9ISMQk6h5ey+99mB57/ARk0vQPGZTTEb4h4/JlcEpy7AYT8HIGv7X6l+br03Neeg== +"@opentelemetry/instrumentation-redis@0.61.0": + version "0.61.0" + resolved "https://registry.yarnpkg.com/@opentelemetry/instrumentation-redis/-/instrumentation-redis-0.61.0.tgz#b43b9c3b5d0b124f2e60b055e4529a3a4b55dbc4" + integrity sha512-JnPexA034/0UJRsvH96B0erQoNOqKJZjE2ZRSw9hiTSC23LzE0nJE/u6D+xqOhgUhRnhhcPHq4MdYtmUdYTF+Q== dependencies: - "@opentelemetry/instrumentation" "^0.211.0" + "@opentelemetry/instrumentation" "^0.213.0" "@opentelemetry/redis-common" "^0.38.2" "@opentelemetry/semantic-conventions" "^1.27.0" -"@opentelemetry/instrumentation-tedious@0.30.0": - version "0.30.0" - resolved "https://registry.yarnpkg.com/@opentelemetry/instrumentation-tedious/-/instrumentation-tedious-0.30.0.tgz#4a8906b5322c4add4132e6e086c23e17bc23626b" - integrity sha512-bZy9Q8jFdycKQ2pAsyuHYUHNmCxCOGdG6eg1Mn75RvQDccq832sU5OWOBnc12EFUELI6icJkhR7+EQKMBam2GA== +"@opentelemetry/instrumentation-tedious@0.32.0": + version "0.32.0" + resolved "https://registry.yarnpkg.com/@opentelemetry/instrumentation-tedious/-/instrumentation-tedious-0.32.0.tgz#8204a14adb71adcbf7d72705244d606bb69e428a" + integrity sha512-BQS6gG8RJ1foEqfEZ+wxoqlwfCAzb1ZVG0ad8Gfe4x8T658HJCLGLd4E4NaoQd8EvPfLqOXgzGaE/2U4ytDSWA== dependencies: - "@opentelemetry/instrumentation" "^0.211.0" + "@opentelemetry/instrumentation" "^0.213.0" "@opentelemetry/semantic-conventions" "^1.33.0" "@types/tedious" "^4.0.14" -"@opentelemetry/instrumentation-undici@0.21.0": - version "0.21.0" - resolved "https://registry.yarnpkg.com/@opentelemetry/instrumentation-undici/-/instrumentation-undici-0.21.0.tgz#dcb43a364c39e78217946aeb7aa09156e55f4c6c" - integrity sha512-gok0LPUOTz2FQ1YJMZzaHcOzDFyT64XJ8M9rNkugk923/p6lDGms/cRW1cqgqp6N6qcd6K6YdVHwPEhnx9BWbw== +"@opentelemetry/instrumentation-undici@0.23.0": + version "0.23.0" + resolved 
"https://registry.yarnpkg.com/@opentelemetry/instrumentation-undici/-/instrumentation-undici-0.23.0.tgz#e328bf6e53847ba7baa2a345d02221cc62917cec" + integrity sha512-LL0VySzKVR2cJSFVZaTYpZl1XTpBGnfzoQPe2W7McS2267ldsaEIqtQY6VXs2KCXN0poFjze5110PIpxHDaDGg== dependencies: "@opentelemetry/core" "^2.0.0" - "@opentelemetry/instrumentation" "^0.211.0" + "@opentelemetry/instrumentation" "^0.213.0" "@opentelemetry/semantic-conventions" "^1.24.0" -"@opentelemetry/instrumentation@0.211.0", "@opentelemetry/instrumentation@^0.211.0": - version "0.211.0" - resolved "https://registry.yarnpkg.com/@opentelemetry/instrumentation/-/instrumentation-0.211.0.tgz#d45e20eafa75b5d3e8a9745a6205332893c55f37" - integrity sha512-h0nrZEC/zvI994nhg7EgQ8URIHt0uDTwN90r3qQUdZORS455bbx+YebnGeEuFghUT0HlJSrLF4iHw67f+odY+Q== +"@opentelemetry/instrumentation@0.213.0", "@opentelemetry/instrumentation@^0.213.0": + version "0.213.0" + resolved "https://registry.yarnpkg.com/@opentelemetry/instrumentation/-/instrumentation-0.213.0.tgz#55362569efd0cba00aab9921a78dd20dfddf70b6" + integrity sha512-3i9NdkET/KvQomeh7UaR/F4r9P25Rx6ooALlWXPIjypcEOUxksCmVu0zA70NBJWlrMW1rPr/LRidFAflLI+s/w== dependencies: - "@opentelemetry/api-logs" "0.211.0" - import-in-the-middle "^2.0.0" + "@opentelemetry/api-logs" "0.213.0" + import-in-the-middle "^3.0.0" require-in-the-middle "^8.0.0" "@opentelemetry/instrumentation@^0.207.0": @@ -6446,13 +6449,13 @@ import-in-the-middle "^2.0.0" require-in-the-middle "^8.0.0" -"@opentelemetry/instrumentation@^0.208.0": - version "0.208.0" - resolved "https://registry.yarnpkg.com/@opentelemetry/instrumentation/-/instrumentation-0.208.0.tgz#d764f8e4329dad50804e2e98f010170c14c4ce8f" - integrity sha512-Eju0L4qWcQS+oXxi6pgh7zvE2byogAkcsVv0OjHF/97iOz1N/aKE6etSGowYkie+YA1uo6DNwdSxaaNnLvcRlA== +"@opentelemetry/instrumentation@^0.212.0": + version "0.212.0" + resolved "https://registry.yarnpkg.com/@opentelemetry/instrumentation/-/instrumentation-0.212.0.tgz#238b6e3e2131217ff4acfe7e8e7b6ce1f0ac0ba0" + 
integrity sha512-IyXmpNnifNouMOe0I/gX7ENfv2ZCNdYTF0FpCsoBcpbIHzk81Ww9rQTYTnvghszCg7qGrIhNvWC8dhEifgX9Jg== dependencies: - "@opentelemetry/api-logs" "0.208.0" - import-in-the-middle "^2.0.0" + "@opentelemetry/api-logs" "0.212.0" + import-in-the-middle "^2.0.6" require-in-the-middle "^8.0.0" "@opentelemetry/redis-common@^0.38.2": @@ -6460,27 +6463,27 @@ resolved "https://registry.yarnpkg.com/@opentelemetry/redis-common/-/redis-common-0.38.2.tgz#cefa4f3e79db1cd54f19e233b7dfb56621143955" integrity sha512-1BCcU93iwSRZvDAgwUxC/DV4T/406SkMfxGqu5ojc3AvNI+I9GhV7v0J1HljsczuuhcnFLYqD5VmwVXfCGHzxA== -"@opentelemetry/resources@2.5.1", "@opentelemetry/resources@^2.5.1": - version "2.5.1" - resolved "https://registry.yarnpkg.com/@opentelemetry/resources/-/resources-2.5.1.tgz#90ccc27cea02b543f20a7db9834852ec11784c1a" - integrity sha512-BViBCdE/GuXRlp9k7nS1w6wJvY5fnFX5XvuEtWsTAOQFIO89Eru7lGW3WbfbxtCuZ/GbrJfAziXG0w0dpxL7eQ== +"@opentelemetry/resources@2.6.0", "@opentelemetry/resources@^2.6.0": + version "2.6.0" + resolved "https://registry.yarnpkg.com/@opentelemetry/resources/-/resources-2.6.0.tgz#1a945dbb8986043d8b593c358d5d8e3de6becf5a" + integrity sha512-D4y/+OGe3JSuYUCBxtH5T9DSAWNcvCb/nQWIga8HNtXTVPQn59j0nTBAgaAXxUVBDl40mG3Tc76b46wPlZaiJQ== dependencies: - "@opentelemetry/core" "2.5.1" + "@opentelemetry/core" "2.6.0" "@opentelemetry/semantic-conventions" "^1.29.0" -"@opentelemetry/sdk-trace-base@^2.5.1": - version "2.5.1" - resolved "https://registry.yarnpkg.com/@opentelemetry/sdk-trace-base/-/sdk-trace-base-2.5.1.tgz#4f55f37e18ac3f971936d4717b6bfd43cfd72d61" - integrity sha512-iZH3Gw8cxQn0gjpOjJMmKLd9GIaNh/E3v3ST67vyzLSxHBs14HsG4dy7jMYyC5WXGdBVEcM7U/XTF5hCQxjDMw== +"@opentelemetry/sdk-trace-base@^2.6.0": + version "2.6.0" + resolved "https://registry.yarnpkg.com/@opentelemetry/sdk-trace-base/-/sdk-trace-base-2.6.0.tgz#d7e752a0906f2bcae3c1261e224aef3e3b3746f9" + integrity sha512-g/OZVkqlxllgFM7qMKqbPV9c1DUPhQ7d4n3pgZFcrnrNft9eJXZM2TNHTPYREJBrtNdRytYyvwjgL5geDKl3EQ== 
dependencies: - "@opentelemetry/core" "2.5.1" - "@opentelemetry/resources" "2.5.1" + "@opentelemetry/core" "2.6.0" + "@opentelemetry/resources" "2.6.0" "@opentelemetry/semantic-conventions" "^1.29.0" -"@opentelemetry/semantic-conventions@^1.24.0", "@opentelemetry/semantic-conventions@^1.27.0", "@opentelemetry/semantic-conventions@^1.28.0", "@opentelemetry/semantic-conventions@^1.29.0", "@opentelemetry/semantic-conventions@^1.30.0", "@opentelemetry/semantic-conventions@^1.33.0", "@opentelemetry/semantic-conventions@^1.33.1", "@opentelemetry/semantic-conventions@^1.34.0", "@opentelemetry/semantic-conventions@^1.36.0", "@opentelemetry/semantic-conventions@^1.37.0", "@opentelemetry/semantic-conventions@^1.39.0": - version "1.39.0" - resolved "https://registry.yarnpkg.com/@opentelemetry/semantic-conventions/-/semantic-conventions-1.39.0.tgz#f653b2752171411feb40310b8a8953d7e5c543b7" - integrity sha512-R5R9tb2AXs2IRLNKLBJDynhkfmx7mX0vi8NkhZb3gUkPWHn6HXk5J8iQ/dql0U3ApfWym4kXXmBDRGO+oeOfjg== +"@opentelemetry/semantic-conventions@^1.24.0", "@opentelemetry/semantic-conventions@^1.27.0", "@opentelemetry/semantic-conventions@^1.28.0", "@opentelemetry/semantic-conventions@^1.29.0", "@opentelemetry/semantic-conventions@^1.30.0", "@opentelemetry/semantic-conventions@^1.33.0", "@opentelemetry/semantic-conventions@^1.33.1", "@opentelemetry/semantic-conventions@^1.34.0", "@opentelemetry/semantic-conventions@^1.36.0", "@opentelemetry/semantic-conventions@^1.40.0": + version "1.40.0" + resolved "https://registry.yarnpkg.com/@opentelemetry/semantic-conventions/-/semantic-conventions-1.40.0.tgz#10b2944ca559386590683392022a897eefd011d3" + integrity sha512-cifvXDhcqMwwTlTK04GBNeIe7yyo28Mfby85QXFe1Yk8nmi36Ab/5UQwptOx84SsoGNRg+EVSjwzfSZMy6pmlw== "@opentelemetry/sql-common@^0.41.2": version "0.41.2" @@ -6571,100 +6574,100 @@ resolved "https://registry.yarnpkg.com/@oxc-project/types/-/types-0.76.0.tgz#89ae800d774ccb344278fc17ab6c15348da8b995" integrity 
sha512-CH3THIrSViKal8yV/Wh3FK0pFhp40nzW1MUDCik9fNuid2D/7JJXKJnfFOAvMxInGXDlvmgT6ACAzrl47TqzkQ== -"@oxfmt/binding-android-arm-eabi@0.32.0": - version "0.32.0" - resolved "https://registry.yarnpkg.com/@oxfmt/binding-android-arm-eabi/-/binding-android-arm-eabi-0.32.0.tgz#0602a4ccab87439d823eef37d4e07f12780fe74c" - integrity sha512-DpVyuVzgLH6/MvuB/YD3vXO9CN/o9EdRpA0zXwe/tagP6yfVSFkFWkPqTROdqp0mlzLH5Yl+/m+hOrcM601EbA== - -"@oxfmt/binding-android-arm64@0.32.0": - version "0.32.0" - resolved "https://registry.yarnpkg.com/@oxfmt/binding-android-arm64/-/binding-android-arm64-0.32.0.tgz#51bfce24d1d9488d79361e34aab3850c5d249828" - integrity sha512-w1cmNXf9zs0vKLuNgyUF3hZ9VUAS1hBmQGndYJv1OmcVqStBtRTRNxSWkWM0TMkrA9UbvIvM9gfN+ib4Wy6lkQ== - -"@oxfmt/binding-darwin-arm64@0.32.0": - version "0.32.0" - resolved "https://registry.yarnpkg.com/@oxfmt/binding-darwin-arm64/-/binding-darwin-arm64-0.32.0.tgz#cf9899320b63562784ddbe95898b9655b8a3ee54" - integrity sha512-m6wQojz/hn94XdZugFPtdFbOvXbOSYEqPsR2gyLyID3BvcrC2QsJyT1o3gb4BZEGtZrG1NiKVGwDRLM0dHd2mg== - -"@oxfmt/binding-darwin-x64@0.32.0": - version "0.32.0" - resolved "https://registry.yarnpkg.com/@oxfmt/binding-darwin-x64/-/binding-darwin-x64-0.32.0.tgz#c83ea6f2a3454592a08377fdab4f35f1aa3125f5" - integrity sha512-hN966Uh6r3Erkg2MvRcrJWaB6QpBzP15rxWK/QtkUyD47eItJLsAQ2Hrm88zMIpFZ3COXZLuN3hqgSlUtvB0Xw== - -"@oxfmt/binding-freebsd-x64@0.32.0": - version "0.32.0" - resolved "https://registry.yarnpkg.com/@oxfmt/binding-freebsd-x64/-/binding-freebsd-x64-0.32.0.tgz#e185b75b3cd577b939d07305703e5d51ff6ffd41" - integrity sha512-g5UZPGt8tJj263OfSiDGdS54HPa0KgFfspLVAUivVSdoOgsk6DkwVS9nO16xQTDztzBPGxTvrby8WuufF0g86Q== - -"@oxfmt/binding-linux-arm-gnueabihf@0.32.0": - version "0.32.0" - resolved "https://registry.yarnpkg.com/@oxfmt/binding-linux-arm-gnueabihf/-/binding-linux-arm-gnueabihf-0.32.0.tgz#3e3335d427fb8ec79bbcbdd56945cc80bc06649c" - integrity sha512-F4ZY83/PVQo9ZJhtzoMqbmjqEyTVEZjbaw4x1RhzdfUhddB41ZB2Vrt4eZi7b4a4TP85gjPRHgQBeO0c1jbtaw== 
- -"@oxfmt/binding-linux-arm-musleabihf@0.32.0": - version "0.32.0" - resolved "https://registry.yarnpkg.com/@oxfmt/binding-linux-arm-musleabihf/-/binding-linux-arm-musleabihf-0.32.0.tgz#36add5ff6b59c940356c0ec9988c2c1ea11a6a6a" - integrity sha512-olR37eG16Lzdj9OBSvuoT5RxzgM5xfQEHm1OEjB3M7Wm4KWa5TDWIT13Aiy74GvAN77Hq1+kUKcGVJ/0ynf75g== - -"@oxfmt/binding-linux-arm64-gnu@0.32.0": - version "0.32.0" - resolved "https://registry.yarnpkg.com/@oxfmt/binding-linux-arm64-gnu/-/binding-linux-arm64-gnu-0.32.0.tgz#d8c07a893ee731717cdd833ca533769819255693" - integrity sha512-eZhk6AIjRCDeLoXYBhMW7qq/R1YyVi+tGnGfc3kp7AZQrMsFaWtP/bgdCJCTNXMpbMwymtVz0qhSQvR5w2sKcg== - -"@oxfmt/binding-linux-arm64-musl@0.32.0": - version "0.32.0" - resolved "https://registry.yarnpkg.com/@oxfmt/binding-linux-arm64-musl/-/binding-linux-arm64-musl-0.32.0.tgz#1fd4fff7907c56400c81c41052ef5674f359289d" - integrity sha512-UYiqO9MlipntFbdbUKOIo84vuyzrK4TVIs7Etat91WNMFSW54F6OnHq08xa5ZM+K9+cyYMgQPXvYCopuP+LyKw== - -"@oxfmt/binding-linux-ppc64-gnu@0.32.0": - version "0.32.0" - resolved "https://registry.yarnpkg.com/@oxfmt/binding-linux-ppc64-gnu/-/binding-linux-ppc64-gnu-0.32.0.tgz#cb5a36873ab77848bbb140e7104108f79f9c52a3" - integrity sha512-IDH/fxMv+HmKsMtsjEbXqhScCKDIYp38sgGEcn0QKeXMxrda67PPZA7HMfoUwEtFUG+jsO1XJxTrQsL+kQ90xQ== - -"@oxfmt/binding-linux-riscv64-gnu@0.32.0": - version "0.32.0" - resolved "https://registry.yarnpkg.com/@oxfmt/binding-linux-riscv64-gnu/-/binding-linux-riscv64-gnu-0.32.0.tgz#999da45a089574dc5294caa3e6066846310c1dbd" - integrity sha512-bQFGPDa0buYWJFeK2I7ah8wRZjrAgamaG2OAGv+Ua5UMYEnHxmHcv+r8lWUUrwP2oqQGvp1SB8JIVtBbYuAueQ== - -"@oxfmt/binding-linux-riscv64-musl@0.32.0": - version "0.32.0" - resolved "https://registry.yarnpkg.com/@oxfmt/binding-linux-riscv64-musl/-/binding-linux-riscv64-musl-0.32.0.tgz#bae99e56d3b133157500937fa09c104bd3cd4060" - integrity sha512-3vFp9DW1ItEKWltADzCFqG5N7rYFToT4ztlhg8wALoo2E2VhveLD88uAF4FF9AxD9NhgHDGmPCV+WZl/Qlj8cQ== - 
-"@oxfmt/binding-linux-s390x-gnu@0.32.0": - version "0.32.0" - resolved "https://registry.yarnpkg.com/@oxfmt/binding-linux-s390x-gnu/-/binding-linux-s390x-gnu-0.32.0.tgz#f55fa22c3d0cf57f0075f64368fad9c2f135cdde" - integrity sha512-Fub2y8S9ImuPzAzpbgkoz/EVTWFFBolxFZYCMRhRZc8cJZI2gl/NlZswqhvJd/U0Jopnwgm/OJ2x128vVzFFWA== - -"@oxfmt/binding-linux-x64-gnu@0.32.0": - version "0.32.0" - resolved "https://registry.yarnpkg.com/@oxfmt/binding-linux-x64-gnu/-/binding-linux-x64-gnu-0.32.0.tgz#bee3319311b05d8c12c5f57d53268a2283bb2e8e" - integrity sha512-XufwsnV3BF81zO2ofZvhT4FFaMmLTzZEZnC9HpFz/quPeg9C948+kbLlZnsfjmp+1dUxKMCpfmRMqOfF4AOLsA== - -"@oxfmt/binding-linux-x64-musl@0.32.0": - version "0.32.0" - resolved "https://registry.yarnpkg.com/@oxfmt/binding-linux-x64-musl/-/binding-linux-x64-musl-0.32.0.tgz#1bc25dd853aa100573cba92bec3ec18089bc609d" - integrity sha512-u2f9tC2qYfikKmA2uGpnEJgManwmk0ZXWs5BB4ga4KDu2JNLdA3i634DGHeMLK9wY9+iRf3t7IYpgN3OVFrvDw== - -"@oxfmt/binding-openharmony-arm64@0.32.0": - version "0.32.0" - resolved "https://registry.yarnpkg.com/@oxfmt/binding-openharmony-arm64/-/binding-openharmony-arm64-0.32.0.tgz#192c37c05e4ae09f432e7940c8e8da55fd87ceee" - integrity sha512-5ZXb1wrdbZ1YFXuNXNUCePLlmLDy4sUt4evvzD4Cgumbup5wJgS9PIe5BOaLywUg9f1wTH6lwltj3oT7dFpIGA== - -"@oxfmt/binding-win32-arm64-msvc@0.32.0": - version "0.32.0" - resolved "https://registry.yarnpkg.com/@oxfmt/binding-win32-arm64-msvc/-/binding-win32-arm64-msvc-0.32.0.tgz#a3447577537b5784cf4574dd551b34543344372a" - integrity sha512-IGSMm/Agq+IA0++aeAV/AGPfjcBdjrsajB5YpM3j7cMcwoYgUTi/k2YwAmsHH3ueZUE98pSM/Ise2J7HtyRjOA== - -"@oxfmt/binding-win32-ia32-msvc@0.32.0": - version "0.32.0" - resolved "https://registry.yarnpkg.com/@oxfmt/binding-win32-ia32-msvc/-/binding-win32-ia32-msvc-0.32.0.tgz#4c73f57a24bdbbefd76409aecc982e5b9070b249" - integrity sha512-H/9gsuqXmceWMsVoCPZhtJG2jLbnBeKr7xAXm2zuKpxLVF7/2n0eh7ocOLB6t+L1ARE76iORuUsRMnuGjj8FjQ== - -"@oxfmt/binding-win32-x64-msvc@0.32.0": - version "0.32.0" - 
resolved "https://registry.yarnpkg.com/@oxfmt/binding-win32-x64-msvc/-/binding-win32-x64-msvc-0.32.0.tgz#9e22eebbb6f0be9e5a6dc8e2c96e491b01d7cdf1" - integrity sha512-fF8VIOeligq+mA6KfKvWtFRXbf0EFy73TdR6ZnNejdJRM8VWN1e3QFhYgIwD7O8jBrQsd7EJbUpkAr/YlUOokg== +"@oxfmt/binding-android-arm-eabi@0.38.0": + version "0.38.0" + resolved "https://registry.yarnpkg.com/@oxfmt/binding-android-arm-eabi/-/binding-android-arm-eabi-0.38.0.tgz#23b79ba50e6829cc6ed7648015dc4bd258554f4a" + integrity sha512-lTN4//sgYywK8ulQo7a/EZVzOTGomGQv2IG/7tMYdqTV3xN3QTqWpXcZBGUzaicC4B882N+5zJLYZ37IWfUMcg== + +"@oxfmt/binding-android-arm64@0.38.0": + version "0.38.0" + resolved "https://registry.yarnpkg.com/@oxfmt/binding-android-arm64/-/binding-android-arm64-0.38.0.tgz#9624e881c0e506b124655d513180f183ff08b6dd" + integrity sha512-XbVgqR1WsIcCkfxwh2tdg3M1MWgR23YOboW2nbB8ab0gInNNLGy7cIAdr78XaoG/bGdaF4488XRhuGWq67xrzA== + +"@oxfmt/binding-darwin-arm64@0.38.0": + version "0.38.0" + resolved "https://registry.yarnpkg.com/@oxfmt/binding-darwin-arm64/-/binding-darwin-arm64-0.38.0.tgz#16c3ce1a233d581d7d308421412b89e9a69c63c6" + integrity sha512-AHb6zUzWaSJra7lnPkI+Sqwu33bVWVTwCozcw9QTX8vwHaI1+5d5STqBcsJf63eSuRVRlflwMS4erlAPh3fXZw== + +"@oxfmt/binding-darwin-x64@0.38.0": + version "0.38.0" + resolved "https://registry.yarnpkg.com/@oxfmt/binding-darwin-x64/-/binding-darwin-x64-0.38.0.tgz#ad0338aaf5bf6d6421d0a88a137cbac328d67c48" + integrity sha512-VmlmTyn7LL7Xi5htjosxGpJJHf3Drx5mgXxKE8+NT10uBXTaG3FHpRYhW3Zg5Qp7omH92Lj1+IHYqQG/HZpLnw== + +"@oxfmt/binding-freebsd-x64@0.38.0": + version "0.38.0" + resolved "https://registry.yarnpkg.com/@oxfmt/binding-freebsd-x64/-/binding-freebsd-x64-0.38.0.tgz#68b9406aa0195b172148781ca214ba933310053f" + integrity sha512-LynMLRqaUEAV6n4svTFanFOAnJ9D6aCCfymJ2yhMSh5fYFgCCO4q5LzPV2nATKKoyPocSErFSmYREsOFbkIlCg== + +"@oxfmt/binding-linux-arm-gnueabihf@0.38.0": + version "0.38.0" + resolved 
"https://registry.yarnpkg.com/@oxfmt/binding-linux-arm-gnueabihf/-/binding-linux-arm-gnueabihf-0.38.0.tgz#05700742c3cbce7b6116724f34fe1cd784ae2da5" + integrity sha512-HRRZtOXcss5+bGqQcYahILgt14+Iu/Olf6fnoKq5ctOzU21PGHVB+zuocgt+/+ixoMLV1Drvok3ns7QwnLwNTA== + +"@oxfmt/binding-linux-arm-musleabihf@0.38.0": + version "0.38.0" + resolved "https://registry.yarnpkg.com/@oxfmt/binding-linux-arm-musleabihf/-/binding-linux-arm-musleabihf-0.38.0.tgz#ea3758fa8196a179ee824196c3c97dc8f6617f0b" + integrity sha512-kScH8XnH7TRUckMOSZ5115Vvr2CQq+iPsuXPEzwUXSxh+gDLzt+GsXuvCsaPxp1KP+dQj88VrIjeQ4V0f9NRKw== + +"@oxfmt/binding-linux-arm64-gnu@0.38.0": + version "0.38.0" + resolved "https://registry.yarnpkg.com/@oxfmt/binding-linux-arm64-gnu/-/binding-linux-arm64-gnu-0.38.0.tgz#8b44ea6821839250488eacc253b28437510647c2" + integrity sha512-PUVn/vGsMs83eLhNXLNjR+Qw/EPiNxU9Tx+p+aZBK0RT9/k6RNgh/O4F1TxS4tdISmf3SSgjdnMOVW3ZfQZ2mA== + +"@oxfmt/binding-linux-arm64-musl@0.38.0": + version "0.38.0" + resolved "https://registry.yarnpkg.com/@oxfmt/binding-linux-arm64-musl/-/binding-linux-arm64-musl-0.38.0.tgz#43252372774c93e050f549d6f4dba57153d075a8" + integrity sha512-LhtmaLCMGtAIEtaTBAoKLF3QVt+IDKIjdEZvsf0msLeTUFKxyoTNScYBXbkmvqGrm37vV0JjTPvm+OaSh3np5A== + +"@oxfmt/binding-linux-ppc64-gnu@0.38.0": + version "0.38.0" + resolved "https://registry.yarnpkg.com/@oxfmt/binding-linux-ppc64-gnu/-/binding-linux-ppc64-gnu-0.38.0.tgz#e33ee74ba9157d5aaeaffa0310a271dd5074967f" + integrity sha512-tO6tPaS21o0MaRqmOi9e3sDotlW4c+1gCx4SwdrfDXm3Y1vmIZWh0qB6t/Xh77bIGVr/4fC95eKOhKLPGwdL+Q== + +"@oxfmt/binding-linux-riscv64-gnu@0.38.0": + version "0.38.0" + resolved "https://registry.yarnpkg.com/@oxfmt/binding-linux-riscv64-gnu/-/binding-linux-riscv64-gnu-0.38.0.tgz#a73e5e613ba6685897b9285918a3dca4da173864" + integrity sha512-djEqwFUHczstFKp5aT43TuRWxyKZSkIZUfGXIEKa0srmIAt1CXQO5O8xLgNG4SGkXTRB1domFfCE68t9SkSmfA== + +"@oxfmt/binding-linux-riscv64-musl@0.38.0": + version "0.38.0" + resolved 
"https://registry.yarnpkg.com/@oxfmt/binding-linux-riscv64-musl/-/binding-linux-riscv64-musl-0.38.0.tgz#1ad616a8277c402843dd910d40501ee39e4bd11a" + integrity sha512-76EgMMtS6sIE+9Pl9q2GZgZpbZSzqtjQhUUIWl0RVNfHg66tstdJMhY2LXESjDYhc5vFYt9qdQNM0w0zg3onPw== + +"@oxfmt/binding-linux-s390x-gnu@0.38.0": + version "0.38.0" + resolved "https://registry.yarnpkg.com/@oxfmt/binding-linux-s390x-gnu/-/binding-linux-s390x-gnu-0.38.0.tgz#b32ef8cefba97336c539fff75f2f70a88f0821f8" + integrity sha512-JYNr3i9z/YguZg088kopjvz49hDxTEL193mYL2/02uq/6BLlQRMaKrePEITTHm/vUu4ZquAKgu4mDib6pGWdyg== + +"@oxfmt/binding-linux-x64-gnu@0.38.0": + version "0.38.0" + resolved "https://registry.yarnpkg.com/@oxfmt/binding-linux-x64-gnu/-/binding-linux-x64-gnu-0.38.0.tgz#994e20b2c0df6fb58ac75fff906c886fa6e65050" + integrity sha512-Lf+/Keaw1kBKx0U3HT5PsA7/3VO4ZOmaqo4sWaeAJ6tYeX8h/2IZcEONhjry6T4BETza78z6xI3Qx+18QZix6A== + +"@oxfmt/binding-linux-x64-musl@0.38.0": + version "0.38.0" + resolved "https://registry.yarnpkg.com/@oxfmt/binding-linux-x64-musl/-/binding-linux-x64-musl-0.38.0.tgz#192e3e3013cfee0609618e132d8d735230a27a3e" + integrity sha512-4O6sf6OQuz1flk0TDrrtmXOVO3letA7fYe2IEAiJOQvKhJcMU08NiIVODQjMGZ6IQh1q91B+TlliDfbsYalw8A== + +"@oxfmt/binding-openharmony-arm64@0.38.0": + version "0.38.0" + resolved "https://registry.yarnpkg.com/@oxfmt/binding-openharmony-arm64/-/binding-openharmony-arm64-0.38.0.tgz#b491b6edf3fd75c5b54fdeacbf20149d1aa843f8" + integrity sha512-GNocbjYnielmKVBk+r/2Vc4E3oTsAO4+5gRuroUVx86Jv+mpD+hyFkf260/by0YtpF1ipqyxR8chOSgRQvD2zQ== + +"@oxfmt/binding-win32-arm64-msvc@0.38.0": + version "0.38.0" + resolved "https://registry.yarnpkg.com/@oxfmt/binding-win32-arm64-msvc/-/binding-win32-arm64-msvc-0.38.0.tgz#6385fc7d379e4ce89bf217d17f9cc613c9f9b2cc" + integrity sha512-AwgjBHRxPckbazLpECuPOSzYlppYR1CBeUSuzZuClsmTnlZA9O1MexCEP9CROe03Yo1xBGvYtiCjwKZMBChGkg== + +"@oxfmt/binding-win32-ia32-msvc@0.38.0": + version "0.38.0" + resolved 
"https://registry.yarnpkg.com/@oxfmt/binding-win32-ia32-msvc/-/binding-win32-ia32-msvc-0.38.0.tgz#d5d8c5e8723934e3778eecb89f773e7c0cbe2953" + integrity sha512-c3u+ak6Zrh1g6pM2TgNVvOgkm7q1XaIX+5Mgxvu38ozJ5OfM8c7HZk3glMdBzlTD2uK0sSfgBq1kuXwCe1NOGg== + +"@oxfmt/binding-win32-x64-msvc@0.38.0": + version "0.38.0" + resolved "https://registry.yarnpkg.com/@oxfmt/binding-win32-x64-msvc/-/binding-win32-x64-msvc-0.38.0.tgz#f53ae28dbcca1133dcdea8d6fe93526b1dcfd76f" + integrity sha512-wud1Hz0D2hYrhk6exxQQndn1htcA28wAcFb1vtP3ZXSzPFtMvc7ag/VNPv6nz6mDzM8X660jUwGEac99QcrVsA== "@oxlint-tsgolint/darwin-arm64@0.16.0": version "0.16.0" @@ -6696,100 +6699,100 @@ resolved "https://registry.yarnpkg.com/@oxlint-tsgolint/win32-x64/-/win32-x64-0.16.0.tgz#861e5c7df0108212e4b61ef6ae38df22104028a1" integrity sha512-1ufk8cgktXJuJZHKF63zCHAkaLMwZrEXnZ89H2y6NO85PtOXqu4zbdNl0VBpPP3fCUuUBu9RvNqMFiv0VsbXWA== -"@oxlint/binding-android-arm-eabi@1.50.0": - version "1.50.0" - resolved "https://registry.yarnpkg.com/@oxlint/binding-android-arm-eabi/-/binding-android-arm-eabi-1.50.0.tgz#c1ccae0cebb11aad65f311ea9d559c961beb32e4" - integrity sha512-G7MRGk/6NCe+L8ntonRdZP7IkBfEpiZ/he3buLK6JkLgMHgJShXZ+BeOwADmspXez7U7F7L1Anf4xLSkLHiGTg== - -"@oxlint/binding-android-arm64@1.50.0": - version "1.50.0" - resolved "https://registry.yarnpkg.com/@oxlint/binding-android-arm64/-/binding-android-arm64-1.50.0.tgz#6bfcfd3bb31fdfa8c03e55362df2de3d27261117" - integrity sha512-GeSuMoJWCVpovJi/e3xDSNgjeR8WEZ6MCXL6EtPiCIM2NTzv7LbflARINTXTJy2oFBYyvdf/l2PwHzYo6EdXvg== - -"@oxlint/binding-darwin-arm64@1.50.0": - version "1.50.0" - resolved "https://registry.yarnpkg.com/@oxlint/binding-darwin-arm64/-/binding-darwin-arm64-1.50.0.tgz#374bf2f4bf6ac326c74494fef1e32b98c3b62254" - integrity sha512-w3SY5YtxGnxCHPJ8Twl3KmS9oja1gERYk3AMoZ7Hv8P43ZtB6HVfs02TxvarxfL214Tm3uzvc2vn+DhtUNeKnw== - -"@oxlint/binding-darwin-x64@1.50.0": - version "1.50.0" - resolved 
"https://registry.yarnpkg.com/@oxlint/binding-darwin-x64/-/binding-darwin-x64-1.50.0.tgz#693a456ec3e9016e4fe9697b32bf6c45aaf5b58b" - integrity sha512-hNfogDqy7tvmllXKBSlHo6k5x7dhTUVOHbMSE15CCAcXzmqf5883aPvBYPOq9AE7DpDUQUZ1kVE22YbiGW+tuw== - -"@oxlint/binding-freebsd-x64@1.50.0": - version "1.50.0" - resolved "https://registry.yarnpkg.com/@oxlint/binding-freebsd-x64/-/binding-freebsd-x64-1.50.0.tgz#fd7d19910fdf5b91f104413f9e0ecbee667caba0" - integrity sha512-ykZevOWEyu0nsxolA911ucxpEv0ahw8jfEeGWOwwb/VPoE4xoexuTOAiPNlWZNJqANlJl7yp8OyzCtXTUAxotw== - -"@oxlint/binding-linux-arm-gnueabihf@1.50.0": - version "1.50.0" - resolved "https://registry.yarnpkg.com/@oxlint/binding-linux-arm-gnueabihf/-/binding-linux-arm-gnueabihf-1.50.0.tgz#24aaa9bd814d86ff7dd2c8f49519449f5ae71d45" - integrity sha512-hif3iDk7vo5GGJ4OLCCZAf2vjnU9FztGw4L0MbQL0M2iY9LKFtDMMiQAHmkF0PQGQMVbTYtPdXCLKVgdkiqWXQ== - -"@oxlint/binding-linux-arm-musleabihf@1.50.0": - version "1.50.0" - resolved "https://registry.yarnpkg.com/@oxlint/binding-linux-arm-musleabihf/-/binding-linux-arm-musleabihf-1.50.0.tgz#94df0affcaa5207bb65b648a62a08e1845ebb294" - integrity sha512-dVp9iSssiGAnTNey2Ruf6xUaQhdnvcFOJyRWd/mu5o2jVbFK15E5fbWGeFRfmuobu5QXuROtFga44+7DOS3PLg== - -"@oxlint/binding-linux-arm64-gnu@1.50.0": - version "1.50.0" - resolved "https://registry.yarnpkg.com/@oxlint/binding-linux-arm64-gnu/-/binding-linux-arm64-gnu-1.50.0.tgz#3ab39fd260f80330f517153873a744847095b874" - integrity sha512-1cT7yz2HA910CKA9NkH1ZJo50vTtmND2fkoW1oyiSb0j6WvNtJ0Wx2zoySfXWc/c+7HFoqRK5AbEoL41LOn9oA== - -"@oxlint/binding-linux-arm64-musl@1.50.0": - version "1.50.0" - resolved "https://registry.yarnpkg.com/@oxlint/binding-linux-arm64-musl/-/binding-linux-arm64-musl-1.50.0.tgz#f745226550c880cf522e539d1056c90a60c5b4c1" - integrity sha512-++B3k/HEPFVlj89cOz8kWfQccMZB/aWL9AhsW7jPIkG++63Mpwb2cE9XOEsd0PATbIan78k2Gky+09uWM1d/gQ== - -"@oxlint/binding-linux-ppc64-gnu@1.50.0": - version "1.50.0" - resolved 
"https://registry.yarnpkg.com/@oxlint/binding-linux-ppc64-gnu/-/binding-linux-ppc64-gnu-1.50.0.tgz#1d683a20b1e818025a3a9bcece61b22cd67e408c" - integrity sha512-Z9b/KpFMkx66w3gVBqjIC1AJBTZAGoI9+U+K5L4QM0CB/G0JSNC1es9b3Y0Vcrlvtdn8A+IQTkYjd/Q0uCSaZw== - -"@oxlint/binding-linux-riscv64-gnu@1.50.0": - version "1.50.0" - resolved "https://registry.yarnpkg.com/@oxlint/binding-linux-riscv64-gnu/-/binding-linux-riscv64-gnu-1.50.0.tgz#65dd7b24e2adcb984224bce37b95d5fd66c489f4" - integrity sha512-jvmuIw8wRSohsQlFNIST5uUwkEtEJmOQYr33bf/K2FrFPXHhM4KqGekI3ShYJemFS/gARVacQFgBzzJKCAyJjg== - -"@oxlint/binding-linux-riscv64-musl@1.50.0": - version "1.50.0" - resolved "https://registry.yarnpkg.com/@oxlint/binding-linux-riscv64-musl/-/binding-linux-riscv64-musl-1.50.0.tgz#087a774de599161050d89874688cede00304f720" - integrity sha512-x+UrN47oYNh90nmAAyql8eQaaRpHbDPu5guasDg10+OpszUQ3/1+1J6zFMmV4xfIEgTcUXG/oI5fxJhF4eWCNA== - -"@oxlint/binding-linux-s390x-gnu@1.50.0": - version "1.50.0" - resolved "https://registry.yarnpkg.com/@oxlint/binding-linux-s390x-gnu/-/binding-linux-s390x-gnu-1.50.0.tgz#4ff4378690868372d1d83c65fc1e48594442d17a" - integrity sha512-i/JLi2ljLUIVfekMj4ISmdt+Hn11wzYUdRRrkVUYsCWw7zAy5xV7X9iA+KMyM156LTFympa7s3oKBjuCLoTAUQ== - -"@oxlint/binding-linux-x64-gnu@1.50.0": - version "1.50.0" - resolved "https://registry.yarnpkg.com/@oxlint/binding-linux-x64-gnu/-/binding-linux-x64-gnu-1.50.0.tgz#93898f4d466639cef6cb6b6286c849cb21a4ee34" - integrity sha512-/C7brhn6c6UUPccgSPCcpLQXcp+xKIW/3sji/5VZ8/OItL3tQ2U7KalHz887UxxSQeEOmd1kY6lrpuwFnmNqOA== - -"@oxlint/binding-linux-x64-musl@1.50.0": - version "1.50.0" - resolved "https://registry.yarnpkg.com/@oxlint/binding-linux-x64-musl/-/binding-linux-x64-musl-1.50.0.tgz#8b379b4a775ac1d20aa339f9065eb3e21ef33578" - integrity sha512-oDR1f+bGOYU8LfgtEW8XtotWGB63ghtcxk5Jm6IDTCk++rTA/IRMsjOid2iMd+1bW+nP9Mdsmcdc7VbPD3+iyQ== - -"@oxlint/binding-openharmony-arm64@1.50.0": - version "1.50.0" - resolved 
"https://registry.yarnpkg.com/@oxlint/binding-openharmony-arm64/-/binding-openharmony-arm64-1.50.0.tgz#b5f7ea566a053436eb72ab9da9f00c89875d4b0a" - integrity sha512-4CmRGPp5UpvXyu4jjP9Tey/SrXDQLRvZXm4pb4vdZBxAzbFZkCyh0KyRy4txld/kZKTJlW4TO8N1JKrNEk+mWw== - -"@oxlint/binding-win32-arm64-msvc@1.50.0": - version "1.50.0" - resolved "https://registry.yarnpkg.com/@oxlint/binding-win32-arm64-msvc/-/binding-win32-arm64-msvc-1.50.0.tgz#49c60d7576e24d582ea79b91a570cbcf11b31e9c" - integrity sha512-Fq0M6vsGcFsSfeuWAACDhd5KJrO85ckbEfe1EGuBj+KPyJz7KeWte2fSFrFGmNKNXyhEMyx4tbgxiWRujBM2KQ== - -"@oxlint/binding-win32-ia32-msvc@1.50.0": - version "1.50.0" - resolved "https://registry.yarnpkg.com/@oxlint/binding-win32-ia32-msvc/-/binding-win32-ia32-msvc-1.50.0.tgz#8c1759d1ad98fef91f02c772ea017ebf8ceb521a" - integrity sha512-qTdWR9KwY/vxJGhHVIZG2eBOhidOQvOwzDxnX+jhW/zIVacal1nAhR8GLkiywW8BIFDkQKXo/zOfT+/DY+ns/w== - -"@oxlint/binding-win32-x64-msvc@1.50.0": - version "1.50.0" - resolved "https://registry.yarnpkg.com/@oxlint/binding-win32-x64-msvc/-/binding-win32-x64-msvc-1.50.0.tgz#bb35320c54c3464a7c624ee09e13928c3ebdacd3" - integrity sha512-682t7npLC4G2Ca+iNlI9fhAKTcFPYYXJjwoa88H4q+u5HHHlsnL/gHULapX3iqp+A8FIJbgdylL5KMYo2LaluQ== +"@oxlint/binding-android-arm-eabi@1.53.0": + version "1.53.0" + resolved "https://registry.yarnpkg.com/@oxlint/binding-android-arm-eabi/-/binding-android-arm-eabi-1.53.0.tgz#c42286b0a96d31fedc7b37183518c9e4054f6330" + integrity sha512-JC89/jAx4d2zhDIbK8MC4L659FN1WiMXMBkNg7b33KXSkYpUgcbf+0nz7+EPRg+VwWiZVfaoFkNHJ7RXYb5Neg== + +"@oxlint/binding-android-arm64@1.53.0": + version "1.53.0" + resolved "https://registry.yarnpkg.com/@oxlint/binding-android-arm64/-/binding-android-arm64-1.53.0.tgz#b3b0906247450cd8a3840ee330e5b7cc97626db2" + integrity sha512-CY+pZfi+uyeU7AwFrEnjsNT+VfxYmKLMuk7bVxArd8f+09hQbJb8f7C7EpvTfNqrCK1J8zZlaYI4LltmEctgbQ== + +"@oxlint/binding-darwin-arm64@1.53.0": + version "1.53.0" + resolved 
"https://registry.yarnpkg.com/@oxlint/binding-darwin-arm64/-/binding-darwin-arm64-1.53.0.tgz#b261b5f5a452dc477c1c576b9c0bc23a305da17d" + integrity sha512-0aqsC4HDQ94oI6kMz64iaOJ1f3bCVArxvaHJGOScBvFz6CcQedXi5b70Xg09CYjKNaHA56dW0QJfoZ/111kz1A== + +"@oxlint/binding-darwin-x64@1.53.0": + version "1.53.0" + resolved "https://registry.yarnpkg.com/@oxlint/binding-darwin-x64/-/binding-darwin-x64-1.53.0.tgz#3daf9e22dd2e35a24c982ff42fcb701d513075be" + integrity sha512-e+KvuaWtnisyWojO/t5qKDbp2dvVpg+1dl4MGnTb21QpY4+4+9Y1XmZPaztcA2XNvy4BIaXFW+9JH9tMpSBqUg== + +"@oxlint/binding-freebsd-x64@1.53.0": + version "1.53.0" + resolved "https://registry.yarnpkg.com/@oxlint/binding-freebsd-x64/-/binding-freebsd-x64-1.53.0.tgz#eee6be65be588fc91a785a099b53275f52f7fd7e" + integrity sha512-hpU0ZHVeblFjmZDfgi9BxhhCpURh0KjoFy5V+Tvp9sg/fRcnMUEfaJrgz+jQfOX4jctlVWrAs1ANs91+5iV+zA== + +"@oxlint/binding-linux-arm-gnueabihf@1.53.0": + version "1.53.0" + resolved "https://registry.yarnpkg.com/@oxlint/binding-linux-arm-gnueabihf/-/binding-linux-arm-gnueabihf-1.53.0.tgz#0751ed24c4a370a9cbdbfcd5583293f13e79d008" + integrity sha512-ccKxOpw+X4xa2pO+qbTOpxQ2x1+Ag3ViRQMnWt3gHp1LcpNgS1xd6GYc3OvehmHtrXqEV3YGczZ0I1qpBB4/2A== + +"@oxlint/binding-linux-arm-musleabihf@1.53.0": + version "1.53.0" + resolved "https://registry.yarnpkg.com/@oxlint/binding-linux-arm-musleabihf/-/binding-linux-arm-musleabihf-1.53.0.tgz#a795b821695d7af090b9ce036d99d0ce7b43e645" + integrity sha512-UBkBvmzSmlyH2ZObQMDKW/TuyTmUtP/XClPUyU2YLwj0qLopZTZxnDz4VG5d3wz1HQuZXO0o1QqsnQUW1v4a6Q== + +"@oxlint/binding-linux-arm64-gnu@1.53.0": + version "1.53.0" + resolved "https://registry.yarnpkg.com/@oxlint/binding-linux-arm64-gnu/-/binding-linux-arm64-gnu-1.53.0.tgz#805fb2ae2d480687a12c942f78069d6ccf9399bc" + integrity sha512-PQJJ1izoH9p61las6rZ0BWOznAhTDMmdUPL2IEBLuXFwhy2mSloYHvRkk39PSYJ1DyG+trqU5Z9ZbtHSGH6plg== + +"@oxlint/binding-linux-arm64-musl@1.53.0": + version "1.53.0" + resolved 
"https://registry.yarnpkg.com/@oxlint/binding-linux-arm64-musl/-/binding-linux-arm64-musl-1.53.0.tgz#3664aad7e24841a68525a6220e64a43de1d441c3" + integrity sha512-GXI1o4Thn/rtnRIL38BwrDMwVcUbIHKCsOixIWf/CkU3fCG3MXFzFTtDMt+34ik0Qk452d8kcpksL0w/hUkMZA== + +"@oxlint/binding-linux-ppc64-gnu@1.53.0": + version "1.53.0" + resolved "https://registry.yarnpkg.com/@oxlint/binding-linux-ppc64-gnu/-/binding-linux-ppc64-gnu-1.53.0.tgz#7ffa2e40aae448ee3728f158612f02428489e98c" + integrity sha512-Uahk7IVs2yBamCgeJ3XKpKT9Vh+de0pDKISFKnjEcI3c/w2CFHk1+W6Q6G3KI56HGwE9PWCp6ayhA9whXWkNIQ== + +"@oxlint/binding-linux-riscv64-gnu@1.53.0": + version "1.53.0" + resolved "https://registry.yarnpkg.com/@oxlint/binding-linux-riscv64-gnu/-/binding-linux-riscv64-gnu-1.53.0.tgz#016b145e0abd4a24149558873525b43609edb080" + integrity sha512-sWtcU9UkrKMWsGKdFy8R6jkm9Q0VVG1VCpxVuh0HzRQQi3ENI1Nh5CkpsdfUs2MKRcOoHKbXqTscunuXjhxoxQ== + +"@oxlint/binding-linux-riscv64-musl@1.53.0": + version "1.53.0" + resolved "https://registry.yarnpkg.com/@oxlint/binding-linux-riscv64-musl/-/binding-linux-riscv64-musl-1.53.0.tgz#16d7a1cc60f6d85aab3eea1efff84ea1450789f6" + integrity sha512-aXew1+HDvCdExijX/8NBVC854zJwxhKP3l9AHFSHQNo4EanlHtzDMIlIvP3raUkL0vXtFCkTFYezzU5HjstB8A== + +"@oxlint/binding-linux-s390x-gnu@1.53.0": + version "1.53.0" + resolved "https://registry.yarnpkg.com/@oxlint/binding-linux-s390x-gnu/-/binding-linux-s390x-gnu-1.53.0.tgz#edbf548da82d4c9ae023a031839425912ac99e81" + integrity sha512-rVpyBSqPGou9sITcsoXqUoGBUH74bxYLYOAGUqN599Zu6BQBlBU9hh3bJQ/20D1xrhhrsbiCpVPvXpLPM5nL1w== + +"@oxlint/binding-linux-x64-gnu@1.53.0": + version "1.53.0" + resolved "https://registry.yarnpkg.com/@oxlint/binding-linux-x64-gnu/-/binding-linux-x64-gnu-1.53.0.tgz#fa239fe242fd76b45d9a771096215abb82ebd325" + integrity sha512-eOyeQ8qFQ2geXmlWJuXAOaek0hFhbMLlYsU457NMLKDRoC43Xf+eDPZ9Yk0n9jDaGJ5zBl/3Dy8wo41cnIXuLA== + +"@oxlint/binding-linux-x64-musl@1.53.0": + version "1.53.0" + resolved 
"https://registry.yarnpkg.com/@oxlint/binding-linux-x64-musl/-/binding-linux-x64-musl-1.53.0.tgz#c9f88972e043463ec7fa8f855d6c92a66bc263c0" + integrity sha512-S6rBArW/zD1tob8M9PwKYrRmz+j1ss1+wjbRAJCWKd7TC3JB6noDiA95pIj9zOZVVp04MIzy5qymnYusrEyXzg== + +"@oxlint/binding-openharmony-arm64@1.53.0": + version "1.53.0" + resolved "https://registry.yarnpkg.com/@oxlint/binding-openharmony-arm64/-/binding-openharmony-arm64-1.53.0.tgz#d0df48d35bba716145dbed7529a34ce1829be49e" + integrity sha512-sd/A0Ny5sN0D/MJtlk7w2jGY4bJQou7gToa9WZF7Sj6HTyVzvlzKJWiOHfr4SulVk4ndiFQ8rKmF9rXP0EcF3A== + +"@oxlint/binding-win32-arm64-msvc@1.53.0": + version "1.53.0" + resolved "https://registry.yarnpkg.com/@oxlint/binding-win32-arm64-msvc/-/binding-win32-arm64-msvc-1.53.0.tgz#f0a5f856e911112820adbb16240add6adbe28bff" + integrity sha512-QC3q7b51Er/ZurEFcFzc7RpQ/YEoEBLJuCp3WoOzhSHHH/nkUKFy+igOxlj1z3LayhEZPDQQ7sXvv2PM2cdG3Q== + +"@oxlint/binding-win32-ia32-msvc@1.53.0": + version "1.53.0" + resolved "https://registry.yarnpkg.com/@oxlint/binding-win32-ia32-msvc/-/binding-win32-ia32-msvc-1.53.0.tgz#f43b0fc12f68620defb373684de91dbca7fbff49" + integrity sha512-3OvLgOqwd705hWHV2i8ni80pilvg6BUgpC2+xtVu++e/q28LKVohGh5J5QYJOrRMfWmxK0M/AUu43vUw62LAKQ== + +"@oxlint/binding-win32-x64-msvc@1.53.0": + version "1.53.0" + resolved "https://registry.yarnpkg.com/@oxlint/binding-win32-x64-msvc/-/binding-win32-x64-msvc-1.53.0.tgz#0e0cc062024a7a58cdaa17a3fa0cefb4a3963a25" + integrity sha512-xTiOkntexCdJytZ7ArIIgl3vGW5ujMM3sJNM7/+iqGAVJagCqjFFWn68HRWRLeyT66c95uR+CeFmQFI6mLQqDw== "@parcel/watcher-android-arm64@2.5.1": version "2.5.1" @@ -7008,10 +7011,10 @@ dependencies: "@prisma/debug" "6.15.0" -"@prisma/instrumentation@7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@prisma/instrumentation/-/instrumentation-7.2.0.tgz#9409a436d8f98e8950c8659aeeba045c4a07e891" - integrity sha512-Rh9Z4x5kEj1OdARd7U18AtVrnL6rmLSI0qYShaB4W7Wx5BKbgzndWF+QnuzMb7GLfVdlT5aYCXoPQVYuYtVu0g== +"@prisma/instrumentation@7.4.2": + 
version "7.4.2" + resolved "https://registry.yarnpkg.com/@prisma/instrumentation/-/instrumentation-7.4.2.tgz#b05e814d0647343febd26a8ccb039d27ccc69eca" + integrity sha512-r9JfchJF1Ae6yAxcaLu/V1TGqBhAuSDe3mRNOssBfx1rMzfZ4fdNvrgUBwyb/TNTGXFxlH9AZix5P257x07nrg== dependencies: "@opentelemetry/instrumentation" "^0.207.0" @@ -8953,7 +8956,7 @@ resolved "https://registry.yarnpkg.com/@testing-library/user-event/-/user-event-14.5.2.tgz#db7257d727c891905947bd1c1a99da20e03c2ebd" integrity sha512-YAh82Wh4TIrxYLmfGcixwD18oIjyC1pFQC2Y01F2lzV2HTMiYrI0nze0FD0ocB//CKS/7jIUgae+adPqxK5yCQ== -"@tokenizer/inflate@^0.2.6", "@tokenizer/inflate@^0.2.7": +"@tokenizer/inflate@^0.2.7": version "0.2.7" resolved "https://registry.yarnpkg.com/@tokenizer/inflate/-/inflate-0.2.7.tgz#32dd9dfc9abe457c89b3d9b760fc0690c85a103b" integrity sha512-MADQgmZT1eKjp06jpI2yozxaU9uVs4GzzgSL+uEq7bVcJ9V1ZXQkeGNql1fsSI0gMy1vhvNTNbUqrx+pZfJVmg== @@ -8962,6 +8965,14 @@ fflate "^0.8.2" token-types "^6.0.0" +"@tokenizer/inflate@^0.4.1": + version "0.4.1" + resolved "https://registry.yarnpkg.com/@tokenizer/inflate/-/inflate-0.4.1.tgz#fa6cdb8366151b3cc8426bf9755c1ea03a2fba08" + integrity sha512-2mAv+8pkG6GIZiF1kNg1jAjh27IDxEPKwdGul3snfztFerfPGI1LjDezZp3i7BElXompqEtPmoPx6c2wgtWsOA== + dependencies: + debug "^4.4.3" + token-types "^6.1.1" + "@tokenizer/token@^0.3.0": version "0.3.0" resolved "https://registry.yarnpkg.com/@tokenizer/token/-/token-0.3.0.tgz#fe98a93fe789247e998c75e74e9c7c63217aa276" @@ -11720,7 +11731,7 @@ available-typed-arrays@^1.0.7: dependencies: possible-typed-array-names "^1.0.0" -aws-ssl-profiles@^1.1.1: +aws-ssl-profiles@^1.1.2: version "1.1.2" resolved "https://registry.yarnpkg.com/aws-ssl-profiles/-/aws-ssl-profiles-1.1.2.tgz#157dd77e9f19b1d123678e93f120e6f193022641" integrity sha512-NZKeq9AfyQvEeNlN0zSYAaWrmBffJh3IELMZfRpJVWgrpEbtEpnjvzqBPf+mxoI287JohRDoa+/nsfqqiZmF6g== @@ -12178,10 +12189,10 @@ bl@^5.0.0: inherits "^2.0.4" readable-stream "^3.4.0" -bl@^6.0.11: - version "6.0.16" - resolved 
"https://registry.yarnpkg.com/bl/-/bl-6.0.16.tgz#29b190f1a754e2d168de3dc8c74ed8d12bf78e6e" - integrity sha512-V/kz+z2Mx5/6qDfRCilmrukUXcXuCoXKg3/3hDvzKKoSUx8CJKudfIoT29XZc3UE9xBvxs5qictiHdprwtteEg== +bl@^6.1.4: + version "6.1.6" + resolved "https://registry.yarnpkg.com/bl/-/bl-6.1.6.tgz#b40f3aea6963c6742616a957efb742c4fb87ecbb" + integrity sha512-jLsPgN/YSvPUg9UX0Kd73CXpm2Psg9FxMeCSXnk3WBO3CMT10JMwijubhGfHCnFu6TPn1ei3b975dxv7K2pWVg== dependencies: "@types/readable-stream" "^4.0.0" buffer "^6.0.3" @@ -15028,6 +15039,14 @@ effect@3.16.12: "@standard-schema/spec" "^1.0.0" fast-check "^3.23.1" +effect@^3.19.19: + version "3.19.19" + resolved "https://registry.yarnpkg.com/effect/-/effect-3.19.19.tgz#643a5a4b7445cc924a28270bc6cd1a5c8facd27e" + integrity sha512-Yc8U/SVXo2dHnaP7zNBlAo83h/nzSJpi7vph6Hzyl4ulgMBIgPmz3UzOjb9sBgpFE00gC0iETR244sfXDNLHRg== + dependencies: + "@standard-schema/spec" "^1.0.0" + fast-check "^3.23.1" + ejs@^3.1.7: version "3.1.8" resolved "https://registry.yarnpkg.com/ejs/-/ejs-3.1.8.tgz#758d32910c78047585c7ef1f92f9ee041c1c190b" @@ -17277,14 +17296,14 @@ file-type@21.0.0: token-types "^6.0.0" uint8array-extras "^1.4.0" -file-type@^20.4.1: - version "20.5.0" - resolved "https://registry.yarnpkg.com/file-type/-/file-type-20.5.0.tgz#616e90564e6ffabab22ad9763e28efcc5c95aee0" - integrity sha512-BfHZtG/l9iMm4Ecianu7P8HRD2tBHLtjXinm4X62XBOYzi7CYA7jyqfJzOvXHqzVrVPYqBo2/GvbARMaaJkKVg== +file-type@^21.3.1: + version "21.3.1" + resolved "https://registry.yarnpkg.com/file-type/-/file-type-21.3.1.tgz#a49e103e3491e0e52d13f5b2d99d4d7204a34a5e" + integrity sha512-SrzXX46I/zsRDjTb82eucsGg0ODq2NpGDp4HcsFKApPy8P8vACjpJRDoGGMfEzhFC0ry61ajd7f72J3603anBA== dependencies: - "@tokenizer/inflate" "^0.2.6" - strtok3 "^10.2.0" - token-types "^6.0.0" + "@tokenizer/inflate" "^0.4.1" + strtok3 "^10.3.4" + token-types "^6.1.1" uint8array-extras "^1.4.0" file-uri-to-path@1.0.0: @@ -18755,10 +18774,10 @@ homedir-polyfill@^1.0.1: dependencies: parse-passwd "^1.0.0" -hono@^4.12.5: - 
version "4.12.5" - resolved "https://registry.yarnpkg.com/hono/-/hono-4.12.5.tgz#8c16209b35040025d3f110d18f3b821de6cab00f" - integrity sha512-3qq+FUBtlTHhtYxbxheZgY8NIFnkkC/MR8u5TTsr7YZ3wixryQ3cCwn3iZbg8p8B88iDBBAYSfZDS75t8MN7Vg== +hono@^4.12.7: + version "4.12.7" + resolved "https://registry.yarnpkg.com/hono/-/hono-4.12.7.tgz#ca000956e965c2b3d791e43540498e616d6c6442" + integrity sha512-jq9l1DM0zVIvsm3lv9Nw9nlJnMNPOcAtsbsgiUhWcFzPE99Gvo6yRTlszSLLYacMeQ6quHD6hMfId8crVHvexw== hookable@^5.5.3: version "5.5.3" @@ -19038,7 +19057,7 @@ iconv-lite@^0.4.24, iconv-lite@^0.4.4, iconv-lite@^0.4.8, iconv-lite@~0.4.24: dependencies: safer-buffer ">= 2.1.2 < 3" -iconv-lite@^0.7.0, iconv-lite@~0.7.0: +iconv-lite@^0.7.0, iconv-lite@^0.7.2, iconv-lite@~0.7.0: version "0.7.2" resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.7.2.tgz#d0bdeac3f12b4835b7359c2ad89c422a4d1cc72e" integrity sha512-im9DjEDQ55s9fL4EYzOAv0yMqmMBSZp6G0VvFyTMPKWxiSBHUj9NW/qqLmXUwXrrM7AvqSlTCfvqRb0cM8yYqw== @@ -19117,6 +19136,16 @@ import-in-the-middle@^2.0.0, import-in-the-middle@^2.0.6: cjs-module-lexer "^2.2.0" module-details-from-path "^1.0.4" +import-in-the-middle@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/import-in-the-middle/-/import-in-the-middle-3.0.0.tgz#720c12b4c07ea58b32a54667e70a022e18cc36a3" + integrity sha512-OnGy+eYT7wVejH2XWgLRgbmzujhhVIATQH0ztIeRilwHBjTeG3pD+XnH3PKX0r9gJ0BuJmJ68q/oh9qgXnNDQg== + dependencies: + acorn "^8.15.0" + acorn-import-attributes "^1.9.5" + cjs-module-lexer "^2.2.0" + module-details-from-path "^1.0.4" + import-meta-resolve@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/import-meta-resolve/-/import-meta-resolve-3.0.0.tgz#94a6aabc623874fbc2f3525ec1300db71c6cbc11" @@ -21023,10 +21052,10 @@ long@^4.0.0: resolved "https://registry.yarnpkg.com/long/-/long-4.0.0.tgz#9a7b71cfb7d361a194ea555241c92f7468d5bf28" integrity sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA== -long@^5.2.1: 
- version "5.2.3" - resolved "https://registry.yarnpkg.com/long/-/long-5.2.3.tgz#a3ba97f3877cf1d778eccbcb048525ebb77499e1" - integrity sha512-lcHwpNoggQTObv5apGNCTdJrO69eHOZMi4BNC+rTLER8iHAqGrUVeLh/irVIM7zTw2bOXA8T6uNPeujwOLg/2Q== +long@^5.3.2: + version "5.3.2" + resolved "https://registry.yarnpkg.com/long/-/long-5.3.2.tgz#1d84463095999262d7d7b7f8bfd4a8cc55167f83" + integrity sha512-mNAgZ1GmyNhD7AuqnTG3/VQ26o760+ZYBPKjPvugO8+nLbYfX6TVpJPseBvopbdY+qpZ/lKUnmEc1LeZYS3QAA== longest-streak@^3.0.0: version "3.1.0" @@ -21076,7 +21105,7 @@ lru-cache@^5.1.1: dependencies: yallist "^3.0.2" -lru-cache@^7.14.1, lru-cache@^7.4.4, lru-cache@^7.5.1, lru-cache@^7.7.1: +lru-cache@^7.4.4, lru-cache@^7.5.1, lru-cache@^7.7.1: version "7.18.3" resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-7.18.3.tgz#f793896e0fd0e954a59dfdd82f0773808df6aa89" integrity sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA== @@ -21089,10 +21118,10 @@ lru-memoizer@2.3.0: lodash.clonedeep "^4.5.0" lru-cache "6.0.0" -lru.min@^1.0.0: - version "1.1.1" - resolved "https://registry.yarnpkg.com/lru.min/-/lru.min-1.1.1.tgz#146e01e3a183fa7ba51049175de04667d5701f0e" - integrity sha512-FbAj6lXil6t8z4z3j0E5mfRlPzxkySotzUHwRXjlpRh10vc6AI6WN62ehZj82VG7M20rqogJ0GLwar2Xa05a8Q== +lru.min@^1.1.0, lru.min@^1.1.4: + version "1.1.4" + resolved "https://registry.yarnpkg.com/lru.min/-/lru.min-1.1.4.tgz#6ea1737a8c1ba2300cc87ad46910a4bdffa0117b" + integrity sha512-DqC6n3QQ77zdFpCMASA1a3Jlb64Hv2N2DciFGkO/4L9+q/IpIAuRlKOvCXabtRW6cQf8usbmM6BE/TOPysCdIA== luxon@^3.2.1, luxon@~3.4.0: version "3.4.4" @@ -22032,7 +22061,7 @@ minimalistic-assert@^1.0.0: resolved "https://registry.yarnpkg.com/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz#2e194de044626d4a10e7f7fbc00ce73e83e4d5c7" integrity sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A== -minimatch@10.1.1, minimatch@10.2.4, minimatch@^10.0.3, minimatch@^10.2.2: +minimatch@10.1.1, 
minimatch@10.2.4, minimatch@^10.2.2, minimatch@^10.2.4: version "10.2.4" resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-10.2.4.tgz#465b3accbd0218b8281f5301e27cedc697f96fde" integrity sha512-oRjTw/97aTBN0RHbYCdtF1MQfvusSIBQM0IZEgzl6426+8jSC0nF1a/GmnVLpfB9yyr6g6FTqWqiZVbxrtaCIg== @@ -22466,20 +22495,19 @@ mute-stream@0.0.8: resolved "https://registry.yarnpkg.com/mute-stream/-/mute-stream-0.0.8.tgz#1630c42b2251ff81e2a283de96a5497ea92e5e0d" integrity sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA== -mysql2@^3.11.3, mysql2@^3.14.4: - version "3.14.4" - resolved "https://registry.yarnpkg.com/mysql2/-/mysql2-3.14.4.tgz#36e33a8d33820a299fb9e9221486310b1a4c8767" - integrity sha512-Cs/jx3WZPNrYHVz+Iunp9ziahaG5uFMvD2R8Zlmc194AqXNxt9HBNu7ZsPYrUtmJsF0egETCWIdMIYAwOGjL1w== +mysql2@^3.19.1: + version "3.19.1" + resolved "https://registry.yarnpkg.com/mysql2/-/mysql2-3.19.1.tgz#1e9c88646cb2f0cb3d8df6ce56b8f4d6cbf6b013" + integrity sha512-yn4zh+Uxu5J3Zvi6Ao96lJ7BSBRkspHflWQAmOPND+htbpIKDQw99TTvPzgihKO/QyMickZopO4OsnixnpcUwA== dependencies: - aws-ssl-profiles "^1.1.1" + aws-ssl-profiles "^1.1.2" denque "^2.1.0" generate-function "^2.3.1" - iconv-lite "^0.7.0" - long "^5.2.1" - lru.min "^1.0.0" - named-placeholders "^1.1.3" - seq-queue "^0.0.5" - sqlstring "^2.3.2" + iconv-lite "^0.7.2" + long "^5.3.2" + lru.min "^1.1.4" + named-placeholders "^1.1.6" + sql-escaper "^1.3.3" mysql@^2.18.1: version "2.18.1" @@ -22500,12 +22528,12 @@ mz@^2.7.0: object-assign "^4.0.1" thenify-all "^1.0.0" -named-placeholders@^1.1.3: - version "1.1.3" - resolved "https://registry.yarnpkg.com/named-placeholders/-/named-placeholders-1.1.3.tgz#df595799a36654da55dda6152ba7a137ad1d9351" - integrity sha512-eLoBxg6wE/rZkJPhU/xRX1WTpkFEwDJEN96oxFrTsqBdbT5ec295Q+CoHrL9IT0DipqKhmGcaZmwOt8OON5x1w== +named-placeholders@^1.1.6: + version "1.1.6" + resolved 
"https://registry.yarnpkg.com/named-placeholders/-/named-placeholders-1.1.6.tgz#c50c6920b43f258f59c16add1e56654f5cc02bb5" + integrity sha512-Tz09sEL2EEuv5fFowm419c1+a/jSMiBjI9gHxVLrVdbUkkNUUfjsVYs9pVZu5oCon/kmRh9TfLEObFtkVxmY0w== dependencies: - lru-cache "^7.14.1" + lru.min "^1.1.0" nanoid@^3.3.11, nanoid@^3.3.6, nanoid@^3.3.8: version "3.3.11" @@ -23686,32 +23714,32 @@ oxc-parser@^0.76.0: "@oxc-parser/binding-win32-arm64-msvc" "0.76.0" "@oxc-parser/binding-win32-x64-msvc" "0.76.0" -oxfmt@^0.32.0: - version "0.32.0" - resolved "https://registry.yarnpkg.com/oxfmt/-/oxfmt-0.32.0.tgz#b6f28b29018724ae320414d5794b18e95a6abfaf" - integrity sha512-KArQhGzt/Y8M1eSAX98Y8DLtGYYDQhkR55THUPY5VNcpFQ+9nRZkL3ULXhagHMD2hIvjy8JSeEQEP5/yYJSrLA== +oxfmt@^0.38.0: + version "0.38.0" + resolved "https://registry.yarnpkg.com/oxfmt/-/oxfmt-0.38.0.tgz#377ba2c263c7df735252dd5e90a96003beb270b5" + integrity sha512-RGYfnnxmCz8dMQ1Oo5KrYkNRc9cne2WL2vfE+datWNkgiSAkfUsqpGLR7rnkN6cQFgQkHDZH400eXN6izJ8Lww== dependencies: tinypool "2.1.0" optionalDependencies: - "@oxfmt/binding-android-arm-eabi" "0.32.0" - "@oxfmt/binding-android-arm64" "0.32.0" - "@oxfmt/binding-darwin-arm64" "0.32.0" - "@oxfmt/binding-darwin-x64" "0.32.0" - "@oxfmt/binding-freebsd-x64" "0.32.0" - "@oxfmt/binding-linux-arm-gnueabihf" "0.32.0" - "@oxfmt/binding-linux-arm-musleabihf" "0.32.0" - "@oxfmt/binding-linux-arm64-gnu" "0.32.0" - "@oxfmt/binding-linux-arm64-musl" "0.32.0" - "@oxfmt/binding-linux-ppc64-gnu" "0.32.0" - "@oxfmt/binding-linux-riscv64-gnu" "0.32.0" - "@oxfmt/binding-linux-riscv64-musl" "0.32.0" - "@oxfmt/binding-linux-s390x-gnu" "0.32.0" - "@oxfmt/binding-linux-x64-gnu" "0.32.0" - "@oxfmt/binding-linux-x64-musl" "0.32.0" - "@oxfmt/binding-openharmony-arm64" "0.32.0" - "@oxfmt/binding-win32-arm64-msvc" "0.32.0" - "@oxfmt/binding-win32-ia32-msvc" "0.32.0" - "@oxfmt/binding-win32-x64-msvc" "0.32.0" + "@oxfmt/binding-android-arm-eabi" "0.38.0" + "@oxfmt/binding-android-arm64" "0.38.0" + "@oxfmt/binding-darwin-arm64" 
"0.38.0" + "@oxfmt/binding-darwin-x64" "0.38.0" + "@oxfmt/binding-freebsd-x64" "0.38.0" + "@oxfmt/binding-linux-arm-gnueabihf" "0.38.0" + "@oxfmt/binding-linux-arm-musleabihf" "0.38.0" + "@oxfmt/binding-linux-arm64-gnu" "0.38.0" + "@oxfmt/binding-linux-arm64-musl" "0.38.0" + "@oxfmt/binding-linux-ppc64-gnu" "0.38.0" + "@oxfmt/binding-linux-riscv64-gnu" "0.38.0" + "@oxfmt/binding-linux-riscv64-musl" "0.38.0" + "@oxfmt/binding-linux-s390x-gnu" "0.38.0" + "@oxfmt/binding-linux-x64-gnu" "0.38.0" + "@oxfmt/binding-linux-x64-musl" "0.38.0" + "@oxfmt/binding-openharmony-arm64" "0.38.0" + "@oxfmt/binding-win32-arm64-msvc" "0.38.0" + "@oxfmt/binding-win32-ia32-msvc" "0.38.0" + "@oxfmt/binding-win32-x64-msvc" "0.38.0" oxlint-tsgolint@^0.16.0: version "0.16.0" @@ -23725,30 +23753,30 @@ oxlint-tsgolint@^0.16.0: "@oxlint-tsgolint/win32-arm64" "0.16.0" "@oxlint-tsgolint/win32-x64" "0.16.0" -oxlint@^1.50.0: - version "1.50.0" - resolved "https://registry.yarnpkg.com/oxlint/-/oxlint-1.50.0.tgz#e3360d13584ee42ddcddaa0438053037fbe04e88" - integrity sha512-iSJ4IZEICBma8cZX7kxIIz9PzsYLF2FaLAYN6RKu7VwRVKdu7RIgpP99bTZaGl//Yao7fsaGZLSEo5xBrI5ReQ== +oxlint@^1.53.0: + version "1.53.0" + resolved "https://registry.yarnpkg.com/oxlint/-/oxlint-1.53.0.tgz#74b2241c639501b68574550578869055a28f9ee6" + integrity sha512-TLW0PzGbpO1JxUnuy1pIqVPjQUGh4fNfxu5XJbdFIRFVaJ0UFzTjjk/hSFTMRxN6lZub53xL/IwJNEkrh7VtDg== optionalDependencies: - "@oxlint/binding-android-arm-eabi" "1.50.0" - "@oxlint/binding-android-arm64" "1.50.0" - "@oxlint/binding-darwin-arm64" "1.50.0" - "@oxlint/binding-darwin-x64" "1.50.0" - "@oxlint/binding-freebsd-x64" "1.50.0" - "@oxlint/binding-linux-arm-gnueabihf" "1.50.0" - "@oxlint/binding-linux-arm-musleabihf" "1.50.0" - "@oxlint/binding-linux-arm64-gnu" "1.50.0" - "@oxlint/binding-linux-arm64-musl" "1.50.0" - "@oxlint/binding-linux-ppc64-gnu" "1.50.0" - "@oxlint/binding-linux-riscv64-gnu" "1.50.0" - "@oxlint/binding-linux-riscv64-musl" "1.50.0" - "@oxlint/binding-linux-s390x-gnu" 
"1.50.0" - "@oxlint/binding-linux-x64-gnu" "1.50.0" - "@oxlint/binding-linux-x64-musl" "1.50.0" - "@oxlint/binding-openharmony-arm64" "1.50.0" - "@oxlint/binding-win32-arm64-msvc" "1.50.0" - "@oxlint/binding-win32-ia32-msvc" "1.50.0" - "@oxlint/binding-win32-x64-msvc" "1.50.0" + "@oxlint/binding-android-arm-eabi" "1.53.0" + "@oxlint/binding-android-arm64" "1.53.0" + "@oxlint/binding-darwin-arm64" "1.53.0" + "@oxlint/binding-darwin-x64" "1.53.0" + "@oxlint/binding-freebsd-x64" "1.53.0" + "@oxlint/binding-linux-arm-gnueabihf" "1.53.0" + "@oxlint/binding-linux-arm-musleabihf" "1.53.0" + "@oxlint/binding-linux-arm64-gnu" "1.53.0" + "@oxlint/binding-linux-arm64-musl" "1.53.0" + "@oxlint/binding-linux-ppc64-gnu" "1.53.0" + "@oxlint/binding-linux-riscv64-gnu" "1.53.0" + "@oxlint/binding-linux-riscv64-musl" "1.53.0" + "@oxlint/binding-linux-s390x-gnu" "1.53.0" + "@oxlint/binding-linux-x64-gnu" "1.53.0" + "@oxlint/binding-linux-x64-musl" "1.53.0" + "@oxlint/binding-openharmony-arm64" "1.53.0" + "@oxlint/binding-win32-arm64-msvc" "1.53.0" + "@oxlint/binding-win32-ia32-msvc" "1.53.0" + "@oxlint/binding-win32-x64-msvc" "1.53.0" p-defer@^1.0.0: version "1.0.0" @@ -26972,11 +27000,6 @@ send@~0.19.0, send@~0.19.1: range-parser "~1.2.1" statuses "~2.0.2" -seq-queue@^0.0.5: - version "0.0.5" - resolved "https://registry.yarnpkg.com/seq-queue/-/seq-queue-0.0.5.tgz#d56812e1c017a6e4e7c3e3a37a1da6d78dd3c93e" - integrity sha512-hr3Wtp/GZIc/6DAGPDcV4/9WoZhjrkXsi5B/07QgX8tsdc6ilr7BFM6PM6rbdAX1kFSDYeZGLipIZZKyQP0O5Q== - serialize-javascript@^6.0.0, serialize-javascript@^6.0.1, serialize-javascript@^6.0.2: version "6.0.2" resolved "https://registry.yarnpkg.com/serialize-javascript/-/serialize-javascript-6.0.2.tgz#defa1e055c83bf6d59ea805d8da862254eb6a6c2" @@ -27315,9 +27338,9 @@ simple-get@^4.0.0, simple-get@^4.0.1: simple-concat "^1.0.0" simple-git@^3.28.0: - version "3.30.0" - resolved 
"https://registry.yarnpkg.com/simple-git/-/simple-git-3.30.0.tgz#260b816f369c298b60a509a319b4f0b9fadbd7e0" - integrity sha512-q6lxyDsCmEal/MEGhP1aVyQ3oxnagGlBDOVSIB4XUVLl1iZh0Pah6ebC9V4xBap/RfgP2WlI8EKs0WS0rMEJHg== + version "3.33.0" + resolved "https://registry.yarnpkg.com/simple-git/-/simple-git-3.33.0.tgz#b903dc70f5b93535a4f64ff39172da43058cfb88" + integrity sha512-D4V/tGC2sjsoNhoMybKyGoE+v8A60hRawKQ1iFRA1zwuDgGZCBJ4ByOzZ5J8joBbi4Oam0qiPH+GhzmSBwbJng== dependencies: "@kwsites/file-exists" "^1.1.1" "@kwsites/promise-deferred" "^1.1.1" @@ -27814,16 +27837,16 @@ sprintf-js@~1.0.2: resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c" integrity sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw= +sql-escaper@^1.3.3: + version "1.3.3" + resolved "https://registry.yarnpkg.com/sql-escaper/-/sql-escaper-1.3.3.tgz#65faf89f048d26bb9a75566b82b5990ddf8a5b7f" + integrity sha512-BsTCV265VpTp8tm1wyIm1xqQCS+Q9NHx2Sr+WcnUrgLrQ6yiDIvHYJV5gHxsj1lMBy2zm5twLaZao8Jd+S8JJw== + sqlstring@2.3.1: version "2.3.1" resolved "https://registry.yarnpkg.com/sqlstring/-/sqlstring-2.3.1.tgz#475393ff9e91479aea62dcaf0ca3d14983a7fb40" integrity sha1-R1OT/56RR5rqYtyvDKPRSYOn+0A= -sqlstring@^2.3.2: - version "2.3.3" - resolved "https://registry.yarnpkg.com/sqlstring/-/sqlstring-2.3.3.tgz#2ddc21f03bce2c387ed60680e739922c65751d0c" - integrity sha512-qC9iz2FlN7DQl3+wjwn3802RTyjCx7sDvfQEXchwa6CWOx07/WVfh91gBmQ9fahw8snwGEWU3xGzOt4tFyHLxg== - srvx@^0.11.2: version "0.11.4" resolved "https://registry.yarnpkg.com/srvx/-/srvx-0.11.4.tgz#0d1dd962c2320f84fc7872f2500b21c84c3d1b97" @@ -28210,10 +28233,10 @@ strnum@^2.1.2: resolved "https://registry.yarnpkg.com/strnum/-/strnum-2.1.2.tgz#a5e00ba66ab25f9cafa3726b567ce7a49170937a" integrity sha512-l63NF9y/cLROq/yqKXSLtcMeeyOfnSQlfMSlzFt/K73oIaD8DGaQWd7Z34X9GPiKqP5rbSh84Hl4bOlLcjiSrQ== -strtok3@^10.2.0, strtok3@^10.2.2: - version "10.3.1" - resolved 
"https://registry.yarnpkg.com/strtok3/-/strtok3-10.3.1.tgz#80fe431a4ee652de4e33f14e11e15fd5170a627d" - integrity sha512-3JWEZM6mfix/GCJBBUrkA8p2Id2pBkyTkVCJKto55w080QBKZ+8R171fGrbiSp+yMO/u6F8/yUh7K4V9K+YCnw== +strtok3@^10.2.2, strtok3@^10.3.4: + version "10.3.4" + resolved "https://registry.yarnpkg.com/strtok3/-/strtok3-10.3.4.tgz#793ebd0d59df276a085586134b73a406e60be9c1" + integrity sha512-KIy5nylvC5le1OdaaoCJ07L+8iQzJHGH6pWDuzS+d07Cu7n1MZ2x26P8ZKIWfbK02+XIL8Mp4RkWeqdUCrDMfg== dependencies: "@tokenizer/token" "^0.3.0" @@ -28284,6 +28307,7 @@ stylus@0.59.0, stylus@^0.59.0: sucrase@^3.27.0, sucrase@^3.35.0, sucrase@getsentry/sucrase#es2020-polyfills: version "3.36.0" + uid fd682f6129e507c00bb4e6319cc5d6b767e36061 resolved "https://codeload.github.com/getsentry/sucrase/tar.gz/fd682f6129e507c00bb4e6319cc5d6b767e36061" dependencies: "@jridgewell/gen-mapping" "^0.3.2" @@ -28534,18 +28558,18 @@ tarn@^3.0.2: resolved "https://registry.yarnpkg.com/tarn/-/tarn-3.0.2.tgz#73b6140fbb881b71559c4f8bfde3d9a4b3d27693" integrity sha512-51LAVKUSZSVfI05vjPESNc5vwqqZpbXCsU+/+wxlOrUjk2SnFTt97v9ZgQrD4YmxYW1Px6w2KjaDitCfkvgxMQ== -tedious@^18.6.1: - version "18.6.1" - resolved "https://registry.yarnpkg.com/tedious/-/tedious-18.6.1.tgz#1c4a3f06c891be67a032117e2e25193286d44496" - integrity sha512-9AvErXXQTd6l7TDd5EmM+nxbOGyhnmdbp/8c3pw+tjaiSXW9usME90ET/CRG1LN1Y9tPMtz/p83z4Q97B4DDpw== +tedious@^19.2.1: + version "19.2.1" + resolved "https://registry.yarnpkg.com/tedious/-/tedious-19.2.1.tgz#58edc3838ebacc34ff3856177905137bc8ba3b1f" + integrity sha512-pk1Q16Yl62iocuQB+RWbg6rFUFkIyzqOFQ6NfysCltRvQqKwfurgj8v/f2X+CKvDhSL4IJ0cCOfCHDg9PWEEYA== dependencies: "@azure/core-auth" "^1.7.2" "@azure/identity" "^4.2.1" "@azure/keyvault-keys" "^4.4.0" - "@js-joda/core" "^5.6.1" + "@js-joda/core" "^5.6.5" "@types/node" ">=18" - bl "^6.0.11" - iconv-lite "^0.6.3" + bl "^6.1.4" + iconv-lite "^0.7.0" js-md4 "^0.3.2" native-duplexpair "^1.0.0" sprintf-js "^1.1.3" @@ -28890,11 +28914,12 @@ toidentifier@~1.0.1: 
resolved "https://registry.yarnpkg.com/toidentifier/-/toidentifier-1.0.1.tgz#3be34321a88a820ed1bd80dfaa33e479fbb8dd35" integrity sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA== -token-types@^6.0.0: - version "6.0.0" - resolved "https://registry.yarnpkg.com/token-types/-/token-types-6.0.0.tgz#1ab26be1ef9c434853500c071acfe5c8dd6544a3" - integrity sha512-lbDrTLVsHhOMljPscd0yitpozq7Ga2M5Cvez5AjGg8GASBjtt6iERCAJ93yommPmz62fb45oFIXHEZ3u9bfJEA== +token-types@^6.0.0, token-types@^6.1.1: + version "6.1.2" + resolved "https://registry.yarnpkg.com/token-types/-/token-types-6.1.2.tgz#18d0fd59b996d421f9f83914d6101c201bd08129" + integrity sha512-dRXchy+C0IgK8WPC6xvCHFRIWYUbqqdEIKPaKo/AcTUNzwLTK6AH7RjdLWsEZcAN/TBdtfUw3PYEgPr5VPr6ww== dependencies: + "@borewit/text-codec" "^0.2.1" "@tokenizer/token" "^0.3.0" ieee754 "^1.2.1" @@ -31132,9 +31157,9 @@ yarn-deduplicate@6.0.2: tslib "^2.5.0" yauzl@^3.1.3: - version "3.2.0" - resolved "https://registry.yarnpkg.com/yauzl/-/yauzl-3.2.0.tgz#7b6cb548f09a48a6177ea0be8ece48deb7da45c0" - integrity sha512-Ow9nuGZE+qp1u4JIPvg+uCiUr7xGQWdff7JQSk5VGYTAZMDe2q8lxJ10ygv10qmSj031Ty/6FNJpLO4o1Sgc+w== + version "3.2.1" + resolved "https://registry.yarnpkg.com/yauzl/-/yauzl-3.2.1.tgz#d35befb9a0fdd328da41926be895ade2de14dbe7" + integrity sha512-k1isifdbpNSFEHFJ1ZY4YDewv0IH9FR61lDetaRMD3j2ae3bIXGV+7c+LHCqtQGofSd8PIyV4X6+dHMAnSr60A== dependencies: buffer-crc32 "~0.2.3" pend "~1.2.0"