From 7855f254b73ca2315412cf25ee78df52b326ca32 Mon Sep 17 00:00:00 2001 From: Kaylee <65376239+KayleeWilliams@users.noreply.github.com> Date: Fri, 17 Apr 2026 10:29:05 +0100 Subject: [PATCH 1/7] chore: add initial project setup --- .claude/CLAUDE.md | 123 ++++++++++++++++++ .claude/settings.json | 15 +++ .cursor/hooks.json | 10 ++ .gitignore | 38 ++++++ .husky/pre-commit | 32 +++++ .npmrc | 0 .vscode/settings.json | 54 ++++++++ .zed/settings.json | 50 +++++++ AGENTS.md | 123 ++++++++++++++++++ biome.jsonc | 8 ++ bun.lock | 120 +++++++++++++++++ package.json | 30 +++++ packages/typescript-config/base.json | 19 +++ packages/typescript-config/nextjs.json | 12 ++ packages/typescript-config/package.json | 9 ++ packages/typescript-config/react-library.json | 7 + turbo.json | 21 +++ 17 files changed, 671 insertions(+) create mode 100644 .claude/CLAUDE.md create mode 100644 .claude/settings.json create mode 100644 .cursor/hooks.json create mode 100644 .gitignore create mode 100644 .husky/pre-commit create mode 100644 .npmrc create mode 100644 .vscode/settings.json create mode 100644 .zed/settings.json create mode 100644 AGENTS.md create mode 100644 biome.jsonc create mode 100644 bun.lock create mode 100644 package.json create mode 100644 packages/typescript-config/base.json create mode 100644 packages/typescript-config/nextjs.json create mode 100644 packages/typescript-config/package.json create mode 100644 packages/typescript-config/react-library.json create mode 100644 turbo.json diff --git a/.claude/CLAUDE.md b/.claude/CLAUDE.md new file mode 100644 index 0000000..0a7bb42 --- /dev/null +++ b/.claude/CLAUDE.md @@ -0,0 +1,123 @@ +# Ultracite Code Standards + +This project uses **Ultracite**, a zero-config preset that enforces strict code quality standards through automated formatting and linting. 
+ +## Quick Reference + +- **Format code**: `bun x ultracite fix` +- **Check for issues**: `bun x ultracite check` +- **Diagnose setup**: `bun x ultracite doctor` + +Biome (the underlying engine) provides robust linting and formatting. Most issues are automatically fixable. + +--- + +## Core Principles + +Write code that is **accessible, performant, type-safe, and maintainable**. Focus on clarity and explicit intent over brevity. + +### Type Safety & Explicitness + +- Use explicit types for function parameters and return values when they enhance clarity +- Prefer `unknown` over `any` when the type is genuinely unknown +- Use const assertions (`as const`) for immutable values and literal types +- Leverage TypeScript's type narrowing instead of type assertions +- Use meaningful variable names instead of magic numbers - extract constants with descriptive names + +### Modern JavaScript/TypeScript + +- Use arrow functions for callbacks and short functions +- Prefer `for...of` loops over `.forEach()` and indexed `for` loops +- Use optional chaining (`?.`) and nullish coalescing (`??`) for safer property access +- Prefer template literals over string concatenation +- Use destructuring for object and array assignments +- Use `const` by default, `let` only when reassignment is needed, never `var` + +### Async & Promises + +- Always `await` promises in async functions - don't forget to use the return value +- Use `async/await` syntax instead of promise chains for better readability +- Handle errors appropriately in async code with try-catch blocks +- Don't use async functions as Promise executors + +### React & JSX + +- Use function components over class components +- Call hooks at the top level only, never conditionally +- Specify all dependencies in hook dependency arrays correctly +- Use the `key` prop for elements in iterables (prefer unique IDs over array indices) +- Nest children between opening and closing tags instead of passing as props +- Don't define components 
inside other components +- Use semantic HTML and ARIA attributes for accessibility: + - Provide meaningful alt text for images + - Use proper heading hierarchy + - Add labels for form inputs + - Include keyboard event handlers alongside mouse events + - Use semantic elements (` + ))} + + {resolved ? ( +
+          {resolved}
+        
+ ) : null} + {children} + + ); +} diff --git a/packages/docs/src/components/selector.tsx b/packages/docs/src/components/selector.tsx new file mode 100644 index 0000000..6348743 --- /dev/null +++ b/packages/docs/src/components/selector.tsx @@ -0,0 +1,57 @@ +"use client"; + +import { type ReactNode, useId, useState } from "react"; + +export type SelectorOption = { + value: string; + label: string; +}; + +export type SelectorProps = { + label?: string; + options: SelectorOption[]; + defaultValue?: string; + children?: (activeValue: string) => ReactNode; +}; + +/** + * Minimal dropdown-style selector. Consumers typically replace this with + * their own styled version — the default just renders a native ` setActiveValue(event.target.value)} + value={activeValue} + > + {options.map((option) => ( + + ))} + +
+ {children ? children(activeValue) : null} +
+ + ); +} diff --git a/packages/docs/src/components/steps.tsx b/packages/docs/src/components/steps.tsx new file mode 100644 index 0000000..76362fc --- /dev/null +++ b/packages/docs/src/components/steps.tsx @@ -0,0 +1,27 @@ +import type { HTMLAttributes, ReactNode } from "react"; + +export type StepsProps = HTMLAttributes & { + children?: ReactNode; +}; + +export function Steps({ children, ...rest }: StepsProps) { + return ( +
    + {children} +
+ ); +} + +export type StepProps = HTMLAttributes & { + title?: string; + children?: ReactNode; +}; + +export function Step({ title, children, ...rest }: StepProps) { + return ( +
  • + {title ?

    {title}

    : null} +
    {children}
    +
  • + ); +} diff --git a/packages/docs/src/components/tabs.tsx b/packages/docs/src/components/tabs.tsx new file mode 100644 index 0000000..3ed73fa --- /dev/null +++ b/packages/docs/src/components/tabs.tsx @@ -0,0 +1,94 @@ +"use client"; + +import { + createContext, + type ReactNode, + useContext, + useId, + useMemo, + useState, +} from "react"; + +type TabsContextValue = { + items: string[]; + activeValue: string; + setActiveValue: (value: string) => void; +}; + +const TabsContext = createContext(null); + +function useTabsContext(): TabsContextValue { + const ctx = useContext(TabsContext); + if (!ctx) { + throw new Error(" must be used inside "); + } + return ctx; +} + +function normalize(value: string): string { + return value.toLowerCase().replace(/\s+/g, "-"); +} + +export type TabsProps = { + items?: string[]; + defaultIndex?: number; + children?: ReactNode; +}; + +export function Tabs({ items = [], defaultIndex = 0, children }: TabsProps) { + const initial = items[defaultIndex] ?? items[0] ?? ""; + const [activeValue, setActiveValue] = useState(normalize(initial)); + const groupId = useId(); + + const value = useMemo( + () => ({ items, activeValue, setActiveValue }), + [items, activeValue] + ); + + return ( +
    + {items.length > 0 ? ( +
    + {items.map((item) => { + const normalized = normalize(item); + const isActive = normalized === activeValue; + return ( + + ); + })} +
    + ) : null} + {children} +
    + ); +} + +export type TabProps = { + value: string; + children?: ReactNode; +}; + +export function Tab({ value, children }: TabProps) { + const { activeValue } = useTabsContext(); + const normalized = normalize(value); + if (normalized !== activeValue) { + return null; + } + return ( +
    + {children} +
    + ); +} diff --git a/packages/docs/src/components/type-table.tsx b/packages/docs/src/components/type-table.tsx new file mode 100644 index 0000000..b317a9d --- /dev/null +++ b/packages/docs/src/components/type-table.tsx @@ -0,0 +1,91 @@ +import type { ReactNode } from "react"; + +export type TypeTableProperty = { + description?: ReactNode; + type: string; + typeDescription?: ReactNode; + typeDescriptionLink?: string; + default?: string; + required?: boolean; + deprecated?: boolean; +}; + +export type TypeTableProps = { + type?: Record; +}; + +export function TypeTable({ type }: TypeTableProps) { + const rows = Object.entries(type ?? {}); + if (rows.length === 0) { + return null; + } + return ( + + + + + + + + + + + {rows.map(([name, property]) => ( + + + + + + + ))} + +
    PropTypeDefaultDescription
    + {name} + {property.required ? ( + + * + + ) : null} + + {property.typeDescriptionLink ? ( + + {property.type} + + ) : ( + {property.type} + )} + {property.typeDescription ? ( +
    + {property.typeDescription} +
    + ) : null} +
    {property.default ? {property.default} : "—"}{property.description}
    + ); +} + +export type AutoTypeTableProps = { + /** Path to the source file — rendered as a caption; actual type extraction happens at build time via the remark plugin */ + path?: string; + /** The exported type name in the source file */ + name?: string; + type?: Record; +}; + +export function AutoTypeTable({ path, name, type }: AutoTypeTableProps) { + return ( +
    + {path && name ? ( +
    + + {name} from {path} + +
    + ) : null} + +
    + ); +} diff --git a/packages/docs/src/convert/convert.ts b/packages/docs/src/convert/convert.ts new file mode 100644 index 0000000..e879808 --- /dev/null +++ b/packages/docs/src/convert/convert.ts @@ -0,0 +1,455 @@ +import { execFile } from "node:child_process"; +import { existsSync } from "node:fs"; +import { access, mkdir, readFile, writeFile } from "node:fs/promises"; +import { basename, dirname, join, relative, resolve, sep } from "node:path"; +import { promisify } from "node:util"; +import fg from "fast-glob"; +import matter from "gray-matter"; +import { remark } from "remark"; +import remarkGfm from "remark-gfm"; +import remarkMdx from "remark-mdx"; +import type { Pluggable, PluggableList } from "unified"; +import { log } from "../internal/logger"; + +const execFileAsync = promisify(execFile); + +const FRONTMATTER_REGEX = /^---\n([\s\S]*?)\n---\n([\s\S]*)$/; +const HEADING_REGEX = /^#\s+(.+)$/m; +const YAML_QUOTE_REGEX = /["\\]/g; +const TABLE_DIVIDER_REGEX = /^:?-{2,}:?$/; +const MERMAID_FENCE_REGEX = /```mermaid\n([\s\S]*?)\n```/g; +const HTML_BREAK_REGEX = //gi; +const MDX_EXTENSION_REGEX = /\.mdx$/; +const TITLE_CASE_REGEX = /\b\w/g; +const NAME_SEPARATOR_REGEX = /[-_]+/g; +const LIST_PREFIX_REGEX = /^\d+\.\s/; + +type RemarkProcessor = ReturnType; + +let cachedProcessor: RemarkProcessor | null = null; +let cachedPluginIds: PluggableList = []; + +/** + * Create (and cache) a remark processor with the given plugins. Plugins are + * matched by identity — if the same plugin array is passed again, the existing + * processor is reused. Plugins must be stateless/module-safe for reuse. 
+ */ +function createRemarkProcessor( + additionalPlugins: PluggableList = [] +): RemarkProcessor { + const sameLength = cachedPluginIds.length === additionalPlugins.length; + const sameIdentity = + sameLength && + additionalPlugins.every((plugin, i) => plugin === cachedPluginIds[i]); + + if (cachedProcessor && sameIdentity) { + return cachedProcessor; + } + + let processor: RemarkProcessor = remark() + .use(remarkMdx) + .use(remarkGfm) + .data("settings", { + tableCellPadding: false, + tablePipeAlign: false, + } as Record); + + for (const plugin of additionalPlugins) { + if (Array.isArray(plugin)) { + const [factory, ...args] = plugin as [Pluggable, ...unknown[]]; + // biome-ignore lint/suspicious/noExplicitAny: unified's .use() overloads are too narrow for dynamic plugin arrays + processor = (processor as any).use(factory, ...args); + continue; + } + // biome-ignore lint/suspicious/noExplicitAny: unified's .use() overloads are too narrow for dynamic plugin arrays + processor = (processor as any).use(plugin); + } + + cachedProcessor = processor; + cachedPluginIds = additionalPlugins.slice(0); + return processor; +} + +function toYamlScalar(value: string): string { + return `"${value.replace(YAML_QUOTE_REGEX, "\\$&")}"`; +} + +function titleFromFileName(sourcePath: string): string { + const fileName = basename(sourcePath, ".mdx") + .replace(NAME_SEPARATOR_REGEX, " ") + .trim(); + return fileName.replace(TITLE_CASE_REGEX, (match) => match.toUpperCase()); +} + +/** + * Build a title + description frontmatter from the markdown body when the + * source file didn't include its own frontmatter block. + */ +function synthesizeFrontmatter(sourcePath: string, markdown: string): string { + const title = + markdown.match(HEADING_REGEX)?.[1]?.trim() ?? 
titleFromFileName(sourcePath); + + const lines = markdown.split("\n"); + const paragraphLines: string[] = []; + let insideFence = false; + + for (const rawLine of lines) { + const line = rawLine.trim(); + if (line.startsWith("```")) { + insideFence = !insideFence; + continue; + } + if (insideFence || line.length === 0) { + if (paragraphLines.length > 0) { + break; + } + continue; + } + if ( + line.startsWith("#") || + line.startsWith(">") || + line.startsWith("|") || + line.startsWith("<") || + line.startsWith("- ") || + line.startsWith("* ") || + LIST_PREFIX_REGEX.test(line) + ) { + if (paragraphLines.length > 0) { + break; + } + continue; + } + paragraphLines.push(line); + } + + const description = paragraphLines.join(" ").trim(); + const frontmatterLines = [`title: ${toYamlScalar(title)}`]; + + if (description.length > 0) { + frontmatterLines.push(`description: ${toYamlScalar(description)}`); + } + + return frontmatterLines.join("\n"); +} + +function compactTableCell(cell: string): string { + const trimmed = cell.trim(); + if (TABLE_DIVIDER_REGEX.test(trimmed)) { + const leftAligned = trimmed.startsWith(":"); + const rightAligned = trimmed.endsWith(":"); + return `${leftAligned ? ":" : ""}--${rightAligned ? ":" : ""}`; + } + return trimmed; +} + +function compactMarkdownTables(markdown: string): string { + const lines = markdown.split("\n"); + const compacted: string[] = []; + let insideFence = false; + + for (const rawLine of lines) { + if (rawLine.trim().startsWith("```")) { + insideFence = !insideFence; + compacted.push(rawLine); + continue; + } + + const trimmed = rawLine.trim(); + const isTableLine = + !insideFence && + trimmed.startsWith("|") && + trimmed.endsWith("|") && + trimmed.slice(1, -1).includes("|"); + + if (!isTableLine) { + compacted.push(rawLine); + continue; + } + + const indent = rawLine.match(/^\s*/)?.[0] ?? 
""; + const cells = trimmed + .slice(1, -1) + .split("|") + .map((cell) => compactTableCell(cell)); + compacted.push(`${indent}|${cells.join("|")}|`); + } + + return compacted.join("\n"); +} + +function compactMermaidBlocks(markdown: string): string { + return markdown.replace(MERMAID_FENCE_REGEX, (_block, body: string) => { + const compactBody = body + .split("\n") + .map((line) => line.replace(HTML_BREAK_REGEX, " - ")) + .map((line) => line.replace(/,\s+-\s+/g, " - ")) + .join("\n"); + return `\`\`\`mermaid\n${compactBody}\n\`\`\``; + }); +} + +export type MdxToMarkdownConfig = { + /** Source directory containing .mdx files */ + srcDir?: string; + /** Output directory for .md files */ + outDir?: string; + /** Additional remark plugins (e.g. defaultRemarkPlugins from @inth/docs/remark) */ + remarkPlugins?: PluggableList; + /** + * If true, inject `lastModified` (ISO-8601) and `lastAuthor` into the + * output frontmatter by running `git log -1` against each source file. + * Silently skipped for files that are untracked or when git is unavailable. + */ + enrichFrontmatterFromGit?: boolean; +}; + +type GitEnrichment = { + lastModified?: string; + lastAuthor?: string; +}; + +/** + * Read the last commit's author-date and author-name for a file. Best-effort — + * returns empty object on any failure (untracked file, no .git, missing + * binary) so callers never need to handle errors. 
+ */ +async function enrichFromGit(filePath: string): Promise { + try { + const { stdout } = await execFileAsync( + "git", + ["log", "-1", "--format=%aI|%an", "--", filePath], + { cwd: dirname(filePath) } + ); + const line = stdout.trim(); + if (!line) { + return {}; + } + const [iso, author] = line.split("|"); + const enrichment: GitEnrichment = {}; + if (iso) { + enrichment.lastModified = iso; + } + if (author) { + enrichment.lastAuthor = author; + } + return enrichment; + } catch { + return {}; + } +} + +function applyEnrichment( + frontmatterBlock: string, + enrichment: GitEnrichment +): string { + if (!(enrichment.lastModified || enrichment.lastAuthor)) { + return frontmatterBlock; + } + const parsed = matter(`---\n${frontmatterBlock}\n---\n`); + const merged: Record = { + ...parsed.data, + ...(enrichment.lastModified && { lastModified: enrichment.lastModified }), + ...(enrichment.lastAuthor && { lastAuthor: enrichment.lastAuthor }), + }; + const restringified = matter.stringify("", merged).trim(); + return restringified + .replace(/^---\s*\n/, "") + .replace(/\n---\s*$/, "") + .trim(); +} + +export type ConvertResult = { + markdown: string; + frontmatter: string; +}; + +/** + * Convert a single MDX file to markdown in memory. Returns the rendered + * markdown plus the (possibly synthesized) frontmatter block. + */ +export async function convertMdxFile( + sourcePath: string, + remarkPlugins: PluggableList = [], + enrichFromGitFlag = false +): Promise { + const raw = await readFile(sourcePath, "utf8"); + const processor = createRemarkProcessor(remarkPlugins); + const frontmatterMatch = raw.match(FRONTMATTER_REGEX); + let frontmatter = ""; + let content = raw; + + if (frontmatterMatch) { + frontmatter = frontmatterMatch[1] ?? ""; + content = frontmatterMatch[2] ?? 
""; + } + + const processed = await processor.process({ + value: content, + path: sourcePath, + }); + + const markdown = compactMermaidBlocks( + compactMarkdownTables(String(processed)) + ); + let resolvedFrontmatter = + frontmatter.trim().length > 0 + ? frontmatter + : synthesizeFrontmatter(sourcePath, markdown); + + if (enrichFromGitFlag) { + const enrichment = await enrichFromGit(sourcePath); + resolvedFrontmatter = applyEnrichment(resolvedFrontmatter, enrichment); + } + + const withFrontmatter = resolvedFrontmatter + ? `---\n${resolvedFrontmatter}\n---\n${markdown}` + : markdown; + + return { + markdown: withFrontmatter, + frontmatter: resolvedFrontmatter, + }; +} + +function deriveOutputPath( + inputFilePath: string, + srcDir: string, + outDir: string +): string { + const normalizedSrcDir = resolve(srcDir) + sep; + const normalizedInput = resolve(inputFilePath); + + if ( + normalizedInput.toLowerCase().startsWith(normalizedSrcDir.toLowerCase()) + ) { + const relativePath = relative(srcDir, normalizedInput); + return join(outDir, relativePath.replace(MDX_EXTENSION_REGEX, ".md")); + } + + return join( + outDir, + basename(normalizedInput).replace(MDX_EXTENSION_REGEX, ".md") + ); +} + +async function processMdxFile( + mdxFilePath: string, + srcDir: string, + outDir: string, + remarkPlugins: PluggableList, + enrichFromGitFlag: boolean, + writeToStdout = false +): Promise { + const resolvedPath = resolve(mdxFilePath); + try { + await access(resolvedPath); + } catch { + log.error(`File not found: ${resolvedPath}`); + return false; + } + + if (!resolvedPath.endsWith(".mdx")) { + log.error(`Not an MDX file: ${resolvedPath}`); + return false; + } + + try { + const { markdown } = await convertMdxFile( + resolvedPath, + remarkPlugins, + enrichFromGitFlag + ); + const outputPath = deriveOutputPath(resolvedPath, srcDir, outDir); + + if (writeToStdout) { + process.stdout.write(markdown); + } + + await mkdir(dirname(outputPath), { recursive: true }); + await 
writeFile(outputPath, markdown); + + if (!writeToStdout) { + log.summary(`Converted: ${resolvedPath} → ${outputPath}`); + } + return true; + } catch (error) { + log.error(`Failed to process ${mdxFilePath}: ${String(error)}`); + return false; + } +} + +/** + * Convert a single MDX file and write the output. Also writes to stdout so + * build scripts can pipe/stream output when invoked on one file at a time. + */ +export async function convertSingleMdxFile( + mdxFilePath: string, + config: MdxToMarkdownConfig = {} +): Promise { + const srcDir = config.srcDir + ? resolve(config.srcDir) + : resolve(process.cwd(), ".c15t"); + const outDir = config.outDir + ? resolve(config.outDir) + : resolve(process.cwd(), "public"); + const remarkPlugins = config.remarkPlugins ?? []; + return await processMdxFile( + mdxFilePath, + srcDir, + outDir, + remarkPlugins, + config.enrichFrontmatterFromGit ?? false, + true + ); +} + +/** + * Convert every .mdx file under srcDir to .md under outDir (preserving the + * relative directory structure). + */ +export async function convertAllMdx( + config: MdxToMarkdownConfig = {} +): Promise { + const srcDir = config.srcDir + ? resolve(config.srcDir) + : resolve(process.cwd(), ".c15t"); + const outDir = config.outDir + ? resolve(config.outDir) + : resolve(process.cwd(), "public"); + + if (!existsSync(srcDir)) { + return; + } + + const mdxFiles = await fg("**/*.mdx", { + cwd: srcDir, + absolute: true, + onlyFiles: true, + }); + + if (mdxFiles.length === 0) { + return; + } + + const remarkPlugins = config.remarkPlugins ?? []; + const enrichFromGitFlag = config.enrichFrontmatterFromGit ?? 
false; + let converted = 0; + + for (const mdxFilePath of mdxFiles) { + try { + const { markdown } = await convertMdxFile( + mdxFilePath, + remarkPlugins, + enrichFromGitFlag + ); + const outputPath = deriveOutputPath(mdxFilePath, srcDir, outDir); + await mkdir(dirname(outputPath), { recursive: true }); + await writeFile(outputPath, markdown); + converted += 1; + } catch (fileError) { + log.error(`Failed to process ${mdxFilePath}: ${String(fileError)}`); + } + } + + log.verbose(`Converted ${converted} MDX files`); +} diff --git a/packages/docs/src/convert/index.ts b/packages/docs/src/convert/index.ts new file mode 100644 index 0000000..6fa35b5 --- /dev/null +++ b/packages/docs/src/convert/index.ts @@ -0,0 +1,7 @@ +export { + type ConvertResult, + convertAllMdx, + convertMdxFile, + convertSingleMdxFile, + type MdxToMarkdownConfig, +} from "./convert"; diff --git a/packages/docs/src/internal/logger.ts b/packages/docs/src/internal/logger.ts new file mode 100644 index 0000000..a8d1b2c --- /dev/null +++ b/packages/docs/src/internal/logger.ts @@ -0,0 +1,15 @@ +const VERBOSE = process.env.INTH_DOCS_VERBOSE === "1"; + +export const log = { + error(message: string): void { + process.stderr.write(`[inth-docs] error: ${message}\n`); + }, + summary(message: string): void { + process.stdout.write(`[inth-docs] ${message}\n`); + }, + verbose(message: string): void { + if (VERBOSE) { + process.stderr.write(`[inth-docs] ${message}\n`); + } + }, +}; diff --git a/packages/docs/src/lint/cli.ts b/packages/docs/src/lint/cli.ts new file mode 100644 index 0000000..88bbda1 --- /dev/null +++ b/packages/docs/src/lint/cli.ts @@ -0,0 +1,146 @@ +#!/usr/bin/env node +import { resolve } from "node:path"; +import { type ReporterFormat, renderReport } from "./reporters"; +import { DEFAULT_IGNORE_GLOBS, type LintSeverity, lintDocs } from "./runner"; + +type CliArgs = { + srcDir: string; + changelogDir?: string; + format: ReporterFormat; + ignore: string[]; + unknownFieldSeverity: LintSeverity; + 
maxWarnings: number; + help: boolean; +}; + +const USAGE = `inth-docs-lint — validate MDX frontmatter and meta.json against a schema + +Usage: + inth-docs-lint [srcDir] [options] + +Options: + --src Source directory (default: ./content) + --changelog Subdirectory that uses the changelog schema + --format pretty | json | github (default: pretty) + --ignore Glob to skip (repeatable). Default: shared/**, _partials/** + --warn-unknown Unknown fields warn (default) + --error-unknown Unknown fields error + --max-warnings Exit non-zero if warnings exceed n (default: Infinity) + -h, --help Show this help + +Exit codes: + 0 No errors (warnings under --max-warnings) + 1 Errors present or warnings exceeded + 2 CLI usage error +`; + +function parseArgs(argv: string[]): CliArgs { + const args: CliArgs = { + srcDir: "content", + format: "pretty", + ignore: [], + unknownFieldSeverity: "warn", + maxWarnings: Number.POSITIVE_INFINITY, + help: false, + }; + let positional = 0; + for (let i = 0; i < argv.length; i++) { + const arg = argv[i]; + if (arg === "-h" || arg === "--help") { + args.help = true; + } else if (arg === "--src") { + const value = argv[++i]; + if (!value) { + throw new Error("--src requires a value"); + } + args.srcDir = value; + } else if (arg === "--changelog") { + const value = argv[++i]; + if (!value) { + throw new Error("--changelog requires a value"); + } + args.changelogDir = value; + } else if (arg === "--format") { + const value = argv[++i]; + if (value !== "pretty" && value !== "json" && value !== "github") { + throw new Error(`--format must be pretty|json|github, got ${value}`); + } + args.format = value; + } else if (arg === "--ignore") { + const value = argv[++i]; + if (!value) { + throw new Error("--ignore requires a value"); + } + args.ignore.push(value); + } else if (arg === "--warn-unknown") { + args.unknownFieldSeverity = "warn"; + } else if (arg === "--error-unknown") { + args.unknownFieldSeverity = "error"; + } else if (arg === "--max-warnings") 
{ + const value = argv[++i]; + if (!value) { + throw new Error("--max-warnings requires a value"); + } + const parsed = Number.parseInt(value, 10); + if (Number.isNaN(parsed) || parsed < 0) { + throw new Error("--max-warnings must be a non-negative integer"); + } + args.maxWarnings = parsed; + } else if (arg && !arg.startsWith("-")) { + if (positional === 0) { + args.srcDir = arg; + } else { + throw new Error(`unexpected positional argument: ${arg}`); + } + positional += 1; + } else if (arg) { + throw new Error(`unknown option: ${arg}`); + } + } + + if (args.ignore.length === 0) { + args.ignore = [...DEFAULT_IGNORE_GLOBS]; + } + return args; +} + +async function main(): Promise { + let args: CliArgs; + try { + args = parseArgs(process.argv.slice(2)); + } catch (error) { + process.stderr.write(`${String(error)}\n\n${USAGE}`); + process.exit(2); + } + + if (args.help) { + process.stdout.write(USAGE); + return; + } + + const result = await lintDocs({ + srcDir: resolve(args.srcDir), + changelogDir: args.changelogDir ? resolve(args.changelogDir) : undefined, + ignore: args.ignore, + unknownFieldSeverity: args.unknownFieldSeverity, + }); + + const output = renderReport(args.format, result); + if (args.format === "github") { + process.stdout.write(output); + } else if (args.format === "json") { + process.stdout.write(output); + } else { + // Pretty goes to stderr so JSON piping via stdout stays clean when scripts + // mix and match. + process.stderr.write(output); + } + + const exceedsWarnings = result.summary.warnings > args.maxWarnings; + process.exit(result.summary.errors > 0 || exceedsWarnings ? 
1 : 0); +} + +main().catch((error) => { + process.stderr.write(`docs-lint: ${String(error)}\n`); + process.exit(1); +}); diff --git a/packages/docs/src/lint/index.ts b/packages/docs/src/lint/index.ts new file mode 100644 index 0000000..7c923b9 --- /dev/null +++ b/packages/docs/src/lint/index.ts @@ -0,0 +1,28 @@ +/** @biome-ignore lint/performance/noBarrelFile: package entry point */ + +export { + githubReporter, + jsonReporter, + prettyReporter, + type ReporterFormat, + renderReport, +} from "./reporters"; +export { + DEFAULT_IGNORE_GLOBS, + type LintOptions, + type LintResult, + type LintRule, + type LintSeverity, + type LintSummary, + type LintViolation, + lintDocs, +} from "./runner"; +export { + allowedKeys, + type DefaultChangelogFrontmatter, + type DefaultFrontmatter, + type DefaultMeta, + defaultChangelogFrontmatterSchema, + defaultFrontmatterSchema, + defaultMetaSchema, +} from "./schema"; diff --git a/packages/docs/src/lint/reporters.ts b/packages/docs/src/lint/reporters.ts new file mode 100644 index 0000000..d262e7a --- /dev/null +++ b/packages/docs/src/lint/reporters.ts @@ -0,0 +1,82 @@ +import type { LintResult, LintViolation } from "./runner"; + +export type ReporterFormat = "pretty" | "json" | "github"; + +function severitySymbol(severity: LintViolation["severity"]): string { + return severity === "error" ? "×" : "!"; +} + +/** + * Human-readable reporter for terminal output. Groups violations by file. + */ +export function prettyReporter(result: LintResult): string { + if (result.violations.length === 0) { + return `All ${result.summary.filesScanned} files pass.\n`; + } + + const byFile = new Map(); + for (const violation of result.violations) { + const existing = byFile.get(violation.file) ?? 
[]; + existing.push(violation); + byFile.set(violation.file, existing); + } + + const lines: string[] = []; + const sortedFiles = Array.from(byFile.keys()).sort(); + for (const file of sortedFiles) { + lines.push(file); + for (const violation of byFile.get(file) ?? []) { + const symbol = severitySymbol(violation.severity); + const tag = `[${violation.severity} ${violation.rule}]`; + lines.push(` ${symbol} ${tag} ${violation.message}`); + } + lines.push(""); + } + + lines.push( + `${result.summary.filesScanned} files scanned — ${result.summary.errors} error(s), ${result.summary.warnings} warning(s)` + ); + + return `${lines.join("\n")}\n`; +} + +/** + * JSON reporter — stable machine-readable shape for CI pipelines and custom + * tooling. + */ +export function jsonReporter(result: LintResult): string { + return `${JSON.stringify(result, null, 2)}\n`; +} + +/** + * GitHub Actions workflow-command reporter. Each violation becomes a + * `::error::` or `::warning::` annotation that attaches to the file in the PR + * review UI. + */ +export function githubReporter(result: LintResult): string { + const lines: string[] = []; + for (const violation of result.violations) { + const command = violation.severity === "error" ? "error" : "warning"; + const message = violation.field + ? 
`[${violation.rule}] ${violation.message}` + : violation.message; + lines.push(`::${command} file=${violation.file}::${message}`); + } + lines.push( + `::notice::docs lint: ${result.summary.filesScanned} files scanned, ${result.summary.errors} error(s), ${result.summary.warnings} warning(s)` + ); + return `${lines.join("\n")}\n`; +} + +export function renderReport( + format: ReporterFormat, + result: LintResult +): string { + if (format === "json") { + return jsonReporter(result); + } + if (format === "github") { + return githubReporter(result); + } + return prettyReporter(result); +} diff --git a/packages/docs/src/lint/runner.ts b/packages/docs/src/lint/runner.ts new file mode 100644 index 0000000..7d1e08c --- /dev/null +++ b/packages/docs/src/lint/runner.ts @@ -0,0 +1,257 @@ +import { existsSync } from "node:fs"; +import { readFile } from "node:fs/promises"; +import { relative, sep } from "node:path"; +import fg from "fast-glob"; +import matter from "gray-matter"; +import * as v from "valibot"; +import { + allowedKeys, + defaultChangelogFrontmatterSchema, + defaultFrontmatterSchema, + defaultMetaSchema, +} from "./schema"; + +export type LintSeverity = "error" | "warn"; + +export type LintRule = + | "schema" + | "unknown-field" + | "missing-field" + | "parse-error"; + +export type LintViolation = { + file: string; + kind: "frontmatter" | "changelog" | "meta"; + severity: LintSeverity; + rule: LintRule; + field?: string; + message: string; +}; + +export type LintSummary = { + filesScanned: number; + errors: number; + warnings: number; +}; + +export type LintResult = { + violations: LintViolation[]; + summary: LintSummary; +}; + +export type LintOptions = { + /** Root directory containing .mdx/.md files and meta.json */ + srcDir: string; + /** Optional subdirectory that uses the changelog schema instead */ + changelogDir?: string; + /** + * Glob patterns (relative to srcDir) to skip — use for include-only partials + * like `shared/**` or orphan drafts. 
Matched against POSIX-style relative + * paths. Default: ["**\/shared/**"] + */ + ignore?: string[]; + /** Treat unknown frontmatter fields as warnings (default) or errors */ + unknownFieldSeverity?: LintSeverity; + /** Custom schemas override the defaults */ + schemas?: { + frontmatter?: v.ObjectSchema< + v.ObjectEntries, + v.ErrorMessage | undefined + >; + changelogFrontmatter?: v.ObjectSchema< + v.ObjectEntries, + v.ErrorMessage | undefined + >; + meta?: v.ObjectSchema< + v.ObjectEntries, + v.ErrorMessage | undefined + >; + }; +}; + +async function glob( + root: string, + patterns: string[], + ignore: string[] +): Promise { + if (!existsSync(root)) { + return []; + } + return await fg(patterns, { + cwd: root, + absolute: true, + onlyFiles: true, + ignore, + dot: false, + }); +} + +function toRelative(srcDir: string, file: string): string { + const rel = relative(srcDir, file); + return rel.split(sep).join("/"); +} + +function isUnderDir(file: string, dir: string | undefined): boolean { + if (!dir) { + return false; + } + const rel = relative(dir, file); + return !(rel.startsWith("..") || rel.startsWith(sep)); +} + +function pathForIssue(issue: v.BaseIssue): string | undefined { + const segments = issue.path?.map((p) => String(p.key)).filter(Boolean); + return segments && segments.length > 0 ? segments.join(".") : undefined; +} + +function validate>( + schema: v.ObjectSchema< + v.ObjectEntries, + v.ErrorMessage | undefined + >, + data: T, + file: string, + kind: LintViolation["kind"], + unknownSeverity: LintSeverity +): LintViolation[] { + const out: LintViolation[] = []; + const result = v.safeParse(schema, data); + + if (!result.success) { + for (const issue of result.issues) { + const field = pathForIssue(issue); + out.push({ + file, + kind, + severity: "error", + rule: "schema", + field, + message: field ? 
`${field}: ${issue.message}` : issue.message, + }); + } + } + + const allowed = allowedKeys(schema); + for (const key of Object.keys(data)) { + if (!allowed.has(key)) { + out.push({ + file, + kind, + severity: unknownSeverity, + rule: "unknown-field", + field: key, + message: `unknown field \`${key}\` — not in schema and not read by any consumer`, + }); + } + } + + return out; +} + +/** + * Walk `srcDir` and validate every .md/.mdx frontmatter plus every meta.json + * file. Returns a list of violations with a summary count. + */ +export const DEFAULT_IGNORE_GLOBS = [ + "**/shared/**", + "**/_partials/**", + "**/node_modules/**", +]; + +export async function lintDocs(options: LintOptions): Promise { + const { + srcDir, + changelogDir, + ignore = DEFAULT_IGNORE_GLOBS, + unknownFieldSeverity = "warn", + schemas = {}, + } = options; + + const frontmatterSchema = schemas.frontmatter ?? defaultFrontmatterSchema; + const changelogSchema = + schemas.changelogFrontmatter ?? defaultChangelogFrontmatterSchema; + const metaSchema = schemas.meta ?? defaultMetaSchema; + + const violations: LintViolation[] = []; + + const mdxFiles = await glob(srcDir, ["**/*.mdx", "**/*.md"], ignore); + const metaFiles = await glob(srcDir, ["**/meta.json"], ignore); + const filesScanned = mdxFiles.length + metaFiles.length; + + for (const file of mdxFiles) { + let data: Record; + try { + const raw = await readFile(file, "utf-8"); + const parsed = matter(raw); + data = parsed.data as Record; + } catch (error) { + violations.push({ + file: toRelative(srcDir, file), + kind: "frontmatter", + severity: "error", + rule: "parse-error", + message: `failed to parse frontmatter: ${String(error)}`, + }); + continue; + } + + const isChangelog = isUnderDir(file, changelogDir); + const schemaToUse = isChangelog ? changelogSchema : frontmatterSchema; + const kind: LintViolation["kind"] = isChangelog + ? 
"changelog" + : "frontmatter"; + + violations.push( + ...validate( + schemaToUse, + data, + toRelative(srcDir, file), + kind, + unknownFieldSeverity + ) + ); + } + + for (const file of metaFiles) { + let data: Record; + try { + const raw = await readFile(file, "utf-8"); + data = JSON.parse(raw) as Record; + } catch (error) { + violations.push({ + file: toRelative(srcDir, file), + kind: "meta", + severity: "error", + rule: "parse-error", + message: `failed to parse meta.json: ${String(error)}`, + }); + continue; + } + + violations.push( + ...validate( + metaSchema, + data, + toRelative(srcDir, file), + "meta", + unknownFieldSeverity + ) + ); + } + + const summary: LintSummary = { + filesScanned, + errors: violations.filter((violation) => violation.severity === "error") + .length, + warnings: violations.filter((violation) => violation.severity === "warn") + .length, + }; + + return { violations, summary }; +} + +export type { + DefaultChangelogFrontmatter, + DefaultFrontmatter, + DefaultMeta, +} from "./schema"; diff --git a/packages/docs/src/lint/schema.ts b/packages/docs/src/lint/schema.ts new file mode 100644 index 0000000..16291bc --- /dev/null +++ b/packages/docs/src/lint/schema.ts @@ -0,0 +1,119 @@ +import * as v from "valibot"; + +const SEMVER_PATTERN = /^\d+\.\d+\.\d+$/; +const semver = v.pipe( + v.string(), + v.regex(SEMVER_PATTERN, "Must be a valid semantic version (e.g. 1.2.3)") +); + +const isoDate = v.pipe( + v.string(), + v.check((value: string) => !Number.isNaN(new Date(value).getTime()), { + message: "Must be an ISO-8601 date or parseable date string", + } as never) +); + +/** + * Cross-framework page link used by the "Available in other SDKs" widget. + * Matches the monorepo's `availableIn` schema at c15t-docs/schema/docs.ts. + */ +const availableInEntry = v.object({ + framework: v.string(), + url: v.optional(v.string()), + title: v.optional(v.string()), +}); + +/** + * Default frontmatter schema for docs pages. 
Mirrors the fields the monorepo + * actually *consumes* at render time (verified via grep in + * apps/c15t-docs/src). Anything defined in the monorepo's schema but never + * read (deprecatedSince, since, tocStyle as of 2026-04-17) is deliberately + * omitted so the linter flags them as legacy cruft. + * + * Callers can override via `lintDocs({ schemas: { frontmatter: ... } })`. + */ +export const defaultFrontmatterSchema = v.object({ + title: v.pipe(v.string(), v.minLength(1, "must not be empty")), + description: v.optional(v.string()), + icon: v.optional(v.string()), + + // Lifecycle + deprecated: v.optional(v.boolean()), + deprecatedReason: v.optional(v.string()), + experimental: v.optional(v.boolean()), + canary: v.optional(v.boolean()), + new: v.optional(v.boolean()), + draft: v.optional(v.boolean()), + + // Categorization + tags: v.optional(v.array(v.string())), + availableIn: v.optional(v.array(availableInEntry)), + + // Layout + full: v.optional(v.boolean()), + // Note: `lastModified` and `lastAuthor` are intentionally NOT in this + // schema. They are auto-populated during convert via + // `enrichFrontmatterFromGit` and should not be hand-authored — the linter + // will flag any source-authored `lastModified` as unknown-field. +}); + +export type DefaultFrontmatter = v.InferOutput; + +/** + * Default schema for changelog entries. Mirrors c15t-docs/schema/changelog.ts. + * Enable via `lintDocs({ changelogDir: "./content/changelog" })`. 
+ */ +export const defaultChangelogFrontmatterSchema = v.object({ + title: v.pipe(v.string(), v.minLength(1)), + description: v.optional(v.string()), + icon: v.optional(v.string()), + version: semver, + date: isoDate, + type: v.optional( + v.picklist(["release", "improvement", "retired", "deprecation"]) + ), + tags: v.optional(v.array(v.string())), + canary: v.optional(v.boolean()), + authors: v.optional(v.union([v.string(), v.array(v.string())])), + draft: v.optional(v.boolean()), +}); + +export type DefaultChangelogFrontmatter = v.InferOutput< + typeof defaultChangelogFrontmatterSchema +>; + +/** + * Default schema for Fumadocs-style `meta.json` files that drive sidebar + * ordering and section labels. `pages` is the only field Fumadocs requires; + * everything else is optional. + */ +export const defaultMetaSchema = v.object({ + title: v.optional(v.pipe(v.string(), v.minLength(1))), + pages: v.array(v.string()), + root: v.optional(v.boolean()), + icon: v.optional(v.string()), + defaultOpen: v.optional(v.boolean()), + nav: v.optional( + v.object({ + sidebar: v.optional(v.picklist(["section", "combined"])), + label: v.optional(v.string()), + mode: v.optional(v.string()), + }) + ), +}); + +export type DefaultMeta = v.InferOutput; + +/** + * Extract the set of allowed top-level keys from a valibot object schema. + * Used to flag unknown fields as warnings without making the schema itself + * strict (which would turn unknowns into hard errors). 
+ */ +export function allowedKeys( + schema: v.ObjectSchema< + v.ObjectEntries, + v.ErrorMessage | undefined + > +): Set { + return new Set(Object.keys(schema.entries)); +} diff --git a/packages/docs/src/llm/index.ts b/packages/docs/src/llm/index.ts new file mode 100644 index 0000000..c54783f --- /dev/null +++ b/packages/docs/src/llm/index.ts @@ -0,0 +1,12 @@ +export { + type CuratedLink, + type CuratedSection, + type FullTopic, + generateLLMFullFiles, + generateLLMSummaries, + type LLMFullConfig, + type LLMSummariesConfig, + type MarkdownDoc, + type ProductInfo, + type SourceDoc, +} from "./llm"; diff --git a/packages/docs/src/llm/llm.ts b/packages/docs/src/llm/llm.ts new file mode 100644 index 0000000..fca9839 --- /dev/null +++ b/packages/docs/src/llm/llm.ts @@ -0,0 +1,469 @@ +import { existsSync } from "node:fs"; +import { mkdir, readdir, readFile, writeFile } from "node:fs/promises"; +import path from "node:path"; +import matter from "gray-matter"; + +const DOCS_DIRNAME = "docs"; +const TRAILING_SLASHES_PATTERN = /\/+$/; +const WINDOWS_PATH_PATTERN = /\\/g; +const INDEX_SEGMENT_PATTERN = /\/index$/; +const ROOT_INDEX_PATTERN = /^index$/; +const MD_EXTENSION_PATTERN = /\.(md|mdx)$/; +const MD_ONLY_EXTENSION_PATTERN = /\.md$/; +const SEPARATOR_PATTERN = /[-_]/; +const WHITESPACE_PATTERN = /\s+/g; + +export type SourceDoc = { + title: string; + description: string; + urlPath: string; + absoluteUrl: string; + relativePath: string; +}; + +export type MarkdownDoc = SourceDoc & { + content: string; +}; + +export type CuratedLink = { + urlPath: string; + title?: string; + description?: string; +}; + +export type CuratedSection = { + title: string; + description?: string; + links: CuratedLink[]; +}; + +export type FullTopic = { + slug: string; + title: string; + description: string; + includePrefixes: string[]; +}; + +export type ProductInfo = { + /** Product display name, e.g. 
"DSAR SDK" */ + name: string; + /** Short one-line summary, rendered as a blockquote at the top of llms.txt */ + summary: string; + /** Bullets rendered under "## Product Summary" */ + bullets?: string[]; + /** Curated links rendered under "## Best Starting Points" */ + bestStartingPoints?: CuratedLink[]; + /** Optional agent guidance paragraph at the bottom of llms.txt */ + agentGuidance?: string; +}; + +export type LLMSummariesConfig = { + srcDir: string; + outDir: string; + baseUrl?: string; + product: ProductInfo; + /** Sections rendered in /docs/llms.txt */ + docsSections?: CuratedSection[]; +}; + +export type LLMFullConfig = { + outDir: string; + baseUrl?: string; + product: Pick; + topics: FullTopic[]; +}; + +function titleize(input: string): string { + return input + .split(SEPARATOR_PATTERN) + .filter(Boolean) + .map((segment) => segment.charAt(0).toUpperCase() + segment.slice(1)) + .join(" "); +} + +function normalizeDescription(input: string): string { + return input.replace(WHITESPACE_PATTERN, " ").trim(); +} + +function normalizeBaseUrl(baseUrl?: string): string { + const resolved = + baseUrl?.trim() || + process.env.NEXT_PUBLIC_SITE_URL || + (process.env.NEXT_PUBLIC_VERCEL_PROJECT_PRODUCTION_URL + ? `https://${process.env.NEXT_PUBLIC_VERCEL_PROJECT_PRODUCTION_URL}` + : undefined) || + (process.env.NEXT_PUBLIC_VERCEL_URL + ? `https://${process.env.NEXT_PUBLIC_VERCEL_URL}` + : undefined) || + (process.env.VERCEL_URL + ? `https://${process.env.VERCEL_URL}` + : undefined) || + "http://localhost:3000"; + + return resolved.replace(TRAILING_SLASHES_PATTERN, ""); +} + +function toUrlPath(relativePath: string): string { + const normalizedPath = relativePath + .replace(WINDOWS_PATH_PATTERN, "/") + .replace(MD_EXTENSION_PATTERN, "") + .replace(INDEX_SEGMENT_PATTERN, "") + .replace(ROOT_INDEX_PATTERN, ""); + + return normalizedPath.length > 0 ? 
`/docs/${normalizedPath}` : "/docs"; +} + +function toAbsoluteUrl(urlPath: string, baseUrl: string): string { + if (urlPath.startsWith("http://") || urlPath.startsWith("https://")) { + return urlPath; + } + return `${baseUrl}${urlPath}`; +} + +function isIncluded(relativePath: string, prefixes: string[]): boolean { + return prefixes.some((raw) => { + const prefix = raw.replace(TRAILING_SLASHES_PATTERN, ""); + return relativePath === prefix || relativePath.startsWith(`${prefix}/`); + }); +} + +type RenderedLink = { + title: string; + absoluteUrl: string; + description: string; +}; + +function renderLink(link: RenderedLink): string { + return `- [${link.title}](${link.absoluteUrl}): ${link.description}`; +} + +function renderSection( + section: CuratedSection, + resolvedLinks: RenderedLink[] +): string { + const lines = [`## ${section.title}`]; + if (section.description) { + lines.push("", section.description); + } + lines.push("", ...resolvedLinks.map(renderLink)); + return lines.join("\n"); +} + +async function collectFiles( + rootDir: string, + extensions: string[] +): Promise { + const entries = await readdir(rootDir, { withFileTypes: true }); + const files = await Promise.all( + entries.map(async (entry) => { + const absolutePath = path.join(rootDir, entry.name); + if (entry.isDirectory()) { + return collectFiles(absolutePath, extensions); + } + return extensions.includes(path.extname(entry.name)) + ? 
[absolutePath] + : []; + }) + ); + return files.flat(); +} + +async function readSourceDocs( + srcDir: string, + baseUrl: string +): Promise> { + const docsDir = path.join(srcDir, DOCS_DIRNAME); + const docs = new Map(); + + if (!existsSync(docsDir)) { + return docs; + } + + const files = await collectFiles(docsDir, [".md", ".mdx"]); + + const entries = await Promise.all( + files.map(async (filePath) => { + const relativePath = path + .relative(docsDir, filePath) + .replace(WINDOWS_PATH_PATTERN, "/"); + const raw = await readFile(filePath, "utf-8"); + const parsed = matter(raw); + const title = + String(parsed.data.title ?? "").trim() || + titleize(path.basename(relativePath, path.extname(relativePath))) || + "Untitled"; + const description = normalizeDescription( + String(parsed.data.description ?? "") + ); + const urlPath = toUrlPath(relativePath); + return { + urlPath, + doc: { + title, + description, + urlPath, + absoluteUrl: toAbsoluteUrl(urlPath, baseUrl), + relativePath: relativePath.replace(MD_EXTENSION_PATTERN, ""), + }, + }; + }) + ); + + for (const { urlPath, doc } of entries) { + docs.set(urlPath, doc); + } + + return docs; +} + +async function readMarkdownDocs( + outDir: string, + baseUrl: string +): Promise { + const docsDir = path.join(outDir, DOCS_DIRNAME); + if (!existsSync(docsDir)) { + return []; + } + + const files = await collectFiles(docsDir, [".md"]); + const docs: MarkdownDoc[] = []; + + for (const filePath of files) { + const relativePath = path + .relative(docsDir, filePath) + .replace(WINDOWS_PATH_PATTERN, "/"); + const raw = await readFile(filePath, "utf-8"); + const parsed = matter(raw); + const title = + String(parsed.data.title ?? "").trim() || + titleize(path.basename(relativePath, ".md")) || + "Untitled"; + const description = normalizeDescription( + String(parsed.data.description ?? 
"") + ); + const urlPath = toUrlPath(relativePath); + + docs.push({ + title, + description, + urlPath, + absoluteUrl: toAbsoluteUrl(urlPath, baseUrl), + relativePath: relativePath.replace(MD_ONLY_EXTENSION_PATTERN, ""), + content: parsed.content.trim(), + }); + } + + return docs.sort((left, right) => left.urlPath.localeCompare(right.urlPath)); +} + +function resolveCuratedLink( + link: CuratedLink, + sourceDocs: Map, + baseUrl: string +): RenderedLink { + const sourceDoc = sourceDocs.get(link.urlPath); + return { + title: + link.title ?? + sourceDoc?.title ?? + titleize( + link.urlPath.split("/").filter(Boolean).at(-1) ?? "documentation" + ), + description: + link.description ?? sourceDoc?.description ?? "No description provided.", + absoluteUrl: toAbsoluteUrl(sourceDoc?.urlPath ?? link.urlPath, baseUrl), + }; +} + +function renderProductSummary( + product: ProductInfo, + sourceDocs: Map, + baseUrl: string +): string { + const startingPoints = product.bestStartingPoints ?? []; + const links = startingPoints.map((link) => + resolveCuratedLink(link, sourceDocs, baseUrl) + ); + + const sections: string[] = [`# ${product.name}`, "", `> ${product.summary}`]; + + if (product.bullets && product.bullets.length > 0) { + sections.push( + "", + "## Product Summary", + "", + ...product.bullets.map((bullet) => `- ${bullet}`) + ); + } + + if (links.length > 0) { + sections.push("", "## Best Starting Points", "", ...links.map(renderLink)); + } + + if (product.agentGuidance) { + sections.push("", "## Agent Guidance", "", product.agentGuidance); + } + + return sections.join("\n"); +} + +function renderDocsSummary( + product: ProductInfo, + sourceDocs: Map, + baseUrl: string, + docsSections: CuratedSection[] +): string { + const sections = docsSections.map((section) => + renderSection( + section, + section.links.map((link) => resolveCuratedLink(link, sourceDocs, baseUrl)) + ) + ); + + return `# ${product.name} Documentation + +> Curated documentation map for developers and coding 
agents working with ${product.name}. + +## How To Use This File + +Read the summary links first. If the summary is not enough, choose the smallest relevant topic file from \`/docs/llms-full.txt\`. + +${sections.join("\n\n")}`; +} + +function renderDocsFullRouter( + product: Pick, + baseUrl: string, + topics: FullTopic[] +): string { + const links = topics.map((topic) => ({ + title: `${topic.title} Full Context`, + description: topic.description, + absoluteUrl: toAbsoluteUrl(`/docs/llms-full/${topic.slug}.txt`, baseUrl), + })); + + return [ + `# ${product.name} Documentation Full Context`, + "", + "> Choose the smallest topic file that matches the task.", + "", + "## Topics", + "", + ...links.map(renderLink), + ].join("\n"); +} + +function renderRootFullRouter( + product: Pick, + baseUrl: string +): string { + return [ + `# ${product.name} Full Context Router`, + "", + "> Start with the product summary, then the curated docs summary, then one topic-specific full-context file if needed.", + "", + "## Recommended Flow", + "", + `- [Product Summary](${toAbsoluteUrl("/llms.txt", baseUrl)}): Short product-oriented overview of ${product.name}.`, + `- [Documentation Summary](${toAbsoluteUrl("/docs/llms.txt", baseUrl)}): Curated docs map for implementation work.`, + `- [Documentation Full Router](${toAbsoluteUrl("/docs/llms-full.txt", baseUrl)}): Topic-specific deep-context files.`, + ].join("\n"); +} + +function renderTopicDocument( + product: Pick, + topic: FullTopic, + docs: MarkdownDoc[] +): string { + const topicDocs = docs.filter((doc) => + isIncluded(doc.relativePath, topic.includePrefixes) + ); + const links = topicDocs.map((doc) => ({ + title: doc.title, + absoluteUrl: doc.absoluteUrl, + description: doc.description || "No description provided.", + })); + const contentBlocks = topicDocs.map((doc) => { + const description = doc.description ? 
`${doc.description}\n` : ""; + return `# ${doc.title} +URL: ${doc.absoluteUrl} +${description} +${doc.content}`.trim(); + }); + + return [ + `# ${product.name} ${topic.title} Full Context`, + "", + `> ${topic.description}`, + "", + "## Included Pages", + "", + links.map(renderLink).join("\n"), + "", + "## Content", + "", + contentBlocks.join("\n\n"), + ].join("\n"); +} + +/** + * Generate `/llms.txt` (product summary) and `/docs/llms.txt` (curated docs + * map) by reading frontmatter from .md/.mdx files under `{srcDir}/docs/`. + */ +export async function generateLLMSummaries( + config: LLMSummariesConfig +): Promise { + const srcDir = path.resolve(config.srcDir); + const outDir = path.resolve(config.outDir); + const baseUrl = normalizeBaseUrl(config.baseUrl); + const sourceDocs = await readSourceDocs(srcDir, baseUrl); + + await mkdir(path.join(outDir, DOCS_DIRNAME), { recursive: true }); + await writeFile( + path.join(outDir, "llms.txt"), + renderProductSummary(config.product, sourceDocs, baseUrl) + ); + + if (config.docsSections && config.docsSections.length > 0) { + await writeFile( + path.join(outDir, DOCS_DIRNAME, "llms.txt"), + renderDocsSummary( + config.product, + sourceDocs, + baseUrl, + config.docsSections + ) + ); + } +} + +/** + * Generate the full-context routers and one topic-specific .txt per topic + * under `/docs/llms-full/`. Reads generated .md files from `{outDir}/docs/`. 
+ */ +export async function generateLLMFullFiles( + config: LLMFullConfig +): Promise { + const outDir = path.resolve(config.outDir); + const baseUrl = normalizeBaseUrl(config.baseUrl); + const markdownDocs = await readMarkdownDocs(outDir, baseUrl); + + await mkdir(path.join(outDir, DOCS_DIRNAME, "llms-full"), { + recursive: true, + }); + await writeFile( + path.join(outDir, "llms-full.txt"), + renderRootFullRouter(config.product, baseUrl) + ); + await writeFile( + path.join(outDir, DOCS_DIRNAME, "llms-full.txt"), + renderDocsFullRouter(config.product, baseUrl, config.topics) + ); + + for (const topic of config.topics) { + await writeFile( + path.join(outDir, DOCS_DIRNAME, "llms-full", `${topic.slug}.txt`), + renderTopicDocument(config.product, topic, markdownDocs) + ); + } +} diff --git a/packages/docs/src/remark/index.ts b/packages/docs/src/remark/index.ts new file mode 100644 index 0000000..b185704 --- /dev/null +++ b/packages/docs/src/remark/index.ts @@ -0,0 +1,46 @@ +/** @biome-ignore lint/performance/noBarrelFile: package entry point */ + +export * from "./libs"; +export { remarkCalloutToMarkdown } from "./plugins/callout.remark"; +export { remarkCardsToMarkdown } from "./plugins/cards.remark"; +export { remarkInclude } from "./plugins/include.remark"; +export { remarkLinkIcon } from "./plugins/link-icon.remark"; +export { remarkMermaidToMarkdown } from "./plugins/mermaid.remark"; +export { remarkPackageCommandTabsToMarkdown } from "./plugins/package-command-tabs.remark"; +export { remarkRemoveImports } from "./plugins/remove-imports.remark"; +export { remarkStepsToMarkdown } from "./plugins/steps.remark"; +export { remarkTabsToMarkdown } from "./plugins/tabs.remark"; +export { + extractTocFromContent, + extractTocFromFile, + type TOCItem, +} from "./plugins/toc-extract.remark"; +export { + extractTypeFromFile, + remarkTypeTableToMarkdown, +} from "./plugins/type-table.remark"; + +import { remarkCalloutToMarkdown } from "./plugins/callout.remark"; +import { 
remarkCardsToMarkdown } from "./plugins/cards.remark"; +import { remarkMermaidToMarkdown } from "./plugins/mermaid.remark"; +import { remarkPackageCommandTabsToMarkdown } from "./plugins/package-command-tabs.remark"; +import { remarkRemoveImports } from "./plugins/remove-imports.remark"; +import { remarkStepsToMarkdown } from "./plugins/steps.remark"; +import { remarkTabsToMarkdown } from "./plugins/tabs.remark"; +import { remarkTypeTableToMarkdown } from "./plugins/type-table.remark"; + +/** + * Default remark plugins for MDX → Markdown conversion for agent/LLM docs. + * Order matters: imports are stripped first, then components are flattened + * into markdown equivalents. + */ +export const defaultRemarkPlugins = [ + remarkRemoveImports, + remarkCalloutToMarkdown, + remarkCardsToMarkdown, + remarkMermaidToMarkdown, + remarkPackageCommandTabsToMarkdown, + remarkStepsToMarkdown, + remarkTabsToMarkdown, + remarkTypeTableToMarkdown, +] as const; diff --git a/packages/docs/src/remark/libs/attributes.ts b/packages/docs/src/remark/libs/attributes.ts new file mode 100644 index 0000000..d1d8f98 --- /dev/null +++ b/packages/docs/src/remark/libs/attributes.ts @@ -0,0 +1,71 @@ +import JSON5 from "json5"; +import { isAttrValueExpression } from "./guards"; +import type { MdxJsxAttribute, MdxNode } from "./types"; + +/** + * Get the value of an MDX JSX attribute + */ +export function getAttributeValue( + node: T, + key: string +): string | null { + const attrs = (node.attributes ?? []) as readonly MdxJsxAttribute[]; + const attr = attrs.find( + (a) => a.type === "mdxJsxAttribute" && a.name === key + ); + + if (!attr) { + return null; + } + + const v = attr.value; + if (typeof v === "string") { + return v; + } + if (v === null) { + return "true"; + } + if (isAttrValueExpression(v)) { + return String(v.value); + } + return null; +} + +/** + * Parse a JS-like array literal from an MDX attribute value expression. 
+ * + * Accepts flexible array syntax including: + * - Single/double quotes: ['item1', "item2"] + * - Unquoted object keys: [item1, item2] + * - Trailing commas: ['item1', 'item2',] + * - Comments: ['item1', // comment] + * - Mixed quotes: ["item1", 'item2'] + * + * Falls back to null if: + * - Input is empty or null + * - Input is not bracketed + * - Parsed result is not an array + * - Array contains non-string elements + * - JSON5 parsing fails + * + * @param raw - The raw attribute value to parse + * @returns Array of strings or null if parsing fails + */ +export function parseItemsArray(raw: string | null): string[] | null { + if (!raw) { + return null; + } + const trimmed = raw.trim(); + // Require bracketed array syntax + if (!(trimmed.startsWith("[") && trimmed.endsWith("]"))) { + return null; + } + try { + const parsed = JSON5.parse(trimmed); + return Array.isArray(parsed) && parsed.every((x) => typeof x === "string") + ? parsed + : null; + } catch { + return null; + } +} diff --git a/packages/docs/src/remark/libs/content-processor.ts b/packages/docs/src/remark/libs/content-processor.ts new file mode 100644 index 0000000..808d92a --- /dev/null +++ b/packages/docs/src/remark/libs/content-processor.ts @@ -0,0 +1,88 @@ +/** @biome-ignore lint/complexity/noExcessiveCognitiveComplexity: this is okay */ +import type { Blockquote, Node, Paragraph, Table } from "mdast"; +import { toString as mdastToString } from "mdast-util-to-string"; +import { u } from "unist-builder"; +import { is } from "unist-util-is"; +import { + extractBlockquoteContent, + extractParagraphContent, + normalizeWhitespace, + processContentText, +} from "./text"; + +/** + * Process a single content node and return appropriate AST node + * Shared utility for processing content nodes across different plugins + * Handles paragraphs, tables, blockquotes, code blocks, text nodes, and other content types + */ +export function processContentNode( + node: Node +): Paragraph | Table | Blockquote | Node | 
null { + if (is(node, "paragraph")) { + const content = extractParagraphContent(node as Paragraph); + const first = content[0]; + if (content.length > 0 && first) { + return { + type: "paragraph", + children: [{ type: "text", value: first }], + } as Paragraph; + } + return null; + } + if (is(node, "table")) { + // Return the table node as-is instead of extracting text content + // This preserves the full table structure including all rows + return node as Table; + } + if (is(node, "blockquote")) { + const content = extractBlockquoteContent(node as Blockquote); + const first = content[0]; + if (content.length > 0 && first) { + return processContentText(first); + } + return null; + } + if (node.type === "code") { + // Handle code blocks directly as AST nodes + const codeNode = node as { lang?: string; value?: string }; + return u( + "code", + { lang: codeNode.lang || "" }, + codeNode.value || "" + ) as Node; + } + if (node.type === "text") { + // Skip whitespace-only text nodes + const textNode = node as unknown as { value: string }; + if (textNode.value.trim()) { + const normalizedText = normalizeWhitespace(textNode.value, true); + return { + type: "paragraph", + children: [{ type: "text", value: normalizedText }], + } as Paragraph; + } + return null; + } + // Handle any other node type by extracting text content + const nodeText = mdastToString(node); + if (nodeText.trim()) { + const cleanedText = normalizeWhitespace(nodeText, true); + return processContentText(cleanedText); + } + + return null; +} + +/** + * Process an array of content nodes and add them to a replacement array + * Useful for plugins that need to process multiple nodes at once + */ +export function processContentNodes(nodes: Node[], replacement: Node[]): void { + for (const node of nodes) { + const processedNode = processContentNode(node); + + if (processedNode) { + replacement.push(processedNode); + } + } +} diff --git a/packages/docs/src/remark/libs/generic-processor.ts 
b/packages/docs/src/remark/libs/generic-processor.ts new file mode 100644 index 0000000..b28618d --- /dev/null +++ b/packages/docs/src/remark/libs/generic-processor.ts @@ -0,0 +1,94 @@ +import type { Parent, Root, RootContent } from "mdast"; +import { SKIP, visit } from "unist-util-visit"; +import { hasName } from "./guards"; +import type { MdxNode } from "./types"; + +/** + * Function signature for processing a JSX component node + */ +type ComponentProcessor = ( + node: MdxNode, + index: number, + parent: Parent +) => RootContent[] | undefined; + +/** + * Generic processor for MDX JSX components that handles the common pattern: + * - Visit MDX JSX elements + * - Filter by component name(s) + * - Process and replace content + * - Handle empty content removal + * + * @param componentName - The name of the JSX component to process, or array of names + * @param processor - Function that processes the node and returns replacement content + * @param removeIfEmpty - If true, removes the node entirely if processor returns empty array + * @returns A unified transformer function + */ +export function createJsxComponentProcessor( + componentName: string | string[], + processor: ComponentProcessor, + removeIfEmpty = true +): (tree: Root) => Root { + const names = Array.isArray(componentName) ? 
componentName : [componentName]; + + return (tree: Root): Root => { + visit( + tree, + ["mdxJsxFlowElement", "mdxJsxTextElement"], + (node, index, parent) => { + if (!parent || typeof index !== "number") { + return; + } + + const isValidComponent = names.some((name) => hasName(node, name)); + if (!isValidComponent) { + return; + } + + const result = processor(node as MdxNode, index, parent); + + // If processor returns void, assume it handled replacement internally + if (result === undefined) { + return SKIP; + } + + // Handle empty content + if (result.length === 0) { + if (removeIfEmpty) { + parent.children.splice(index, 1); + return SKIP; + } + // If not removing empty, just continue without SKIP to leave node as-is + return; + } + + // Replace the node with processed content + parent.children.splice(index, 1, ...result); + return SKIP; + } + ); + return tree; + }; +} + +/** + * Simplified processor for components that return a single replacement node + */ +export function createSimpleJsxComponentProcessor( + componentName: string, + processor: ( + node: MdxNode, + index: number, + parent: Parent + ) => RootContent | null, + removeIfEmpty = true +) { + return createJsxComponentProcessor( + componentName, + (node, index, parent) => { + const result = processor(node, index, parent); + return result ? 
[result] : []; + }, + removeIfEmpty + ); +} diff --git a/packages/docs/src/remark/libs/guards.ts b/packages/docs/src/remark/libs/guards.ts new file mode 100644 index 0000000..f193429 --- /dev/null +++ b/packages/docs/src/remark/libs/guards.ts @@ -0,0 +1,35 @@ +import type { MdxJsxAttributeValueExpression, MdxNode } from "./types"; + +/** + * Type guard to check if a node is an MDX JSX element + */ +export function isMdxNode(node: unknown): node is MdxNode { + if (typeof node !== "object" || node === null) { + return false; + } + const t = (node as { type?: unknown }).type; + return t === "mdxJsxFlowElement" || t === "mdxJsxTextElement"; +} + +/** + * Type guard to check if a node is an MDX JSX element with a specific name + */ +export function hasName( + node: unknown, + name: T +): node is MdxNode & { name: T } { + return isMdxNode(node) && (node as MdxNode).name === name; +} + +/** + * Type guard to check if a value is an MDX JSX attribute value expression + */ +export function isAttrValueExpression( + v: unknown +): v is MdxJsxAttributeValueExpression { + return Boolean( + v && + typeof v === "object" && + (v as { type?: unknown }).type === "mdxJsxAttributeValueExpression" + ); +} diff --git a/packages/docs/src/remark/libs/index.ts b/packages/docs/src/remark/libs/index.ts new file mode 100644 index 0000000..0e8161c --- /dev/null +++ b/packages/docs/src/remark/libs/index.ts @@ -0,0 +1,22 @@ +/** @biome-ignore lint/performance/noBarrelFile: this is a barrel file not using default exports */ +export { getAttributeValue, parseItemsArray } from "./attributes"; +export { processContentNode } from "./content-processor"; +export { createJsxComponentProcessor } from "./generic-processor"; +export { hasName } from "./guards"; +export { + createHeading, + createInlineCode, + createLink, + createOrderedList, + createParagraph, + createStrong, + createStrongParagraph, + createTable, + createTableRow, + createText, +} from "./node-creators"; +export { + extractNodeText, + 
normalizeWhitespace, +} from "./text"; +export type { MdxNode } from "./types"; diff --git a/packages/docs/src/remark/libs/node-creators.ts b/packages/docs/src/remark/libs/node-creators.ts new file mode 100644 index 0000000..54a6f86 --- /dev/null +++ b/packages/docs/src/remark/libs/node-creators.ts @@ -0,0 +1,166 @@ +import type { + BlockContent, + DefinitionContent, + Heading, + Link, + List, + ListItem, + Paragraph, + PhrasingContent, + RootContent, + Strong, + Table, + TableCell, + TableRow, + Text, +} from "mdast"; + +/** + * Create a text node + */ +export function createText(text: string): Text { + return { type: "text", value: text }; +} + +/** + * Create a strong (bold) text node + */ +export function createStrong(text: string): Strong { + return { type: "strong", children: [createText(text)] }; +} + +/** + * Create an inline code node + */ +export function createInlineCode(value: string): PhrasingContent { + return { type: "inlineCode", value }; +} + +/** + * Create a paragraph node + */ +export function createParagraph(text: string): Paragraph { + return { type: "paragraph", children: [createText(text)] }; +} + +/** + * Create a paragraph with strong emphasis + */ +export function createStrongParagraph(text: string): Paragraph { + return { + type: "paragraph", + children: [createStrong(text)], + }; +} + +/** + * Create a link node + */ +export function createLink( + url: string, + content: string | PhrasingContent[] +): Link { + return { + type: "link", + url, + children: typeof content === "string" ? [createText(content)] : content, + }; +} + +/** + * Create a heading node + */ +export function createHeading( + depth: 1 | 2 | 3 | 4 | 5 | 6, + text: string +): Heading { + return { + type: "heading", + depth, + children: [createText(text)], + }; +} + +/** + * Create a table cell node + */ +export function createTableCell( + content: string | PhrasingContent[] +): TableCell { + const children = + typeof content === "string" ? 
[createText(content)] : content; + + return { + type: "tableCell", + children, + }; +} + +/** + * Create a table row node + */ +export function createTableRow( + cells: (string | PhrasingContent[])[] +): TableRow { + return { + type: "tableRow", + children: cells.map(createTableCell), + }; +} + +/** + * Create a table with specified headers and rows + */ +export function createTable( + headers: string[], + rows: (string | PhrasingContent[])[][], + align?: ("left" | "center" | "right" | null)[] +): Table { + const headerRow = createTableRow(headers); + const dataRows = rows.map(createTableRow); + + return { + type: "table", + align: align ?? headers.map(() => "left"), + children: [headerRow, ...dataRows], + }; +} + +/** + * Create a list item node + */ +export function createListItem(children: RootContent[]): ListItem { + return { + type: "listItem", + children: children as (BlockContent | DefinitionContent)[], + }; +} + +/** + * Create an ordered list node + */ +export function createOrderedList( + items: ListItem[], + start = 1, + spread = true +): List { + return { + type: "list", + ordered: true, + start, + spread, + children: items, + }; +} + +/** + * Create an unordered list node + */ +export function createUnorderedList(items: ListItem[], spread = true): List { + return { + type: "list", + ordered: false, + spread, + children: items, + }; +} diff --git a/packages/docs/src/remark/libs/text.ts b/packages/docs/src/remark/libs/text.ts new file mode 100644 index 0000000..cc477e7 --- /dev/null +++ b/packages/docs/src/remark/libs/text.ts @@ -0,0 +1,222 @@ +import type { + Blockquote, + Paragraph, + Root, + RootContent, + Table, + TableCell, + TableRow, +} from "mdast"; +import { toString as mdastToString } from "mdast-util-to-string"; +import type { MdxNode } from "./types"; + +// Common regex patterns +const MULTI_WHITESPACE = /\s+/g; +const TRIM_WHITESPACE = /^\s+|\s+$/g; +const HORIZONTAL_WHITESPACE = /[ \t]+/g; +const BOLD_HEADER = /^\s*\*\*(.+?)\*\*\s*$/s; 
+const BLOCKQUOTE_LINE = /^>\s?/; +/** + * Normalize whitespace in a string + * @param text - The text to normalize + * @param preserveNewlines - If true, preserves newlines while cleaning horizontal whitespace + */ +export function normalizeWhitespace( + text: string, + preserveNewlines = false +): string { + const pattern = preserveNewlines ? HORIZONTAL_WHITESPACE : MULTI_WHITESPACE; + + const normalized = text.replace(pattern, " "); + + return preserveNewlines + ? normalized.replace(TRIM_WHITESPACE, "").trim() + : normalized.trim(); +} + +/** + * Extract text content from MDX node children + */ +export function extractNodeText(children: MdxNode["children"]): string { + const root: Root = { + type: "root", + children: (children as unknown as RootContent[]) ?? [], + }; + return mdastToString(root); +} + +// cleanText function removed - consolidated into normalizeWhitespace with preserveNewlines=true + +/** + * Extract and normalize text from MDX node children + */ +export function extractAndCleanNodeText(children: MdxNode["children"]): string { + return normalizeWhitespace(extractNodeText(children), true); +} + +/** + * Extract text content from a paragraph node + */ +export function extractParagraphContent(node: Paragraph): string[] { + const rawText = extractNodeText(node.children); + if (!rawText.trim()) { + return []; + } + const cleanedText = normalizeWhitespace(rawText, true); + return [cleanedText]; +} + +/** + * Extract markdown content from a table node + */ +export function extractTableContent(node: Table): string[] { + const tableRows = node.children || []; + if (tableRows.length === 0) { + return []; + } + + // Get first row (header) and convert to markdown + const headerRow = tableRows[0] as TableRow; + const headerCells = headerRow.children || []; + const headerText = headerCells + .map((cell: TableCell) => { + const cellText = extractNodeText(cell.children || []); + return cellText.trim(); + }) + .join("|"); + + // Get second row (first data row) 
and convert to markdown + const dataRow = tableRows[1] as TableRow; + if (!dataRow) { + return []; + } + + const dataCells = dataRow.children || []; + const dataText = dataCells + .map((cell: TableCell) => { + const cellText = extractNodeText(cell.children || []); + return cellText.trim(); + }) + .join("|"); + + return [`${headerText}\n${dataText}`]; +} + +/** + * Extract markdown content from a blockquote node + */ +export function extractBlockquoteContent(node: Blockquote): string[] { + const blockquoteText = extractNodeText(node.children || []); + if (!blockquoteText.trim()) { + return []; + } + return [`> ${blockquoteText.trim()}`]; +} + +/** + * Create a blockquote from content text + */ +export function createBlockquoteFromContent( + contentText: string +): Blockquote | null { + if (!contentText.startsWith("> ")) { + return null; + } + + const lines = contentText.split("\n"); + const paragraphs = lines + .filter((line) => line.trim()) // Remove empty lines + .map((line) => { + // Remove leading > and optional space + const cleanLine = line.replace(BLOCKQUOTE_LINE, ""); + return { + type: "paragraph", + children: [{ type: "text", value: cleanLine }], + }; + }); + + return { + type: "blockquote", + children: paragraphs, + } as Blockquote; +} +/** + * Process content text and return appropriate AST node + */ +export function processContentText( + contentText: string +): Paragraph | Table | Blockquote | null { + // Try to create a table first + const table = createTableFromContent(contentText); + if (table) { + return table; + } + + // Try to create a blockquote + const blockquote = createBlockquoteFromContent(contentText); + if (blockquote) { + return blockquote; + } + + // Check for bold headers with regex to handle whitespace and inner asterisks + const boldHeaderMatch = contentText.match(BOLD_HEADER); + if (boldHeaderMatch?.[1]) { + const headerText = boldHeaderMatch[1].trim(); + return { + type: "paragraph", + children: [ + { type: "strong", children: [{ 
type: "text", value: headerText }] }, + ], + } as Paragraph; + } + + // Skip empty content + if (contentText.trim() === "") { + return null; + } + + // Regular paragraph content + return { + type: "paragraph", + children: [{ type: "text", value: contentText }], + } as Paragraph; +} + +/** + * Create a table from content text + */ +export function createTableFromContent(contentText: string): Table | null { + if (!(contentText.includes("|") && contentText.includes("\n"))) { + return null; + } + + const lines = contentText.split("\n"); + const firstLine = lines[0]; + const secondLine = lines[1]; + if (lines.length < 2 || !firstLine || !secondLine) { + return null; + } + + const headers = firstLine.split("|").map((h) => h.trim()); + const data = secondLine.split("|").map((d) => d.trim()); + + return { + type: "table", + children: [ + { + type: "tableRow", + children: headers.map((header) => ({ + type: "tableCell", + children: [{ type: "text", value: header }], + })), + }, + { + type: "tableRow", + children: data.map((cell) => ({ + type: "tableCell", + children: [{ type: "text", value: cell }], + })), + }, + ], + } as Table; +} diff --git a/packages/docs/src/remark/libs/types.ts b/packages/docs/src/remark/libs/types.ts new file mode 100644 index 0000000..30793f9 --- /dev/null +++ b/packages/docs/src/remark/libs/types.ts @@ -0,0 +1,14 @@ +import type { MdxJsxFlowElement, MdxJsxTextElement } from "mdast-util-mdx-jsx"; + +/** + * Common type for MDX JSX elements (both flow and text) + */ +export type MdxNode = MdxJsxFlowElement | MdxJsxTextElement; + +/** + * Common type for MDX JSX attributes + */ +export type { + MdxJsxAttribute, + MdxJsxAttributeValueExpression, +} from "mdast-util-mdx-jsx"; diff --git a/packages/docs/src/remark/plugins/callout.remark.ts b/packages/docs/src/remark/plugins/callout.remark.ts new file mode 100644 index 0000000..4dd585f --- /dev/null +++ b/packages/docs/src/remark/plugins/callout.remark.ts @@ -0,0 +1,142 @@ +import type { + Blockquote, + 
Paragraph, + PhrasingContent, + Root, + Strong, + Text, +} from "mdast"; +import type { Plugin } from "unified"; +import { + createJsxComponentProcessor, + createStrong, + createText, + extractNodeText, + getAttributeValue, + type MdxNode, + normalizeWhitespace, +} from "../libs"; + +type Variant = + | "info" + | "note" + | "tip" + | "warning" + | "success" + | "error" + | "canary" + | "deprecated" + | "experimental"; + +function variantLabelAndEmoji(raw: string | null): { + variant: Variant; + emoji: string; + label: string; +} { + const v = (raw ?? "info").toLowerCase(); + switch (v) { + case "warning": + return { variant: "warning", emoji: "⚠️", label: "Warning:" }; + case "tip": + return { variant: "tip", emoji: "💡", label: "Tip:" }; + case "success": + return { variant: "success", emoji: "✅", label: "Success:" }; + case "error": + return { variant: "error", emoji: "❌", label: "Error:" }; + case "canary": + return { variant: "canary", emoji: "🐤", label: "Canary:" }; + case "deprecated": + return { variant: "deprecated", emoji: "🚫", label: "Deprecated:" }; + case "experimental": + return { variant: "experimental", emoji: "🧪", label: "Experimental:" }; + default: + return { variant: "info", emoji: "ℹ️", label: "Info:" }; + } +} + +// Use shared createStrong function from remark-libs + +/** + * Process the content of a callout node, handling JSX elements + */ +function processCalloutContent(node: MdxNode): string { + let processedContent = ""; + + // Process each child node to handle HTML elements + for (const child of node.children || []) { + if ( + child.type === "mdxJsxTextElement" || + child.type === "mdxJsxFlowElement" + ) { + // Handle JSX elements like , , etc. 
+ const tagName = child.name; + const innerText = extractNodeText( + (child.children as MdxNode["children"]) || [] + ); + + switch (tagName) { + case "strong": + case "b": + processedContent += `**${innerText}**`; + break; + case "code": + processedContent += `\`${innerText}\``; + break; + case "em": + case "i": + processedContent += `*${innerText}*`; + break; + default: + processedContent += innerText; + } + } else { + // Handle regular text nodes + processedContent += extractNodeText([child as PhrasingContent]); + } + } + + return normalizeWhitespace(processedContent) || ""; +} + +export const remarkCalloutToMarkdown: Plugin<[], Root> = () => { + return createJsxComponentProcessor("Callout", (node) => { + const variantLabelAndEmojiResult = variantLabelAndEmoji( + getAttributeValue(node, "variant") ?? getAttributeValue(node, "type") + ); + const { emoji, label } = variantLabelAndEmojiResult; + const title = (getAttributeValue(node, "title") ?? "").trim() || null; + const clean = processCalloutContent(node); + + // Create single paragraph with inline content (like steps component) + const paragraphChildren: Array = []; + + // Add emoji and label + if (emoji) { + paragraphChildren.push(createText(`${emoji} `)); + } + paragraphChildren.push(createStrong(label)); + + // Add title if present + if (title) { + paragraphChildren.push(createText(" ")); + paragraphChildren.push(createStrong(title)); + } + + // Add content inline if present + if (clean) { + paragraphChildren.push(createText(`\n${clean}`)); + } + + const paragraph: Paragraph = { + type: "paragraph", + children: paragraphChildren, + }; + + const blockquote: Blockquote = { + type: "blockquote", + children: [paragraph], + }; + + return [blockquote]; + }); +}; diff --git a/packages/docs/src/remark/plugins/cards.remark.ts b/packages/docs/src/remark/plugins/cards.remark.ts new file mode 100644 index 0000000..a2ff4a0 --- /dev/null +++ b/packages/docs/src/remark/plugins/cards.remark.ts @@ -0,0 +1,157 @@ +import type 
{ + Link, + List, + ListItem, + Paragraph, + PhrasingContent, + Root, + Text, +} from "mdast"; +import type { Transformer } from "unified"; +import { u } from "unist-builder"; +import { visit } from "unist-util-visit"; +import { + extractNodeText, + getAttributeValue, + hasName, + type MdxNode, + normalizeWhitespace, +} from "../libs"; + +/** + * Types + */ +type CardsToMarkdownOptions = { + /** When true, append a plain-text description after the link. */ + withDescriptions?: boolean; +}; + +type LinkItem = { + href: string; + text: string; + description?: string; +}; + +function createLinkItem(node: MdxNode): LinkItem | null { + const href = normalizeWhitespace(getAttributeValue(node, "href") ?? ""); + if (!href) { + return null; + } + + const titleAttr = normalizeWhitespace(getAttributeValue(node, "title") ?? ""); + const text = titleAttr || extractNodeText(node.children); + if (!text) { + return null; + } + + const description = + normalizeWhitespace(getAttributeValue(node, "description") ?? 
"") || + undefined; + + return { href, text, description }; +} + +function collectLinksFromParagraph(paragraph: { + children?: unknown[]; +}): LinkItem[] { + const results: LinkItem[] = []; + if (!paragraph.children) { + return results; + } + + for (const child of paragraph.children) { + // Check for Card component with variant="compact" + if (hasName(child, "Card")) { + const variant = getAttributeValue(child, "variant"); + if (variant === "compact") { + const linkItem = createLinkItem(child); + if (linkItem) { + results.push(linkItem); + } + } + } + } + return results; +} + +function collectLinksFromContainer(container: MdxNode): LinkItem[] { + const results: LinkItem[] = []; + + // Iterate only over immediate children to preserve deterministic ordering + if (!container.children) { + return results; + } + + for (const child of container.children) { + if (child.type === "paragraph") { + results.push(...collectLinksFromParagraph(child)); + } else if (hasName(child, "Card")) { + // Check for Card component with variant="compact" + const variant = getAttributeValue(child, "variant"); + if (variant === "compact") { + const linkItem = createLinkItem(child); + if (linkItem) { + results.push(linkItem); + } + } + } + } + + return results; +} + +function toListItem(item: LinkItem, withDescriptions: boolean): ListItem { + const linkNode: Link = u("link", { url: item.href }, [ + u("text", item.text) as Text, + ]) as Link; + + const phrasing: PhrasingContent[] = [linkNode]; + if (withDescriptions && item.description) { + phrasing.push(u("text", ` — ${item.description}`) as Text); + } + + const para: Paragraph = u("paragraph", phrasing) as Paragraph; + + return { + type: "listItem", + spread: false, + children: [para], + }; +} + +export function remarkCardsToMarkdown( + options: CardsToMarkdownOptions = {} +): Transformer { + const { withDescriptions = false } = options; + + return (tree: Root): void => { + visit( + tree, + ["mdxJsxFlowElement", "mdxJsxTextElement"], + (node, 
index, parent) => { + if (typeof index !== "number" || !parent) { + return; + } + // Only support the new Cards container + if (!hasName(node, "Cards")) { + return; + } + + const links = collectLinksFromContainer(node); + if (links.length === 0) { + parent.children.splice(index, 1); + return; + } + + const list: List = { + type: "list", + ordered: false, + spread: false, + children: links.map((l) => toListItem(l, withDescriptions)), + }; + + parent.children[index] = list; + } + ); + }; +} diff --git a/packages/docs/src/remark/plugins/include.remark.ts b/packages/docs/src/remark/plugins/include.remark.ts new file mode 100644 index 0000000..e9546c0 --- /dev/null +++ b/packages/docs/src/remark/plugins/include.remark.ts @@ -0,0 +1,477 @@ +/** + * Remark plugin to handle include/import MDX elements. + * This replaces the circular re-export with an actual implementation. + */ + +import { existsSync } from "node:fs"; +import { readFile } from "node:fs/promises"; +import { dirname, extname, resolve } from "node:path"; +import type { Code, Root } from "mdast"; +import { remark } from "remark"; +import remarkGfm from "remark-gfm"; +import remarkMdx from "remark-mdx"; +import type { Transformer } from "unified"; +import { visit } from "unist-util-visit"; + +// Regex patterns defined at top level for performance +const FRONTMATTER_REGEX = /^---\n([\s\S]*?)\n---\n([\s\S]*)$/; + +// Shared processor for parsing included content +const sharedProcessor = remark().use(remarkMdx).use(remarkGfm); + +// Simple frontmatter parser for our build pipeline +function parseFrontmatter(content: string): { content: string } { + const match = content.match(FRONTMATTER_REGEX); + + if (!match || match[2] === undefined) { + return { content }; + } + + return { content: match[2] }; +} + +function flattenNode(node: Record): string { + const children = node.children as Record[] | undefined; + const value = node.value as string | undefined; + + if (children) { + return children + .map((child: Record) 
=> flattenNode(child)) + .join(""); + } + + if (value) { + return value; + } + + return ""; +} + +function parseSpecifier(specifier: string): { + file: string; + section?: string; +} { + const idx = specifier.lastIndexOf("#"); + if (idx === -1) { + return { file: specifier }; + } + + return { + file: specifier.slice(0, idx), + section: specifier.slice(idx + 1), + }; +} + +// Extract a specific
    from a parsed MDX root +function extractSection(root: Root, sectionId: string): Root | null { + for (const child of root.children as unknown as Record[]) { + const type = child.type as string | undefined; + const name = (child as Record).name as string | undefined; + if (type === "mdxJsxFlowElement" && name === "section") { + const attributes = (child as Record).attributes as + | Record[] + | undefined; + const hasId = attributes?.some( + (attr) => + attr && + (attr as Record).type === "mdxJsxAttribute" && + (attr as Record).name === "id" && + (attr as Record).value === sectionId + ); + if (hasId) { + const children = (child as Record).children as + | Record[] + | undefined; + return { + type: "root", + children: (children ?? []) as unknown as Root["children"], + }; + } + } + } + return null; +} + +// Extract attributes from MDX JSX node +function extractAttributes( + node: Record +): Record { + const params: Record = {}; + + const attributes = node.attributes as Record[] | undefined; + if (attributes) { + for (const attr of attributes) { + if (attr.type === "mdxJsxAttribute") { + const name = attr.name as string; + const value = attr.value as string | null; + params[name] = value; + } + } + } + + return params; +} + +// Helpers to simplify node replacement +function replaceWithParagraph( + node: Record, + text: string +): void { + Object.assign(node, { + type: "paragraph", + children: [ + { + type: "text", + value: text, + }, + ], + }); +} + +function replaceWithRootChildren( + node: Record, + children: unknown[] +): void { + Object.assign(node, { + type: "root", + children, + }); +} + +/** + * Safe parent promotion helper for include transformations. + * + * When including content that results in multiple top-level nodes (type: 'root'), + * we "promote" the replacement up to the parent level if the current node is + * inside a paragraph. This prevents nested structure issues. 
+ * + * Promotion occurs when: + * - Parent exists AND parent is a paragraph AND replacement.type === 'root' + * + * useParent = true means we replace the parent paragraph with the root's children, + * effectively "flattening" the structure by promoting content up one level. + * + * Example: + * Input:

<p><include /></p> → {type: 'root', children: [{type: 'h1'}, {type: 'p'}]}
 + * Output: <p><include /></p>
    becomes {type: 'h1'} and sibling {type: 'p'} + * (paragraph is replaced with the included content's children) + */ +function isParagraph(node: Record): boolean { + return node.type === "paragraph"; +} +function replaceTarget( + node: Record, + parent: Record | null, + replacement: + | { type: "root"; children: unknown[] } + | { type: "paragraph"; children: unknown[] } +) { + const useParent = + parent && isParagraph(parent) && replacement.type === "root"; + Object.assign(useParent ? parent : node, replacement); +} + +type ParserLike = { parse: (v: string) => unknown }; + +function annotateNestedIncludes(root: Root, baseDir: string | null): void { + if (!baseDir) { + return; + } + + const includeTagNames = ["import", "include-c15t", "include"]; + + visit(root, (node) => { + const record = node as unknown as Record; + const nodeType = record.type as string | undefined; + const nodeName = record.name as string | undefined; + + if ( + (nodeType === "mdxJsxFlowElement" || nodeType === "mdxJsxTextElement") && + nodeName && + includeTagNames.includes(nodeName) + ) { + const attributes = + (record.attributes as Record[] | undefined) ?? []; + const hasBaseDir = attributes.some( + (attr) => + attr && + (attr as Record).type === "mdxJsxAttribute" && + (attr as Record).name === "baseDir" + ); + + if (!hasBaseDir) { + attributes.push({ + type: "mdxJsxAttribute", + name: "baseDir", + value: baseDir, + }); + + record.attributes = attributes; + } + } + + return; + }); +} + +function includeContentAsMarkdown( + node: Record, + includeFile: string, + bodyContent: string, + options: { section?: string; parser?: ParserLike; baseDir?: string | null } +): void { + try { + const chosenParser = + options.parser ?? 
(sharedProcessor as unknown as ParserLike); + let parsed = chosenParser.parse(bodyContent.trim()) as Root; + + if (options.section) { + const extracted = extractSection(parsed, options.section); + if (extracted) { + parsed = extracted; + } else { + replaceWithParagraph( + node, + `[Error: Could not find section "${options.section}" in ${includeFile}]` + ); + return; + } + } + + // Attach base directory metadata to any nested include/import tags so + // that subsequent passes can resolve their relative paths correctly. + annotateNestedIncludes(parsed, options.baseDir ?? null); + + if (parsed.children && parsed.children.length > 0) { + replaceWithRootChildren(node, parsed.children); + } else { + replaceWithParagraph(node, bodyContent.trim()); + } + } catch { + replaceWithParagraph(node, bodyContent.trim()); + } +} + +// Resolve file path with custom base paths +function resolveIncludePath( + file: string, + directory: string, + params: Record, + basePaths: string[] +): string { + const baseDir = params.baseDir; + if (baseDir) { + return resolve(baseDir, file); + } + + // If 'cwd' attribute is set, use process.cwd() + if ("cwd" in params) { + return resolve(process.cwd(), file); + } + + // Try relative to current directory first + const targetPath = resolve(directory, file); + if (existsSync(targetPath)) { + return targetPath; + } + + // Try provided base directories only (no heuristics) + for (const basePath of basePaths) { + const candidate = resolve(basePath, file); + if (existsSync(candidate)) { + return candidate; + } + } + + // Fall back to first base path if available, otherwise directory + if (basePaths.length > 0 && basePaths[0]) { + return resolve(basePaths[0], file); + } + + return resolve(directory, file); +} + +// Check if node is an include node +function isIncludeNode( + node: Record, + tagName: string +): boolean { + const nodeType = node.type as string; + const nodeName = node.name as string; + + return ( + (nodeType === "mdxJsxFlowElement" || 
nodeType === "mdxJsxTextElement") && + nodeName === tagName + ); +} + +// Process a single include node +async function processIncludeNode( + node: Record, + workingDir: string, + basePaths: string[], + fileData?: unknown +): Promise { + const params = extractAttributes(node); + const specifier = flattenNode(node).trim() || (params.src ?? "").trim(); + + if (!specifier) { + return; + } + + const { file: includeFile, section } = parseSpecifier(specifier); + + const targetPath = resolveIncludePath( + includeFile, + workingDir, + params, + basePaths + ); + + // Register dependency with host compiler (for hot reload / rebuilds) + const compiler = ( + fileData as + | { _compiler?: { addDependency?: (p: string) => void } } + | undefined + )?._compiler; + compiler?.addDependency?.(targetPath); + + const isCodeFile = !( + includeFile.endsWith(".md") || includeFile.endsWith(".mdx") + ); + const asCode = Boolean(params.lang) || isCodeFile; + + try { + const content = await readFile(targetPath, "utf8"); + + if (asCode) { + const lang = params.lang ?? extname(includeFile).slice(1); + + Object.assign(node, { + type: "code", + lang, + meta: params.meta, + value: content, + data: {}, + } satisfies Code); + return; + } + + // For markdown/MDX files, parse and include the content properly + const { content: bodyContent } = parseFrontmatter(content); + + // Prefer host site's processor to preserve its plugins/transforms + const ext = includeFile.endsWith(".md") ? "md" : "mdx"; + const hostProcessor = (fileData as Record | undefined) + ?._processor as { getProcessor?: (kind: string) => unknown } | undefined; + const parser = hostProcessor?.getProcessor + ? hostProcessor.getProcessor(ext) + : sharedProcessor; + + includeContentAsMarkdown(node, includeFile, bodyContent, { + baseDir: dirname(targetPath), + section, + parser: parser as ParserLike, + }); + } catch (error) { + const errorMessage = error instanceof Error ? 
error.message : String(error); + process.stderr.write( + `Warning: Failed to include file ${targetPath}: ${errorMessage}\n` + ); + + // Replace with error message + Object.assign(node, { + type: "paragraph", + children: [ + { + type: "text", + value: `[Error: Could not include file ${includeFile}]`, + }, + ], + }); + } +} + +export function remarkInclude( + basePaths: string[] = [] +): Transformer { + const TagNames = ["import", "include-c15t", "include"]; + + return async (tree, file) => { + const workingDir = file.path ? dirname(file.path) : process.cwd(); + + // Support nested includes by repeatedly scanning the tree until no more + // include/import nodes are found. This is safe because: + // - Each successful include replaces the original node + // - We don't introduce new include tags from processed content unless + // they are truly nested includes that still need resolution. + // + // A hard cap on iterations prevents accidental infinite loops in case of + // pathological or cyclic content. + const MAX_PASSES = 10; + + for (let pass = 0; pass < MAX_PASSES; pass += 1) { + let foundInclude = false; + const tasks: Promise[] = []; + + visit(tree, (node, _idx, parent) => { + const nodeRecord = node as unknown as Record; + const isMatch = TagNames.some((t) => isIncludeNode(nodeRecord, t)); + + if (!isMatch) { + return; + } + + foundInclude = true; + + tasks.push( + processIncludeNode( + nodeRecord, + workingDir, + basePaths, + (file as unknown as { data?: unknown })?.data + ).then(() => { + const after = nodeRecord as unknown as { + type?: string; + children?: unknown[]; + }; + + if (after.type === "root" && Array.isArray(after.children)) { + replaceTarget( + nodeRecord, + (parent as unknown as Record) ?? null, + { type: "root", children: after.children } + ); + } else if ( + after.type === "paragraph" && + Array.isArray(after.children) + ) { + const parentRecord = + (parent as unknown as Record) ?? 
null; + + if (parentRecord && isParagraph(parentRecord)) { + // Avoid nested

<p><p>...</p></p>
    structures by promoting + // the included paragraph's children to the parent level. + replaceTarget(nodeRecord, parentRecord, { + type: "root", + children: after.children, + }); + } + } + }) + ); + + // Skip traversing into this node's children; they'll be visited + // on the next pass if they still contain includes. + return "skip"; + }); + + if (!foundInclude) { + // No more includes to process + break; + } + + await Promise.all(tasks); + } + }; +} diff --git a/packages/docs/src/remark/plugins/link-icon.remark.ts b/packages/docs/src/remark/plugins/link-icon.remark.ts new file mode 100644 index 0000000..df98e2b --- /dev/null +++ b/packages/docs/src/remark/plugins/link-icon.remark.ts @@ -0,0 +1,137 @@ +import type { Link, Root } from "mdast"; +import type { Transformer } from "unified"; +import { visit } from "unist-util-visit"; + +// Regex patterns for URL cleaning (defined at top level for performance) +const PROTOCOL_REGEX = /^https?:\/\//; +const WWW_REGEX = /^www\./; + +/** + * Strip protocol and www. from URL for display + */ +function cleanUrlForDisplay(url: string): string { + return url.replace(PROTOCOL_REGEX, "").replace(WWW_REGEX, ""); +} + +/** + * Check if a URL is external (http/https or protocol-relative) + */ +function isExternalUrl(url: string): boolean { + return ( + url.startsWith("http://") || + url.startsWith("https://") || + url.startsWith("//") + ); +} + +/** + * Check if link already has an icon component + */ +function hasIcon(child: unknown): child is { type: string; name: string } { + return ( + !!child && + typeof child === "object" && + "type" in child && + child.type === "mdxJsxTextElement" && + "name" in child && + child.name === "Icon" + ); +} + +/** + * Clean up link text by removing protocol and www. 
prefixes + */ +function cleanLinkText(node: Link, url: string): void { + if (!node.children || node.children.length === 0) { + return; + } + + const cleanedUrl = cleanUrlForDisplay(url); + const urlWithoutProtocol = url.replace(PROTOCOL_REGEX, ""); + const urlWithoutProtocolAndWww = urlWithoutProtocol.replace(WWW_REGEX, ""); + + for (const child of node.children) { + if ( + child && + typeof child === "object" && + "type" in child && + child.type === "text" + ) { + const textValue = child.value; + if ( + textValue === url || + textValue === urlWithoutProtocol || + textValue === urlWithoutProtocolAndWww + ) { + child.value = cleanedUrl; + } + } + } +} + +/** + * Remark plugin to add an icon to external links + * Adds an external link icon (external-link) to markdown links that are external + * Also cleans up link text by removing https://, http://, and www. prefixes + */ +export function remarkLinkIcon(): Transformer { + return (tree) => { + visit(tree, "link", (node: Link) => { + const url = node.url; + + if (!isExternalUrl(url)) { + return; + } + + // Clean up link text: strip https://, http://, and www. 
from text nodes + cleanLinkText(node, url); + + // Skip if link already has an icon + const lastChild = node.children?.[node.children.length - 1]; + if (hasIcon(lastChild)) { + return; + } + + // Create Icon component as MDX JSX element + const iconElement = { + type: "mdxJsxTextElement" as const, + name: "Icon", + attributes: [ + { + type: "mdxJsxAttribute" as const, + name: "name", + value: "external-link", + }, + { + type: "mdxJsxAttribute" as const, + name: "width", + value: "16", + }, + { + type: "mdxJsxAttribute" as const, + name: "height", + value: "16", + }, + { + type: "mdxJsxAttribute" as const, + name: "className", + value: "inline-block align-text-bottom", + }, + { + type: "mdxJsxAttribute" as const, + name: "aria-hidden", + value: "true", + }, + ], + children: [], + }; + + // Add icon to link children + if (node.children) { + node.children.push(iconElement); + } else { + node.children = [iconElement]; + } + }); + }; +} diff --git a/packages/docs/src/remark/plugins/mermaid.remark.ts b/packages/docs/src/remark/plugins/mermaid.remark.ts new file mode 100644 index 0000000..9b4e9f8 --- /dev/null +++ b/packages/docs/src/remark/plugins/mermaid.remark.ts @@ -0,0 +1,82 @@ +import type { Code, Root } from "mdast"; +import type { Transformer } from "unified"; +import { visit } from "unist-util-visit"; +import { + extractNodeText, + getAttributeValue, + hasName, + type MdxNode, +} from "../libs"; + +// Precompiled regexes +const ESCAPED_NL = /\\n/g; // "\\n" -> actual newline +const CRLF = /\r\n/g; // CRLF -> LF +const LEADING_BACKTICK_LINE = /^`\s*\n/; // a lone backtick then newline +const TRAILING_BACKTICK_LINE = /\n\s*`$/; // newline then a lone backtick +const TRAILING_WHITESPACE = /[ \t]+$/; // trailing spaces/tabs on a line +const LEADING_BLANK_LINES = /^\s*\n+/; // one or more blank lines at start +const TRAILING_BLANK_LINES = /\n+\s*$/; // one or more blank lines at end +const HTML_BREAK = //gi; + +function cleanMermaidSource(raw: string): string { + // 
Step 1: Normalize CRLF to LF + let s = raw.replace(CRLF, "\n"); + + // Step 2: Convert escaped newlines + s = s.replace(ESCAPED_NL, "\n"); + + // Step 2.5: Convert HTML line breaks to plain text separators for readability + s = s.replace(HTML_BREAK, " / "); + + // Step 3: Strip only outer blank lines (not leading spaces) + s = s.replace(LEADING_BLANK_LINES, "").replace(TRAILING_BLANK_LINES, ""); + + // Step 4: Remove leading/trailing backtick guard lines + s = s.replace(LEADING_BACKTICK_LINE, "").replace(TRAILING_BACKTICK_LINE, ""); + + // Step 5: Split into lines, trim only trailing whitespace from each line, rejoin + // (preserving leading indentation) + s = s + .split("\n") + .map((line) => line.replace(TRAILING_WHITESPACE, "")) + .join("\n"); + + return s; +} + +function toMermaidCode(value: string): Code { + return { type: "code", lang: "mermaid", value }; +} + +function extractMermaidContent(node: MdxNode): string { + const chartAttr = getAttributeValue(node, "chart"); + const fromChildren = extractNodeText(node.children || []); + const src = + chartAttr && chartAttr.trim().length > 0 ? chartAttr : fromChildren; + return src ? 
cleanMermaidSource(src) : ""; +} + +export function remarkMermaidToMarkdown(): Transformer { + return (tree) => { + visit( + tree, + ["mdxJsxFlowElement", "mdxJsxTextElement"], + (node, index, parent) => { + if (!parent || typeof index !== "number" || !hasName(node, "Mermaid")) { + return; + } + + const value = extractMermaidContent(node); + + // Remove empty Mermaid nodes completely + if (!value) { + parent.children.splice(index, 1); + return index; + } + + const code = toMermaidCode(value); + parent.children[index] = code; + } + ); + }; +} diff --git a/packages/docs/src/remark/plugins/package-command-tabs.remark.ts b/packages/docs/src/remark/plugins/package-command-tabs.remark.ts new file mode 100644 index 0000000..aaa7998 --- /dev/null +++ b/packages/docs/src/remark/plugins/package-command-tabs.remark.ts @@ -0,0 +1,71 @@ +/** @biome-ignore lint/style/useDefaultSwitchClause: the switch statement is complete */ +/** @biome-ignore lint/nursery/noUnnecessaryConditions: these are packages */ +import type { Root } from "mdast"; +import type { Transformer } from "unified"; +import { + createInlineCode, + createJsxComponentProcessor, + createTable, + getAttributeValue, +} from "../libs"; + +type Mode = "run" | "install"; + +type Options = { + /** Column labels. */ + labels?: { pm?: string; command?: string }; + /** Which package managers to include and in what order. 
*/ + managers?: Array<"npm" | "pnpm" | "yarn" | "bun">; +}; + +const DEFAULT_LABELS = { pm: "Package manager", command: "Command" } as const; +const DEFAULT_MANAGERS = ["npm", "pnpm", "yarn", "bun"] as const; + +const COMMANDS = { + install: { + npm: "npm install {pkg}", + pnpm: "pnpm add {pkg}", + yarn: "yarn add {pkg}", + bun: "bun add {pkg}", + }, + run: { + npm: "npx {pkg}", + pnpm: "pnpm dlx {pkg}", + yarn: "yarn dlx {pkg}", + bun: "bunx {pkg}", + }, +} as const; + +type Pm = keyof (typeof COMMANDS)["run"]; + +function cmdsFor(pm: Pm, pkgCmd: string, mode: Mode): string { + const template = COMMANDS[mode][pm]; + return template.replace("{pkg}", pkgCmd); +} + +export function remarkPackageCommandTabsToMarkdown( + opts: Options = {} +): Transformer { + const labels = { ...DEFAULT_LABELS, ...(opts.labels ?? {}) }; + const managers = [...(opts.managers ?? DEFAULT_MANAGERS)]; + + return createJsxComponentProcessor("PackageCommandTabs", (node) => { + const rawCommand = (getAttributeValue(node, "command") ?? "").trim(); + const rawMode = (getAttributeValue(node, "mode") ?? "run").trim(); + const mode: Mode = rawMode === "install" ? 
"install" : "run"; + + if (!rawCommand) { + return []; + } + + // Build table data + const headers = [labels.pm, labels.command]; + const rows = managers.map((pm) => { + const cmd = cmdsFor(pm, rawCommand, mode); + return [pm, [createInlineCode(cmd)]]; + }); + + const table = createTable(headers, rows, ["left", "left"]); + return [table]; + }); +} diff --git a/packages/docs/src/remark/plugins/remove-imports.remark.ts b/packages/docs/src/remark/plugins/remove-imports.remark.ts new file mode 100644 index 0000000..15965fe --- /dev/null +++ b/packages/docs/src/remark/plugins/remove-imports.remark.ts @@ -0,0 +1,49 @@ +import type { Root } from "mdast"; +import type { MdxjsEsm } from "mdast-util-mdxjs-esm"; +import type { Transformer } from "unified"; +import { visit } from "unist-util-visit"; + +// Precompiled regex for performance and lint compliance +const IMPORT_STATEMENT_PATTERN = /^\s*import\s/m; +const EXPORT_STATEMENT_PATTERN = /^\s*export\s/m; + +export function remarkRemoveImports(): Transformer { + return (tree) => { + visit(tree, "mdxjsEsm", (node: MdxjsEsm, index, parent) => { + if ( + parent === null || + parent === undefined || + index === null || + index === undefined + ) { + return; + } + const value = node.value ?? 
""; + + // Check if this node contains import statements + if (IMPORT_STATEMENT_PATTERN.test(value)) { + // Split the content into lines to analyze each statement + const lines = value + .split("\n") + .map((line) => line.trim()) + .filter((line) => line.length > 0); + + // Check if this node contains ONLY import statements + const hasOnlyImports = lines.every((line) => + IMPORT_STATEMENT_PATTERN.test(line) + ); + const hasExports = lines.some((line) => + EXPORT_STATEMENT_PATTERN.test(line) + ); + + // Only remove the node if it contains ONLY imports and no exports + if (hasOnlyImports && !hasExports) { + parent.children.splice(index, 1); + return index; + } + // If it contains mixed content (imports + exports), leave it intact + // This preserves exports even when imports are present in the same node + } + }); + }; +} diff --git a/packages/docs/src/remark/plugins/steps.remark.ts b/packages/docs/src/remark/plugins/steps.remark.ts new file mode 100644 index 0000000..1649793 --- /dev/null +++ b/packages/docs/src/remark/plugins/steps.remark.ts @@ -0,0 +1,277 @@ +import { decodeNamedCharacterReference } from "decode-named-character-reference"; +import type { Blockquote, ListItem, Node, Paragraph, Root, Table } from "mdast"; +import { compact } from "mdast-util-compact"; +import { toString as mdastToString } from "mdast-util-to-string"; +import type { Plugin } from "unified"; +import { is } from "unist-util-is"; +import { + createJsxComponentProcessor, + createOrderedList, + createStrongParagraph, + getAttributeValue, + hasName, + type MdxNode, + normalizeWhitespace, + processContentNode, +} from "../libs"; + +/** + * Decode HTML entities in text (both named and numeric character references) + */ +function decodeText(text: string): string { + const HEX_PREFIX_LENGTH = 2; // Length of "#x" prefix + const DECIMAL_PREFIX_LENGTH = 1; // Length of "#" prefix + const HEX_RADIX = 16; + const DECIMAL_RADIX = 10; + const UNICODE_MAX_CODE_POINT = 0x10_ff_ff; // Maximum valid 
Unicode code point + const SURROGATE_MIN = 0xd8_00; // Start of Unicode surrogate range + const SURROGATE_MAX = 0xdf_ff; // End of Unicode surrogate range + + const decodedText = text.replace( + /&(#x?[0-9A-Fa-f]+|[a-zA-Z][a-zA-Z0-9]+);/g, + (_m: string, ent: string): string => { + // Numeric: { or 💩 + if (ent[0] === "#") { + const isHex = ent[1]?.toLowerCase() === "x"; + const num = Number.parseInt( + ent.slice(isHex ? HEX_PREFIX_LENGTH : DECIMAL_PREFIX_LENGTH), + isHex ? HEX_RADIX : DECIMAL_RADIX + ); + const isInteger = Number.isInteger(num); + const inUnicodeRange = + isInteger && + num >= 0 && + num <= UNICODE_MAX_CODE_POINT && + !(num >= SURROGATE_MIN && num <= SURROGATE_MAX); // exclude surrogate range + return inUnicodeRange ? String.fromCodePoint(num) : `&${ent};`; + } + // Named + const decoded = decodeNamedCharacterReference(`&${ent};`); + return decoded === false ? `&${ent};` : decoded; + } + ); + return normalizeWhitespace(decodedText); +} + +// Use shared createStrongParagraph function from remark-libs + +/** + * Type guard for Step JSX element + */ +function isStepNode(node: unknown): node is MdxNode { + if (typeof node !== "object" || node === null) { + return false; + } + + const isJsxElement = + is(node, "mdxJsxFlowElement") || is(node, "mdxJsxTextElement"); + if (!isJsxElement) { + return false; + } + + return hasName(node, "Step"); +} + +/** + * Extract title from a Step node, preferring title attribute over content + */ +function extractStepTitle( + step: MdxNode +): { title: string; titleNode: Node | null } | null { + // Prefer explicit title attribute; fall back to first heading/paragraph/text + const attrTitleRaw = (getAttributeValue(step, "title") ?? "").trim(); + + if (attrTitleRaw) { + const title = decodeText(attrTitleRaw); + return title ? { title, titleNode: null } : null; + } + + const children = (step.children ?? []) as unknown[] as Node[]; + const titleNode = + children.find((c) => is(c, "heading") || is(c, "paragraph")) ?? 
+ children.find((c) => c.type === "text" || c.type === "mdxTextExpression") ?? + null; + if (!titleNode) { + return null; + } + const title = decodeText(mdastToString(titleNode)); + return title ? { title, titleNode } : null; +} + +/** + * Get content nodes that come after the title node + */ +function getContentNodes(step: MdxNode, titleNode: Node | null): Node[] { + const children = (step.children ?? []) as unknown[] as Node[]; + let startIdx = -1; + if (titleNode) { + startIdx = children.indexOf(titleNode); + } + return startIdx >= 0 ? children.slice(startIdx + 1) : children; +} + +// Helper function to process content nodes and build list item children +function processContentNodesForListItem( + contentNodes: Node[], + titleParagraph: Paragraph, + listItemChildren: (Paragraph | Table | Blockquote | Node)[] +): void { + if (contentNodes.length === 0) { + return; + } + + // Special handling for table nodes - keep them as separate elements + const firstContentNode = contentNodes[0]; + let startIndex = 1; // Default starting index for the remaining nodes loop + + if (firstContentNode && firstContentNode.type === "table") { + // For tables, keep them as separate elements (don't inline with title) + listItemChildren.push(firstContentNode); + } else if (firstContentNode && firstContentNode.type === "paragraph") { + // For paragraphs, inline the text content into the title + const firstContentText = mdastToString(firstContentNode); + if (firstContentText.trim()) { + const normalizedContent = normalizeWhitespace(firstContentText.trim()); + const contentTextNode = { + type: "text", + value: ` ${normalizedContent}`, + } as const; + titleParagraph.children.push(contentTextNode); + } + // Start processing remaining nodes from index 1 + startIndex = 1; + } else if (firstContentNode) { + // For other block-level content (blockquote, code, list, etc.), + // process as separate element and start remaining loop from index 1 + const processedFirstNode = 
processContentNode(firstContentNode); + if (processedFirstNode) { + listItemChildren.push(processedFirstNode); + } + // Start processing remaining nodes from index 1 + startIndex = 1; + } + + // Add remaining content nodes as separate elements + for (let i = startIndex; i < contentNodes.length; i++) { + const node = contentNodes[i]; + if (!node) { + continue; + } + const contentNode = processContentNode(node); + if (contentNode) { + listItemChildren.push(contentNode); + } + } +} + +/** + * Convert a Step node to a list item + */ +function stepToListItem(step: MdxNode): ListItem | null { + const titleResult = extractStepTitle(step); + if (!titleResult) { + return null; + } + + const { title, titleNode } = titleResult; + const contentNodes = getContentNodes(step, titleNode); + + // Handle special case: if first paragraph was used as title and there are no following siblings + if (contentNodes.length === 0 && titleNode && is(titleNode, "paragraph")) { + return { + type: "listItem", + children: [createStrongParagraph(title)], + } as ListItem; + } + + // Create the title paragraph + const titleParagraph: Paragraph = { + type: "paragraph", + children: [{ type: "strong", children: [{ type: "text", value: title }] }], + }; + + // If no additional content, just return the title + if (contentNodes.length === 0) { + return { + type: "listItem", + children: [titleParagraph], + } as ListItem; + } + + // Create list item children array + const listItemChildren: (Paragraph | Table | Blockquote | Node)[] = [ + titleParagraph, + ]; + + // Process content nodes + processContentNodesForListItem( + contentNodes, + titleParagraph, + listItemChildren + ); + + return { + type: "listItem", + children: listItemChildren, + } as ListItem; +} + +/** + * Process Steps node children to extract list items + */ +function processStepsNode(node: MdxNode): ListItem[] { + const listItems: ListItem[] = []; + + const pushStep = (candidate: unknown): void => { + if (!isStepNode(candidate)) { + return; + 
} + const listItem = stepToListItem(candidate); + if (listItem) { + listItems.push(listItem); + } + }; + + for (const child of node.children ?? []) { + if (isStepNode(child)) { + pushStep(child); + continue; + } + if (child.type === "paragraph") { + // When Steps content is inline-indented, MDX wraps elements in a + // paragraph. Drill one level to find them. + const paragraphChildren = + (child as { children?: unknown[] }).children ?? []; + for (const paragraphChild of paragraphChildren) { + pushStep(paragraphChild); + } + } + } + + return listItems; +} + +/** + * Remark plugin to convert Steps JSX elements to numbered markdown lists + */ +export const remarkStepsToMarkdown: Plugin<[], Root> = () => { + return (tree) => { + const processor = createJsxComponentProcessor("Steps", (node) => { + const items = processStepsNode(node); + + if (items.length === 0) { + return []; + } + + // Create ordered list - always spread for better readability + const list = createOrderedList(items, 1, true); + return [list]; + }); + + processor(tree); + + // Clean up whitespace and empty paragraphs + compact(tree); + return tree; + }; +}; diff --git a/packages/docs/src/remark/plugins/tabs.remark.ts b/packages/docs/src/remark/plugins/tabs.remark.ts new file mode 100644 index 0000000..fe87694 --- /dev/null +++ b/packages/docs/src/remark/plugins/tabs.remark.ts @@ -0,0 +1,191 @@ +import type { Paragraph, PhrasingContent, Root, RootContent } from "mdast"; +import { toString as mdastToString } from "mdast-util-to-string"; +import type { Transformer } from "unified"; +import { + createJsxComponentProcessor, + createStrongParagraph, + getAttributeValue, + hasName, + type MdxNode, + parseItemsArray, +} from "../libs"; + +const BLOCK_TYPES = new Set([ + "blockquote", + "code", + "heading", + "list", + "paragraph", + "table", + "thematicBreak", + "mdxJsxFlowElement", +]); + +function isBlockNode(node: RootContent): boolean { + return BLOCK_TYPES.has(node.type); +} + +type TabSection = { + 
title: string; + nodes: RootContent[]; + orderKey: number; // for ordering against Tabs.items if provided +}; + +// ---------- core transform ---------- + +function extractTabsSections(tabsNode: MdxNode): TabSection[] { + const itemsAttr = parseItemsArray(getAttributeValue(tabsNode, "items")); + const sections: TabSection[] = []; + const children = (tabsNode.children ?? []) as RootContent[]; + let tabIndex = 0; + + // Helper functions to reduce complexity + const checkHasNonEmptyContent = (nodes: RootContent[]): boolean => + nodes.some((node) => { + const textContent = mdastToString(node); + return textContent.trim().length > 0; + }); + + // TODO: implement nested tabs support — recursively process nested Tabs components within tab content + + const getTabTitle = (tabNode: MdxNode): string => { + // Prefer explicit + const titleFromAttr = getAttributeValue(tabNode, "value")?.trim() || null; + + // Fallback: if Tabs has items, map by index + const titleFromItems = itemsAttr?.[tabIndex] ?? null; + + return titleFromAttr || titleFromItems || `Tab ${tabIndex + 1}`; + }; + + const processTabContent = (tabChildren: RootContent[]): RootContent[] => { + if (tabChildren.length === 0) { + return []; + } + const result: RootContent[] = []; + let inlineBuffer: PhrasingContent[] = []; + + const flushInline = (): void => { + if (inlineBuffer.length === 0) { + return; + } + const paragraph: Paragraph = { + type: "paragraph", + children: inlineBuffer, + }; + result.push(paragraph); + inlineBuffer = []; + }; + + for (const child of tabChildren) { + if (isBlockNode(child)) { + flushInline(); + result.push(child); + } else { + inlineBuffer.push(child as PhrasingContent); + } + } + flushInline(); + return result; + }; + + // Helper function to calculate order key for a tab + const calculateOrderKey = (tabNode: MdxNode): number => { + const valueAttr = getAttributeValue(tabNode, "value")?.trim() ?? null; + if (itemsAttr) { + const inItemsIndex = + valueAttr === null ? 
-1 : itemsAttr.indexOf(valueAttr); + return inItemsIndex >= 0 ? inItemsIndex : tabIndex; + } + return sections.length; + }; + + // Helper function to create a tab section + const createTabSection = (tabNode: MdxNode): TabSection | null => { + const title = getTabTitle(tabNode); + const tabChildren = (tabNode.children ?? []) as RootContent[]; + const processedChildren = processTabContent(tabChildren); + + if (!checkHasNonEmptyContent(processedChildren)) { + return null; + } + + return { + title, + nodes: processedChildren, + orderKey: calculateOrderKey(tabNode), + }; + }; + + // Helper function to process a Tab node + const processTabNode = (tabNode: MdxNode) => { + const section = createTabSection(tabNode); + if (section) { + sections.push(section); + } + tabIndex += 1; + }; + + for (const child of children) { + if (hasName(child, "Tab")) { + // Direct Tab child + processTabNode(child as MdxNode); + } else if (child.type === "paragraph") { + // Check if paragraph contains Tab elements + const paragraphChildren = + (child as { children?: RootContent[] }).children ?? []; + for (const paragraphChild of paragraphChildren) { + if (hasName(paragraphChild, "Tab")) { + processTabNode(paragraphChild as MdxNode); + } + } + } + } + + // If itemsAttr exists, sections are already in DOM order which should match, + // but we still sort by orderKey to be explicit. 
+ sections.sort((a, b) => a.orderKey - b.orderKey); + + return sections; +} + +// ---------- plugin ---------- + +// Helper function to add content nodes with proper spacing +const addContentNodes = ( + replacement: RootContent[], + nodes: RootContent[] +): void => { + // Add each content node separately to maintain separation + for (const node of nodes) { + replacement.push(node); + } +}; + +const createReplacement = (sections: TabSection[]): RootContent[] => { + const replacement: RootContent[] = []; + + // Collect all content from all sections with section headers + for (const section of sections) { + // Add section header + const headerParagraph = createStrongParagraph(section.title); + replacement.push(headerParagraph); + + // Add the section content (content nodes already have their own spacing) + addContentNodes(replacement, section.nodes); + } + + return replacement; +}; + +export function remarkTabsToMarkdown(): Transformer { + return createJsxComponentProcessor("Tabs", (node) => { + const sections = extractTabsSections(node); + + if (sections.length === 0) { + return []; + } + + return createReplacement(sections); + }); +} diff --git a/packages/docs/src/remark/plugins/toc-extract.remark.ts b/packages/docs/src/remark/plugins/toc-extract.remark.ts new file mode 100644 index 0000000..20d4be8 --- /dev/null +++ b/packages/docs/src/remark/plugins/toc-extract.remark.ts @@ -0,0 +1,76 @@ +import { readFile } from "node:fs/promises"; +import GithubSlugger from "github-slugger"; +import type { Heading, InlineCode, Root, Text } from "mdast"; +import { remark } from "remark"; +import remarkMdx from "remark-mdx"; +import { visit } from "unist-util-visit"; + +export interface TOCItem { + depth: number; + title: string; + url: string; +} + +/** + * Extract text content from a heading node + * Collects both regular text and inline code content + */ +function extractHeadingText(node: Heading): string { + const textParts: string[] = []; + + // Visit both 'text' and 
'inlineCode' nodes to capture full heading content + visit(node, (childNode) => { + if (childNode.type === "text" || childNode.type === "inlineCode") { + textParts.push((childNode as Text | InlineCode).value); + } + }); + + return textParts.join(""); +} + +/** + * Extract TOC items from MDX AST. + * Uses GithubSlugger (same as rehype-slug) to ensure IDs match rendered headings. + */ +function extractTocFromAst(tree: Root): TOCItem[] { + const toc: TOCItem[] = []; + // Use GithubSlugger for consistent ID generation with rehype-slug + const slugger = new GithubSlugger(); + + visit(tree, "heading", (node: Heading) => { + // Only include h2-h4 headings (skip h1 as it's usually the title) + if (node.depth >= 2 && node.depth <= 4) { + const text = extractHeadingText(node); + if (text) { + toc.push({ + title: text, + url: `#${slugger.slug(text)}`, + depth: node.depth, + }); + } + } + }); + + return toc; +} + +/** + * Extract TOC from MDX content string + */ +export async function extractTocFromContent( + content: string +): Promise { + const processor = remark().use(remarkMdx); + const tree = processor.parse(content); + return extractTocFromAst(tree as Root); +} + +/** + * Extract TOC from an MDX file path. 
+ */ +export async function extractTocFromFile(filePath: string): Promise { + const content = await readFile(filePath, "utf-8"); + // Remove frontmatter before parsing + const contentWithoutFrontmatter = content.replace(/^---[\s\S]*?---\n/, ""); + return extractTocFromContent(contentWithoutFrontmatter); +} diff --git a/packages/docs/src/remark/plugins/type-table.remark.ts b/packages/docs/src/remark/plugins/type-table.remark.ts new file mode 100644 index 0000000..e25a6c1 --- /dev/null +++ b/packages/docs/src/remark/plugins/type-table.remark.ts @@ -0,0 +1,859 @@ +import { existsSync, readFileSync } from "node:fs"; +import { resolve } from "node:path"; +import JSON5 from "json5"; +import type { RootContent, Table } from "mdast"; +import type { MdxJsxFlowElement, MdxJsxTextElement } from "mdast-util-mdx"; +import * as ts from "typescript"; +import { u } from "unist-builder"; +import { + createHeading, + createJsxComponentProcessor, + createParagraph, + createTable, + createTableRow, + getAttributeValue, + hasName, + type MdxNode, + normalizeWhitespace, +} from "../libs"; + +type ObjectType = { + description?: string; + type: string; + typeDescription?: string; + typeDescriptionLink?: string; + default?: string; + required?: boolean; + deprecated?: boolean; +}; + +let __tsCompilerOptions: ts.CompilerOptions | null = null; +const __tsProgramByRootFile = new Map< + string, + { + program: ts.Program; + checker: ts.TypeChecker; + sourceFile: ts.SourceFile; + } +>(); + +function getTypeScriptCompilerOptions(): ts.CompilerOptions { + if (__tsCompilerOptions) { + return __tsCompilerOptions; + } + + // Try to resolve tsconfig.json path relative to current working directory + // This handles both local development and serverless environments + const tsConfigPath = resolve(process.cwd(), "tsconfig.json"); + + // Read and parse tsconfig.json if it exists + let compilerOptions: ts.CompilerOptions = { + target: ts.ScriptTarget.ESNext, + module: ts.ModuleKind.ESNext, + jsx: 
ts.JsxEmit.ReactJSX, + moduleResolution: ts.ModuleResolutionKind.Bundler, + allowJs: true, + skipLibCheck: true, + strict: true, + esModuleInterop: true, + resolveJsonModule: true, + isolatedModules: true, + }; + + if (existsSync(tsConfigPath)) { + try { + const configFile = ts.readConfigFile(tsConfigPath, (path) => + readFileSync(path, "utf-8") + ); + const parsedConfig = ts.parseJsonConfigFileContent( + configFile.config, + ts.sys, + process.cwd() + ); + compilerOptions = { ...compilerOptions, ...parsedConfig.options }; + } catch { + // Fallback to default options if tsconfig parsing fails + } + } + + __tsCompilerOptions = compilerOptions; + return compilerOptions; +} + +function getTypeScriptProgramForFile(rootFilePath: string): { + program: ts.Program; + checker: ts.TypeChecker; + sourceFile: ts.SourceFile; +} | null { + const cached = __tsProgramByRootFile.get(rootFilePath); + if (cached) { + return cached; + } + + const compilerOptions = getTypeScriptCompilerOptions(); + const host = ts.createCompilerHost(compilerOptions, true); + const program = ts.createProgram([rootFilePath], compilerOptions, host); + const sourceFile = program.getSourceFile(rootFilePath); + if (!sourceFile) { + return null; + } + const checker = program.getTypeChecker(); + + const value = { program, checker, sourceFile }; + __tsProgramByRootFile.set(rootFilePath, value); + return value; +} + +type TypeTableOptions = { + /** When true, include the description column in the output table. */ + includeDescriptions?: boolean; + /** When true, include the default value column in the output table. */ + includeDefaults?: boolean; + /** When true, include the required status column in the output table. */ + includeRequired?: boolean; + /** Base path to resolve relative file paths for AutoTypeTable components. 
*/ + basePath?: string; +}; + +const TABLE_HEADING_DEPTH = 3 as const; + +// Precompiled regex for import type resolution +const IMPORT_TYPE_PATTERN = /import\(["']([^"']+)["']\)\.(\w+)/; + +// Precompiled regex for JSDoc extraction +const JSDOC_PATTERN = /\/\*\*[\s\S]*?\*\//; + +const TRAILING_SLASHES_PATTERN = /\/+$/; + +type ParsedProperty = { + name: string; + property: ObjectType; +}; + +/** + * Parse a JavaScript object literal from an MDX attribute value expression. + * This handles the type object that gets passed to the TypeTable component. + */ +function parseTypeObject( + raw: string | null +): Record | null { + if (!raw) { + return null; + } + + const trimmed = raw.trim(); + + try { + // Use JSON5 for robust parsing of JavaScript-like object literals + const parsed = JSON5.parse(trimmed); + + // Validate the structure + if ( + typeof parsed === "object" && + parsed !== null && + !Array.isArray(parsed) + ) { + // Check if it looks like a valid ObjectType record + const isValid = Object.values(parsed).every( + (value) => + typeof value === "object" && value !== null && "type" in value + ); + + return isValid ? (parsed as Record) : null; + } + + return null; + } catch { + return null; + } +} + +// Use shared createTableCell and createTableRow functions from remark-libs + +function formatPropertyDescription(property: ObjectType): string { + const parts: string[] = []; + + if (property.description) { + const desc = + typeof property.description === "string" + ? property.description + : String(property.description); + parts.push(desc); + } + + if (property.typeDescription) { + const typeDesc = + typeof property.typeDescription === "string" + ? 
property.typeDescription + : String(property.typeDescription); + parts.push(`(${typeDesc})`); + } + + return parts.join(" ").trim(); +} + +function formatPropertyType(property: ObjectType): string { + let type = property.type; + + if (property.typeDescriptionLink) { + type = `[${type}](${property.typeDescriptionLink})`; + } + + if (property.deprecated) { + type = `~~${type}~~ (deprecated)`; + } + + return type; +} + +function formatPropertyDefault(property: ObjectType): string { + return property.default === "" ? "-" : (property.default ?? "-"); +} + +function formatPropertyRequired(property: ObjectType): string { + return property.required ? "✅ Required" : "Optional"; +} + +// Use shared createHeading and createParagraph functions from remark-libs + +/** + * Resolve a type name by checking if it's an imported type and extracting just the name + */ +function resolveTypeName( + type: ts.Type, + checker: ts.TypeChecker, + sourceFile?: ts.SourceFile, + typeBeingExtracted?: string +): string { + const fullTypeText = checker.typeToString( + type, + undefined, + ts.TypeFormatFlags.NoTruncation + ); + + // Check if this is an imported type (contains 'import("...")') + const importMatch = fullTypeText.match(IMPORT_TYPE_PATTERN); + if (importMatch) { + const importPath = importMatch.at(1); + const importedTypeName = importMatch.at(2); + if (importPath === undefined || importedTypeName === undefined) { + return fullTypeText; + } + + // If this is the type we're currently extracting, just return the type name + if (typeBeingExtracted && importedTypeName === typeBeingExtracted) { + return importedTypeName; + } + + // If we have a source file and the import path points to the same file, + // just return the type name without the import + if (sourceFile && importPath.includes(sourceFile.fileName)) { + return importedTypeName; + } + + // For external imports, return just the type name + return importedTypeName; + } + + // For local types or built-in types, return the full text + 
return fullTypeText; +} + +function extractJSDocDescription( + node: ts.Node, + sourceFile: ts.SourceFile +): string { + // Get JSDoc comments from the node + const jsDocComments = ts.getJSDocCommentsAndTags(node); + + for (const doc of jsDocComments) { + if (ts.isJSDoc(doc)) { + const comment = doc.comment; + if (typeof comment === "string") { + return comment.trim(); + } + if (Array.isArray(comment)) { + return comment + .map((c) => (typeof c === "string" ? c : c.text)) + .join(" ") + .trim(); + } + } + } + + // Fallback: extract from source text + const fullText = sourceFile.text.substring( + node.getFullStart(), + node.getStart() + ); + const jsDocMatch = fullText.match(JSDOC_PATTERN); + if (jsDocMatch) { + return jsDocMatch[0] + .replace(/\/\*\*|\*\//g, "") + .replace(/\*\s*/g, "") + .trim(); + } + + return ""; +} + +function extractJSDocDefault(node: ts.Node): string { + const jsDocTags = ts.getJSDocTags(node); + for (const tag of jsDocTags) { + if (tag.tagName && tag.tagName.text === "default") { + const comment = tag.comment; + if (typeof comment === "string") { + return comment.trim(); + } + if (Array.isArray(comment)) { + return comment + .map((c) => (typeof c === "string" ? 
c : c.text))
          .join(" ")
          .trim();
      }
    }
  }
  return "";
}

/**
 * Build the ObjectType entry for a single interface/class property:
 * resolved type text, JSDoc description, @default value, and requiredness
 * (derived from the absence of a `?` token).
 */
function extractPropertyInfo(
  property: ts.PropertySignature | ts.PropertyDeclaration,
  checker: ts.TypeChecker,
  sourceFile: ts.SourceFile,
  typeBeingExtracted?: string
): ObjectType {
  const type = checker.getTypeAtLocation(property);
  const typeText = resolveTypeName(
    type,
    checker,
    sourceFile,
    typeBeingExtracted
  );
  const isOptional = !!property.questionToken;

  // Try to get JSDoc comment
  const description = extractJSDocDescription(property, sourceFile);

  // Try to get default value from JSDoc tags
  const defaultValue = extractJSDocDefault(property);

  return {
    type: typeText,
    // Collapse "" to undefined so empty descriptions don't render.
    description: description || undefined,
    required: !isOptional,
    default: defaultValue,
  };
}

/**
 * Collect documented properties from every identifier-named property
 * signature on an interface declaration; computed/string-named members are
 * skipped.
 *
 * NOTE(review): `Record` appears here without type arguments — presumably
 * `Record<string, ObjectType>` lost in transit; confirm against the
 * original file before compiling.
 */
function extractInterfaceProperties(
  interfaceDecl: ts.InterfaceDeclaration,
  checker: ts.TypeChecker,
  sourceFile: ts.SourceFile,
  typeBeingExtracted?: string
): Record {
  const properties: Record = {};

  for (const member of interfaceDecl.members) {
    if (ts.isPropertySignature(member)) {
      const name =
        member.name && ts.isIdentifier(member.name) ? member.name.text : "";
      if (name) {
        properties[name] = extractPropertyInfo(
          member,
          checker,
          sourceFile,
          typeBeingExtracted
        );
      }
    }
  }

  return properties;
}

/** True when the class member carries the `static` modifier. */
function isStaticProperty(member: ts.PropertyDeclaration): boolean {
  const modifiers = ts.getModifiers(member);
  return (
    modifiers?.some((m) => m.kind === ts.SyntaxKind.StaticKeyword) ?? false
  );
}

/**
 * Collect documented instance properties from a class declaration,
 * skipping static members and non-identifier property names.
 */
function extractClassProperties(
  classDecl: ts.ClassDeclaration,
  checker: ts.TypeChecker,
  sourceFile: ts.SourceFile
): Record {
  const properties: Record = {};

  for (const member of classDecl.members) {
    if (ts.isPropertyDeclaration(member) && !isStaticProperty(member)) {
      const name =
        member.name && ts.isIdentifier(member.name) ?
member.name.text : ""; + if (name) { + properties[name] = extractPropertyInfo(member, checker, sourceFile); + } + } + } + + return properties; +} + +/** + * Extract JSDoc description from a property symbol + */ +function extractPropertyDescription( + property: ts.Symbol, + sourceFile: ts.SourceFile +): string { + const declarations = property.getDeclarations(); + const firstDeclaration = declarations?.at(0); + if (firstDeclaration) { + return extractJSDocDescription(firstDeclaration, sourceFile); + } + return ""; +} + +/** + * Extract properties from a type alias with type literal + */ +function extractTypeAliasProperties( + typeAlias: ts.TypeAliasDeclaration, + checker: ts.TypeChecker, + sourceFile: ts.SourceFile, + typeName: string +): Record | null { + const typeNode = typeAlias.type; + + // If it's a type literal (object type), extract properties from it + if (typeNode && ts.isTypeLiteralNode(typeNode)) { + const aliasType = checker.getTypeAtLocation(typeAlias); + const typeAliasText = sourceFile.text.substring( + typeAlias.getStart(), + typeAlias.getEnd() + ); + + if (aliasType.isClassOrInterface()) { + const properties: Record = {}; + const typeProperties = aliasType.getProperties(); + + for (const property of typeProperties) { + const propertyName = property.getName(); + const propertyType = checker.getTypeOfSymbolAtLocation( + property, + sourceFile + ); + const propertyTypeText = resolveTypeName( + propertyType, + checker, + sourceFile, + typeName + ); + + // Check if property is optional by examining the source text + const isOptional = + typeAliasText.includes(`${propertyName}?:`) || + typeAliasText.includes(`${propertyName} ?:`); + + const description = extractPropertyDescription(property, sourceFile); + + properties[propertyName] = { + type: propertyTypeText, + description: description || undefined, + required: !isOptional, + }; + } + + // Only return properties if we found any + return Object.keys(properties).length > 0 ? 
properties : null;
    }
  }

  return null;
}

/**
 * Extract type information from a TypeScript file using TypeScript compiler API
 *
 * Looks for a declaration named `typeName`, preferring interface over class
 * over type alias when several kinds share the name.
 *
 * @returns A property map, a single-entry fallback for opaque type aliases,
 *   or null when the name is not declared in this file.
 */
function extractPropertiesFromSourceFile(
  sourceFile: ts.SourceFile,
  typeName: string,
  checker: ts.TypeChecker
): Record | null {
  // Visit all nodes to find interfaces, classes, and type aliases
  let interfaceDecl: ts.InterfaceDeclaration | null = null;
  let classDecl: ts.ClassDeclaration | null = null;
  let typeAlias: ts.TypeAliasDeclaration | null = null;

  // Depth-first walk; the last matching declaration of each kind wins.
  function visit(node: ts.Node) {
    if (ts.isInterfaceDeclaration(node) && node.name.text === typeName) {
      interfaceDecl = node;
    } else if (
      ts.isClassDeclaration(node) &&
      node.name &&
      node.name.text === typeName
    ) {
      classDecl = node;
    } else if (ts.isTypeAliasDeclaration(node) && node.name.text === typeName) {
      typeAlias = node;
    }
    ts.forEachChild(node, visit);
  }

  visit(sourceFile);

  // Look for interfaces first
  if (interfaceDecl) {
    return extractInterfaceProperties(
      interfaceDecl,
      checker,
      sourceFile,
      typeName
    );
  }

  // Look for classes
  if (classDecl) {
    return extractClassProperties(classDecl, checker, sourceFile);
  }

  // Look for type aliases
  if (typeAlias) {
    // Try to extract properties from type alias
    const aliasProperties = extractTypeAliasProperties(
      typeAlias,
      checker,
      sourceFile,
      typeName
    );
    if (aliasProperties) {
      return aliasProperties;
    }

    // Fallback: return the type alias itself if we couldn't extract properties
    const aliasType = checker.getTypeAtLocation(typeAlias);
    const typeText = checker.typeToString(aliasType);
    return {
      [typeName]: {
        type: typeText,
        description: `Type alias for ${typeName}`,
        required: true,
      },
    };
  }

  return null;
}

/**
 * Load `filePath`, locate `typeName`, and return its documented properties.
 *
 * @param filePath - Path to the TypeScript source, possibly relative.
 * @param typeName - Declaration name to extract.
 * @param basePath - Optional root the path is resolved against.
 * @returns The extracted property map, or null on any failure (missing file,
 *   parse error, name not found) — errors are deliberately swallowed.
 */
export function extractTypeFromFile(
  filePath: string,
  typeName: string,
  basePath?: string
): Record | null {
  try {
    const normalizeAutoTypeTablePath = (
      rawPath: string,
      rawBasePath?: string
    ):
string => {
      if (!rawBasePath) {
        return rawPath;
      }

      // Authors commonly write `path="./packages/..."` even when `basePath` is already
      // pointing at a `.../packages` directory (e.g. `.c15t/packages`). In that case,
      // the naive resolution becomes `.../packages/packages/...` and the file can't be found.
      const basePathNormalized = rawBasePath
        .replaceAll("\\", "/")
        .replace(TRAILING_SLASHES_PATTERN, "");
      if (!basePathNormalized.endsWith("/packages")) {
        return rawPath;
      }

      // Drop the duplicated leading "packages/" segment (with or without "./").
      const pathNormalized = rawPath.replaceAll("\\", "/");
      if (pathNormalized.startsWith("./packages/")) {
        return pathNormalized.slice("./packages/".length);
      }
      if (pathNormalized.startsWith("packages/")) {
        return pathNormalized.slice("packages/".length);
      }

      return rawPath;
    };

    // Resolve the file path using basePath if provided
    const normalizedPath = basePath
      ? normalizeAutoTypeTablePath(filePath, basePath)
      : filePath;
    const resolvedPath = basePath
      ? resolve(basePath, normalizedPath)
      : filePath;

    if (!existsSync(resolvedPath)) {
      return null;
    }

    // getTypeScriptProgramForFile is defined elsewhere in this file —
    // presumably it builds/caches a ts.Program + checker for the file; verify.
    const tsProgram = getTypeScriptProgramForFile(resolvedPath);
    if (!tsProgram) {
      return null;
    }

    return extractPropertiesFromSourceFile(
      tsProgram.sourceFile,
      typeName,
      tsProgram.checker
    );
  } catch {
    // Silently return null if file can't be found or parsed
    return null;
  }
}

/**
 * Build the markdown table for an AutoTypeTable from extracted properties.
 * The column set is driven by the include* options; only the "Required"
 * column is center-aligned.
 */
function createAutoTypeTable(
  properties: ParsedProperty[],
  options: TypeTableOptions
): Table {
  const {
    includeDescriptions = true,
    includeDefaults = true,
    includeRequired = true,
  } = options;

  const headers = ["Property", "Type"];
  if (includeDescriptions) {
    headers.push("Description");
  }
  if (includeDefaults) {
    headers.push("Default");
  }
  if (includeRequired) {
    headers.push("Required");
  }

  // Generate align array dynamically based on headers
  const align = headers.map((header) =>
    header === "Required" ?
"center" : "left" + ); + + const rows = properties.map(({ name, property }) => { + const rowData = [name, formatPropertyType(property)]; + + if (includeDescriptions) { + rowData.push(formatPropertyDescription(property)); + } + + if (includeDefaults) { + rowData.push(formatPropertyDefault(property)); + } + + if (includeRequired) { + rowData.push(formatPropertyRequired(property)); + } + + return rowData; + }); + + return createTable(headers, rows, align); +} + +function addOptionalContent( + content: RootContent[], + title: string | null, + description: string | null +): void { + if (title) { + content.push(createHeading(TABLE_HEADING_DEPTH, title)); + } + if (description) { + content.push(createParagraph(description)); + } +} + +function processAutoTypeTableNode( + node: MdxNode, + options: TypeTableOptions +): RootContent[] { + const title = + normalizeWhitespace(getAttributeValue(node, "title") ?? "") || null; + const description = + normalizeWhitespace(getAttributeValue(node, "description") ?? 
"") || null; + const autoTypeName = getAttributeValue(node, "name") || "UnknownType"; + const autoTypePath = getAttributeValue(node, "path") || "UnknownPath"; + + const content: RootContent[] = []; + addOptionalContent(content, title, description); + + // Try to extract the actual type information from the TypeScript file + const overrideBasePath = + getAttributeValue(node, "basePath") || options.basePath; + const extractedType = extractTypeFromFile( + autoTypePath, + autoTypeName, + overrideBasePath || options.basePath + ); + + if (extractedType && Object.keys(extractedType).length > 0) { + // Successfully extracted type information - generate full table + const properties: ParsedProperty[] = Object.entries(extractedType).map( + ([name, property]) => ({ + name, + property, + }) + ); + + if (properties.length > 0) { + const table = createAutoTypeTable(properties, options); + content.push(table); + } + } else { + // Fallback to simple info table if extraction failed + const infoTable: Table = u( + "table", + { + align: ["left", "left"], + }, + [ + createTableRow(["Property", "Value"]), + createTableRow(["Type Name", `\`${autoTypeName}\``]), + createTableRow(["Source Path", `\`${autoTypePath}\``]), + ] + ) as Table; + + content.push(infoTable); + + // Add a note about this being an AutoTypeTable + content.push( + createParagraph( + `*AutoTypeTable: Could not extract \`${autoTypeName}\` from \`${autoTypePath}\`. 
Verify the path/name and that the file is included by your tsconfig.*` + ) + ); + } + + return content; +} + +function isValidTableNode( + node: MdxJsxFlowElement | MdxJsxTextElement +): boolean { + return hasName(node, "TypeTable") || hasName(node, "AutoTypeTable"); +} + +function processTypeTableNode( + node: MdxNode, + options: TypeTableOptions +): RootContent[] { + const { + includeDescriptions = true, + includeDefaults = true, + includeRequired = true, + } = options; + + // Early validation + if (!isValidTableNode(node)) { + return []; + } + + // Handle AutoTypeTable components separately + if (hasName(node, "AutoTypeTable")) { + return processAutoTypeTableNode(node, options); + } + + // Handle regular TypeTable components + const title = + normalizeWhitespace(getAttributeValue(node, "title") ?? "") || null; + const description = + normalizeWhitespace(getAttributeValue(node, "description") ?? "") || null; + const typeRaw = getAttributeValue(node, "type"); + + const typeObject = parseTypeObject(typeRaw); + + if (!typeObject) { + return []; + } + + const properties: ParsedProperty[] = Object.entries(typeObject).map( + ([name, property]) => ({ + name, + property, + }) + ); + + if (properties.length === 0) { + return []; + } + + const headers = ["Property", "Type"]; + if (includeDescriptions) { + headers.push("Description"); + } + if (includeDefaults) { + headers.push("Default"); + } + if (includeRequired) { + headers.push("Required"); + } + + // Generate align array dynamically based on headers + const align = headers.map((header) => + header === "Required" ? 
"center" : "left" + ); + + const rows = properties.map(({ name, property }) => { + const rowData = [name, formatPropertyType(property)]; + + if (includeDescriptions) { + rowData.push(formatPropertyDescription(property)); + } + + if (includeDefaults) { + rowData.push(formatPropertyDefault(property)); + } + + if (includeRequired) { + rowData.push(formatPropertyRequired(property)); + } + + return rowData; + }); + + const tableRows = [createTableRow(headers), ...rows.map(createTableRow)]; + + const table: Table = u( + "table", + { + align, + }, + tableRows + ) as Table; + + const content: RootContent[] = []; + + if (title) { + content.push(createHeading(TABLE_HEADING_DEPTH, title)); + } + + if (description) { + content.push(createParagraph(description)); + } + + content.push(table); + + return content; +} + +export const remarkTypeTableToMarkdown = ( + opts: Partial = {} +) => { + const defaults: TypeTableOptions = { + includeDescriptions: true, + includeDefaults: true, + includeRequired: true, + basePath: resolve(process.cwd(), ".c15t"), + }; + const resolved = { ...defaults, ...opts }; + + return createJsxComponentProcessor(["TypeTable", "AutoTypeTable"], (node) => { + if (hasName(node, "AutoTypeTable")) { + return processAutoTypeTableNode(node, resolved); + } + return processTypeTableNode(node, resolved); + }); +}; diff --git a/packages/docs/tsconfig.json b/packages/docs/tsconfig.json new file mode 100644 index 0000000..2f0ddd0 --- /dev/null +++ b/packages/docs/tsconfig.json @@ -0,0 +1,14 @@ +{ + "$schema": "https://json.schemastore.org/tsconfig", + "extends": "@repo/typescript-config/react-library.json", + "compilerOptions": { + "outDir": "dist", + "rootDir": "src", + "module": "ESNext", + "moduleResolution": "Bundler", + "verbatimModuleSyntax": false, + "noUncheckedIndexedAccess": false + }, + "include": ["src/**/*"], + "exclude": ["node_modules", "dist", "**/*.test.ts", "**/*.test.tsx"] +} diff --git a/packages/docs/tsup.config.ts b/packages/docs/tsup.config.ts 
new file mode 100644 index 0000000..88f2d94 --- /dev/null +++ b/packages/docs/tsup.config.ts @@ -0,0 +1,39 @@ +import { defineConfig } from "tsup"; + +export default defineConfig({ + entry: { + "components/index": "src/components/index.ts", + "remark/index": "src/remark/index.ts", + "convert/index": "src/convert/index.ts", + "llm/index": "src/llm/index.ts", + "lint/index": "src/lint/index.ts", + "lint/cli": "src/lint/cli.ts", + }, + format: ["esm"], + dts: true, + clean: true, + sourcemap: true, + target: "es2022", + splitting: false, + treeshake: true, + onSuccess: async () => { + const { chmod, readFile, writeFile } = await import("node:fs/promises"); + const cli = "dist/lint/cli.js"; + const contents = await readFile(cli, "utf8"); + if (!contents.startsWith("#!")) { + await writeFile(cli, `#!/usr/bin/env node\n${contents}`); + } + await chmod(cli, 0o755); + }, + external: [ + "react", + "react-dom", + "next", + "typescript", + "fs", + "path", + "node:fs", + "node:path", + "node:fs/promises", + ], +}); diff --git a/scripts/resolve-workspace-deps.ts b/scripts/resolve-workspace-deps.ts new file mode 100644 index 0000000..cb9fc66 --- /dev/null +++ b/scripts/resolve-workspace-deps.ts @@ -0,0 +1,191 @@ +#!/usr/bin/env bun + +/** + * Resolves `workspace:*`, `workspace:^`, and `workspace:~` protocols + * in workspace package manifests before publishing to npm. + * + * changesets + npm publish doesn't resolve these automatically. 
+ */ + +import { readdir, readFile, writeFile } from "node:fs/promises"; +import path from "node:path"; +import { fileURLToPath } from "node:url"; + +type PackageJson = { + name?: string; + version?: string; + private?: boolean; + dependencies?: Record; + devDependencies?: Record; + peerDependencies?: Record; + optionalDependencies?: Record; +}; + +type WorkspacePackage = { + path: string; + manifest: PackageJson; +}; + +const ROOT = path.resolve(path.dirname(fileURLToPath(import.meta.url)), ".."); +const WORKSPACE_DIRS = ["packages", "apps"]; +const DEP_FIELDS = [ + "dependencies", + "devDependencies", + "peerDependencies", + "optionalDependencies", +] as const; + +async function listDirs(dirPath: string): Promise { + try { + const entries = await readdir(dirPath, { withFileTypes: true }); + return entries + .filter((entry) => entry.isDirectory()) + .map((entry) => entry.name); + } catch { + return []; + } +} + +async function readPackageJson( + packageJsonPath: string +): Promise { + try { + const raw = await readFile(packageJsonPath, "utf8"); + return JSON.parse(raw) as PackageJson; + } catch { + return null; + } +} + +async function getWorkspacePackages(): Promise { + const workspacePackages: WorkspacePackage[] = []; + + for (const workspaceDir of WORKSPACE_DIRS) { + const workspaceDirPath = path.join(ROOT, workspaceDir); + const subDirs = await listDirs(workspaceDirPath); + + for (const subDir of subDirs) { + const packageJsonPath = path.join( + workspaceDirPath, + subDir, + "package.json" + ); + const manifest = await readPackageJson(packageJsonPath); + if (!manifest?.name) { + continue; + } + + workspacePackages.push({ + path: packageJsonPath, + manifest, + }); + } + } + + const rootManifestPath = path.join(ROOT, "package.json"); + const rootManifest = await readPackageJson(rootManifestPath); + if (rootManifest?.name && !rootManifest.private) { + workspacePackages.push({ + path: rootManifestPath, + manifest: rootManifest, + }); + } + + return 
workspacePackages; +} + +function resolveWorkspaceProtocol( + value: string, + resolvedVersion: string +): string { + if (value === "workspace:*") { + return resolvedVersion; + } + if (value === "workspace:^") { + return `^${resolvedVersion}`; + } + if (value === "workspace:~") { + return `~${resolvedVersion}`; + } + if (value.startsWith("workspace:")) { + return value.replace("workspace:", ""); + } + return value; +} + +async function resolveAllWorkspaceDependencies(): Promise { + const workspacePackages = await getWorkspacePackages(); + const versionByPackageName = new Map(); + + for (const pkg of workspacePackages) { + if (pkg.manifest.name && pkg.manifest.version) { + versionByPackageName.set(pkg.manifest.name, pkg.manifest.version); + } + } + + process.stdout.write( + `Found ${versionByPackageName.size} workspace packages: ${JSON.stringify( + Object.fromEntries(versionByPackageName) + )}\n` + ); + + let totalResolved = 0; + + for (const pkg of workspacePackages) { + // Private packages never get published, so their workspace: deps don't + // need rewriting — and keeping them as workspace:* preserves local + // linkage if this script is ever run outside CI. 
+ if (pkg.manifest.private) { + continue; + } + + let modified = false; + + for (const field of DEP_FIELDS) { + const deps = pkg.manifest[field]; + if (!deps) { + continue; + } + + for (const [depName, depRange] of Object.entries(deps)) { + if (!depRange.startsWith("workspace:")) { + continue; + } + + const resolvedVersion = versionByPackageName.get(depName); + if (!resolvedVersion) { + process.stderr.write( + ` ${pkg.manifest.name}: ${depName} ${depRange} -> NOT FOUND in workspace\n` + ); + continue; + } + + const resolvedRange = resolveWorkspaceProtocol( + depRange, + resolvedVersion + ); + if (resolvedRange !== depRange) { + deps[depName] = resolvedRange; + process.stdout.write( + ` ${pkg.manifest.name}: ${depName} ${depRange} -> ${resolvedRange}\n` + ); + modified = true; + totalResolved += 1; + } + } + } + + if (modified) { + await writeFile(pkg.path, `${JSON.stringify(pkg.manifest, null, 2)}\n`); + } + } + + process.stdout.write(`\nResolved ${totalResolved} workspace: references.\n`); +} + +resolveAllWorkspaceDependencies().catch((error) => { + process.stderr.write( + `Failed to resolve workspace dependencies: ${String(error)}\n` + ); + process.exit(1); +}); From cd02443cf4111b84eb4f172bae9e1cdffd90d36b Mon Sep 17 00:00:00 2001 From: Kaylee <65376239+KayleeWilliams@users.noreply.github.com> Date: Fri, 17 Apr 2026 18:11:58 +0100 Subject: [PATCH 3/7] perf: parallelize convertAllMdx + LLM readMarkdownDocs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - convertAllMdx now uses bounded concurrency (default min(cpus, 16)) via mapLimit instead of a sequential for-loop - Pre-create output directories upfront so per-file workers skip repeated mkdir calls on shared parents - Drop the redundant access() check — readFile errors on missing file - readMarkdownDocs in LLM gen swaps for-loop for Promise.all - Expose `concurrency` on MdxToMarkdownConfig for override - Document that enrichFrontmatterFromGit requires fetch-depth: 0 in 
CI Real-content test against c15t (200 .mdx, git enrichment on): before: 4031ms after: 1452ms (2.8x) --- packages/docs/src/convert/convert.ts | 68 +++++++++++++++++++++++----- packages/docs/src/llm/llm.ts | 52 ++++++++++----------- 2 files changed, 83 insertions(+), 37 deletions(-) diff --git a/packages/docs/src/convert/convert.ts b/packages/docs/src/convert/convert.ts index e879808..5c79bba 100644 --- a/packages/docs/src/convert/convert.ts +++ b/packages/docs/src/convert/convert.ts @@ -1,6 +1,7 @@ import { execFile } from "node:child_process"; import { existsSync } from "node:fs"; -import { access, mkdir, readFile, writeFile } from "node:fs/promises"; +import { mkdir, readFile, writeFile } from "node:fs/promises"; +import { cpus } from "node:os"; import { basename, dirname, join, relative, resolve, sep } from "node:path"; import { promisify } from "node:util"; import fg from "fast-glob"; @@ -13,6 +14,39 @@ import { log } from "../internal/logger"; const execFileAsync = promisify(execFile); +const DEFAULT_CONCURRENCY = Math.max(2, Math.min(cpus().length, 16)); + +/** + * Run `fn` on every item in `items` with at most `limit` in-flight concurrent + * calls. Uses a shared cursor so fast workers pull from the queue — keeps + * throughput high when file conversion times vary (some hit git, some don't). 
+ */ +async function mapLimit( + items: readonly T[], + limit: number, + fn: (item: T, index: number) => Promise +): Promise { + const results: R[] = new Array(items.length); + let cursor = 0; + const workerCount = Math.max(1, Math.min(limit, items.length)); + const workers = Array.from({ length: workerCount }, async () => { + while (true) { + const index = cursor; + cursor += 1; + if (index >= items.length) { + return; + } + const item = items[index]; + if (item === undefined) { + return; + } + results[index] = await fn(item, index); + } + }); + await Promise.all(workers); + return results; +} + const FRONTMATTER_REGEX = /^---\n([\s\S]*?)\n---\n([\s\S]*)$/; const HEADING_REGEX = /^#\s+(.+)$/m; const YAML_QUOTE_REGEX = /["\\]/g; @@ -199,8 +233,15 @@ export type MdxToMarkdownConfig = { * If true, inject `lastModified` (ISO-8601) and `lastAuthor` into the * output frontmatter by running `git log -1` against each source file. * Silently skipped for files that are untracked or when git is unavailable. + * Requires `fetch-depth: 0` when run in `actions/checkout` — shallow clones + * return empty git log for files not touched in the single fetched commit. */ enrichFrontmatterFromGit?: boolean; + /** + * Max number of files to convert in parallel. Defaults to + * `min(cpuCount, 16)` with a floor of 2. + */ + concurrency?: number; }; type GitEnrichment = { @@ -341,12 +382,6 @@ async function processMdxFile( writeToStdout = false ): Promise { const resolvedPath = resolve(mdxFilePath); - try { - await access(resolvedPath); - } catch { - log.error(`File not found: ${resolvedPath}`); - return false; - } if (!resolvedPath.endsWith(".mdx")) { log.error(`Not an MDX file: ${resolvedPath}`); @@ -433,9 +468,19 @@ export async function convertAllMdx( const remarkPlugins = config.remarkPlugins ?? []; const enrichFromGitFlag = config.enrichFrontmatterFromGit ?? false; - let converted = 0; + const concurrency = config.concurrency ?? 
DEFAULT_CONCURRENCY; + // Pre-create every output directory in parallel so the per-file workers + // don't repeatedly mkdir the same parent. + const outputDirs = new Set(); for (const mdxFilePath of mdxFiles) { + outputDirs.add(dirname(deriveOutputPath(mdxFilePath, srcDir, outDir))); + } + await Promise.all( + Array.from(outputDirs, (dir) => mkdir(dir, { recursive: true })) + ); + + const results = await mapLimit(mdxFiles, concurrency, async (mdxFilePath) => { try { const { markdown } = await convertMdxFile( mdxFilePath, @@ -443,13 +488,14 @@ export async function convertAllMdx( enrichFromGitFlag ); const outputPath = deriveOutputPath(mdxFilePath, srcDir, outDir); - await mkdir(dirname(outputPath), { recursive: true }); await writeFile(outputPath, markdown); - converted += 1; + return true; } catch (fileError) { log.error(`Failed to process ${mdxFilePath}: ${String(fileError)}`); + return false; } - } + }); + const converted = results.filter(Boolean).length; log.verbose(`Converted ${converted} MDX files`); } diff --git a/packages/docs/src/llm/llm.ts b/packages/docs/src/llm/llm.ts index fca9839..fcdd842 100644 --- a/packages/docs/src/llm/llm.ts +++ b/packages/docs/src/llm/llm.ts @@ -226,32 +226,32 @@ async function readMarkdownDocs( } const files = await collectFiles(docsDir, [".md"]); - const docs: MarkdownDoc[] = []; - - for (const filePath of files) { - const relativePath = path - .relative(docsDir, filePath) - .replace(WINDOWS_PATH_PATTERN, "/"); - const raw = await readFile(filePath, "utf-8"); - const parsed = matter(raw); - const title = - String(parsed.data.title ?? "").trim() || - titleize(path.basename(relativePath, ".md")) || - "Untitled"; - const description = normalizeDescription( - String(parsed.data.description ?? 
"") - ); - const urlPath = toUrlPath(relativePath); - - docs.push({ - title, - description, - urlPath, - absoluteUrl: toAbsoluteUrl(urlPath, baseUrl), - relativePath: relativePath.replace(MD_ONLY_EXTENSION_PATTERN, ""), - content: parsed.content.trim(), - }); - } + const docs = await Promise.all( + files.map(async (filePath) => { + const relativePath = path + .relative(docsDir, filePath) + .replace(WINDOWS_PATH_PATTERN, "/"); + const raw = await readFile(filePath, "utf-8"); + const parsed = matter(raw); + const title = + String(parsed.data.title ?? "").trim() || + titleize(path.basename(relativePath, ".md")) || + "Untitled"; + const description = normalizeDescription( + String(parsed.data.description ?? "") + ); + const urlPath = toUrlPath(relativePath); + + return { + title, + description, + urlPath, + absoluteUrl: toAbsoluteUrl(urlPath, baseUrl), + relativePath: relativePath.replace(MD_ONLY_EXTENSION_PATTERN, ""), + content: parsed.content.trim(), + }; + }) + ); return docs.sort((left, right) => left.urlPath.localeCompare(right.urlPath)); } From 2cf181d3d9333c887db956d4888038ec4b15a65f Mon Sep 17 00:00:00 2001 From: Kaylee <65376239+KayleeWilliams@users.noreply.github.com> Date: Sat, 18 Apr 2026 11:56:48 +0100 Subject: [PATCH 4/7] ci: add non-blocking benchmark workflow MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - apps/docs-smoke/scripts/bench.ts runs convert + LLM gen 3x against the c15t clone and reports median/min/max as a markdown table - .github/workflows/bench.yml runs on PRs touching packages/docs or apps/docs-smoke and writes the table to $GITHUB_STEP_SUMMARY - continue-on-error: true — no threshold gating because GH Actions shared runners are too noisy (20-30% variance) for reliable fail-on-regression Local numbers (200 .mdx files, git enrichment on, 3 runs): convert median 1889ms llm median 9ms convert+llm median 1897ms --- .github/workflows/bench.yml | 46 +++++++++ apps/docs-smoke/package.json | 3 +- 
apps/docs-smoke/scripts/bench.ts | 162 +++++++++++++++++++++++++++++++ 3 files changed, 210 insertions(+), 1 deletion(-) create mode 100644 .github/workflows/bench.yml create mode 100644 apps/docs-smoke/scripts/bench.ts diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml new file mode 100644 index 0000000..c1cf7d4 --- /dev/null +++ b/.github/workflows/bench.yml @@ -0,0 +1,46 @@ +name: Benchmark + +on: + pull_request: + branches: + - main + paths: + - "packages/docs/**" + - "apps/docs-smoke/**" + - ".github/workflows/bench.yml" + +# Don't stack benchmarks on rapid pushes — last one wins. +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + bench: + name: Convert + LLM pipeline + runs-on: ubuntu-latest + # Non-blocking — numbers go to the job summary; no threshold gating + # because shared runners are too noisy for reliable regression detection. + continue-on-error: true + + steps: + - name: Check out code + uses: actions/checkout@v4 + with: + # git enrichment needs full history + fetch-depth: 0 + + - name: Setup Bun + uses: oven-sh/setup-bun@v1 + with: + bun-version: latest + + - name: Install dependencies + run: bun install --frozen-lockfile + + - name: Build @inth/docs + run: cd packages/docs && bun run build + + - name: Run benchmark + env: + BENCH_RUNS: "3" + run: cd apps/docs-smoke && bun run bench diff --git a/apps/docs-smoke/package.json b/apps/docs-smoke/package.json index a5d0579..6407246 100644 --- a/apps/docs-smoke/package.json +++ b/apps/docs-smoke/package.json @@ -8,7 +8,8 @@ "llm": "bun run scripts/llm-generate.ts", "build": "bun run convert && bun run llm", "setup:real": "bun run scripts/setup-real-content.ts", - "test:real": "bun run setup:real && bun run scripts/test-real.ts" + "test:real": "bun run setup:real && bun run scripts/test-real.ts", + "bench": "bun run setup:real && bun run scripts/bench.ts" }, "dependencies": { "@inth/docs": "workspace:*" diff --git 
a/apps/docs-smoke/scripts/bench.ts b/apps/docs-smoke/scripts/bench.ts new file mode 100644 index 0000000..8f2ff6a --- /dev/null +++ b/apps/docs-smoke/scripts/bench.ts @@ -0,0 +1,162 @@ +#!/usr/bin/env bun +/** + * Benchmark the @inth/docs pipeline against the cloned c15t docs. + * Runs each stage N times, reports median/min/max as a markdown table. + * Writes to $GITHUB_STEP_SUMMARY when present so CI surfaces the numbers + * on the PR checks page. No threshold gating — GH Actions shared runners + * are too noisy (20–30% variance) for fail-on-regression to be reliable. + */ + +import { existsSync } from "node:fs"; +import { appendFile, rm } from "node:fs/promises"; +import { join } from "node:path"; +import { convertAllMdx } from "@inth/docs/convert"; +import { generateLLMFullFiles, generateLLMSummaries } from "@inth/docs/llm"; +import { defaultRemarkPlugins, remarkInclude } from "@inth/docs/remark"; + +const RUNS = Number(process.env.BENCH_RUNS ?? 3); +const FIXTURE_DIR = join(process.cwd(), "content-fixtures", "c15t"); +const SRC_DIR = join(FIXTURE_DIR, "docs"); +const OUT_DIR = join(process.cwd(), "public-bench"); +// LLM gen expects .md files under `{outDir}/docs/`, so convert writes into +// `OUT_DIR/docs/` to match the convention. +const CONVERT_OUT_DIR = join(OUT_DIR, "docs"); + +if (!existsSync(SRC_DIR)) { + process.stderr.write( + "content-fixtures/c15t not found — run `bun run setup:real` first.\n" + ); + process.exit(1); +} + +interface Stats { + label: string; + runs: number[]; +} + +function median(values: number[]): number { + const sorted = [...values].sort((a, b) => a - b); + const mid = Math.floor(sorted.length / 2); + if (sorted.length % 2 === 0) { + return Math.round(((sorted[mid - 1] ?? 0) + (sorted[mid] ?? 0)) / 2); + } + return sorted[mid] ?? 
0; +} + +async function timed(fn: () => Promise): Promise { + const start = performance.now(); + await fn(); + return Math.round(performance.now() - start); +} + +async function bench(): Promise { + const convertRuns: number[] = []; + const llmRuns: number[] = []; + + for (let i = 0; i < RUNS; i++) { + await rm(OUT_DIR, { recursive: true, force: true }); + + const convertMs = await timed(() => + convertAllMdx({ + srcDir: SRC_DIR, + outDir: CONVERT_OUT_DIR, + remarkPlugins: [remarkInclude, ...defaultRemarkPlugins], + enrichFrontmatterFromGit: true, + }) + ); + + const llmMs = await timed(async () => { + await generateLLMSummaries({ + srcDir: SRC_DIR, + outDir: OUT_DIR, + baseUrl: "https://docs.example.com", + product: { + name: "Bench SDK", + summary: "Benchmark fixture.", + bestStartingPoints: [], + }, + docsSections: [ + { + title: "Frameworks", + links: [{ urlPath: "/docs/frameworks" }], + }, + { + title: "Integrations", + links: [{ urlPath: "/docs/integrations/overview" }], + }, + ], + }); + await generateLLMFullFiles({ + outDir: OUT_DIR, + baseUrl: "https://docs.example.com", + product: { name: "Bench SDK" }, + topics: [ + { + slug: "frameworks", + title: "Frameworks", + description: "Framework-specific guides.", + includePrefixes: ["frameworks/"], + }, + { + slug: "integrations", + title: "Integrations", + description: "Integration guides.", + includePrefixes: ["integrations/"], + }, + { + slug: "self-host", + title: "Self-host", + description: "Self-hosting guides.", + includePrefixes: ["self-host/"], + }, + ], + }); + }); + + convertRuns.push(convertMs); + llmRuns.push(llmMs); + + process.stdout.write( + `run ${i + 1}/${RUNS}: convert=${convertMs}ms llm=${llmMs}ms\n` + ); + } + + await rm(OUT_DIR, { recursive: true, force: true }); + + return [ + { label: "convert", runs: convertRuns }, + { label: "llm", runs: llmRuns }, + { + label: "convert+llm", + runs: convertRuns.map((c, i) => c + (llmRuns[i] ?? 
0)), + }, + ]; +} + +function renderTable(stats: Stats[]): string { + const lines = [ + "| stage | median | min | max | runs |", + "| --- | ---: | ---: | ---: | :--- |", + ]; + for (const stat of stats) { + const m = median(stat.runs); + const min = Math.min(...stat.runs); + const max = Math.max(...stat.runs); + const series = stat.runs.map((x) => `${x}ms`).join(", "); + lines.push( + `| \`${stat.label}\` | ${m}ms | ${min}ms | ${max}ms | ${series} |` + ); + } + return lines.join("\n"); +} + +const stats = await bench(); +const table = renderTable(stats); +const header = `### @inth/docs benchmark\n\nFixture: c15t docs (${200} .mdx files), git enrichment on, ${RUNS} runs each.\n\n`; +const report = header + table; + +process.stdout.write(`\n${report}\n`); + +if (process.env.GITHUB_STEP_SUMMARY) { + await appendFile(process.env.GITHUB_STEP_SUMMARY, `${report}\n`); +} From 8492e53b4626a7c36e65b06c4578f67c50c7ef8b Mon Sep 17 00:00:00 2001 From: Kaylee <65376239+KayleeWilliams@users.noreply.github.com> Date: Sat, 18 Apr 2026 12:39:11 +0100 Subject: [PATCH 5/7] =?UTF-8?q?fix:=20address=20CodeRabbit=20review=20?= =?UTF-8?q?=E2=80=94=20bugs,=20security,=20CRLF,=20accessibility?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bugs / correctness - FRONTMATTER_REGEX now accepts CRLF in both convert.ts and include.remark.ts so Windows-checked-out docs don't lose their source frontmatter (comment #15, #31) - enrichFromGit uses NUL separator so author names containing `|` survive round-trip (comment #42) - content-processor joins multi-fragment paragraphs / blockquotes instead of keeping only the first (comment #26) - text.ts extractTableContent renders every row, not just header + first row (comment #29) - steps.remark preserves inline markup (code, links, strong) when folding the leading paragraph into the step title instead of flattening via mdastToString (comment #35) - LLM readSourceDocs throws on duplicate route collisions rather 
than silently overwriting (comment #45) - renderRootFullRouter only advertises /docs/llms.txt when that file was actually written (comment #25) Security - GitHub Actions reporter now escapes %/CR/LF/:/, in workflow-command properties and %/CR/LF in message bodies (comment #21) - lint CLI rejects flag-like tokens where an option value is required so `--src --format json` surfaces a usage error (comment #19) - generateLLMFullFiles validates topic.slug against a URL-safe pattern before using it in paths/URLs (comment #46) Schema / config - changelog semver pattern now accepts prerelease + build metadata per SemVer 2.0.0 (comment #23) - changelogDir is resolved against srcDir so relative subdir configs work from any cwd (comment #22) - changesets config $schema pinned to 2.30.0 (comment #2) Accessibility - Tab panels now render the id + aria-labelledby matching their triggers' aria-controls/id (comment #13) - PackageCommandTabs drops role=tablist/tab (we don't implement the roving- focus / arrow-key pattern) and uses aria-pressed on a plain button group (comment #12) - type-table treats `""` as a real default value instead of rendering "—" (comment #14) Infra - bench.yml gets timeout-minutes: 20 (comment #47) - BENCH_RUNS is validated as a positive integer (comment #48) - turbo.json build outputs include dist/** and build/** for packages (comment #41) - convert logs when srcDir is missing and reports failed file count (comments #43, #44) - setup-real-content supports C15T_REF for reproducible bench/test numbers (comment #7) - test-real cleans OUT_DIR before each run and asserts exact mdCount === mdxCount (comment #8) - husky pre-commit no longer masks unfixable ultracite errors behind `set -e` (comment #5) - release.yml documents the npm promise-retry workaround with an issue link (comment #4) - typescript-config drops the redundant publishConfig block (comment #38) - smoke doc fixture says @inth/docs, not @inth/optin-docs (comment #6) Deferred - #3 (OIDC on private 
repo): blocker, needs user decision to either make the repo public or switch back to NPM_TOKEN - #10, #16, #20, #24, #11: disagree with rationale (see PR thread) - #17/#18/#27/#28/#30/#33/#34/#37/#39/#40: stylistic nitpicks, skipping - #32, #36: complex ported logic, tracked as known limitations --- .changeset/config.json | 2 +- .github/workflows/bench.yml | 1 + .github/workflows/release.yml | 6 +- .husky/pre-commit | 9 +-- apps/docs-smoke/content/docs/index.mdx | 2 +- apps/docs-smoke/scripts/bench.ts | 13 +++- apps/docs-smoke/scripts/setup-real-content.ts | 15 +++-- apps/docs-smoke/scripts/test-real.ts | 18 +++++- .../src/components/package-command-tabs.tsx | 8 ++- packages/docs/src/components/tabs.tsx | 15 +++-- packages/docs/src/components/type-table.tsx | 8 ++- packages/docs/src/convert/convert.ts | 16 +++-- packages/docs/src/lint/cli.ts | 35 +++++----- packages/docs/src/lint/reporters.ts | 26 +++++++- packages/docs/src/lint/runner.ts | 10 ++- packages/docs/src/lint/schema.ts | 10 ++- packages/docs/src/llm/llm.ts | 52 ++++++++++++--- .../docs/src/remark/libs/content-processor.ts | 40 ++++++++---- packages/docs/src/remark/libs/text.ts | 64 ++++++------------- .../docs/src/remark/plugins/include.remark.ts | 2 +- .../docs/src/remark/plugins/steps.remark.ts | 19 +++--- packages/typescript-config/package.json | 5 +- turbo.json | 2 +- 23 files changed, 241 insertions(+), 137 deletions(-) diff --git a/.changeset/config.json b/.changeset/config.json index 234c875..eacb836 100644 --- a/.changeset/config.json +++ b/.changeset/config.json @@ -1,5 +1,5 @@ { - "$schema": "https://unpkg.com/@changesets/config@latest/schema.json", + "$schema": "https://unpkg.com/@changesets/config@2.30.0/schema.json", "changelog": "@changesets/cli/changelog", "commit": false, "access": "public", diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index c1cf7d4..e6f124c 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -18,6 +18,7 @@ jobs: bench: 
name: Convert + LLM pipeline runs-on: ubuntu-latest + timeout-minutes: 20 # Non-blocking — numbers go to the job summary; no threshold gating # because shared runners are too noisy for reliable regression detection. continue-on-error: true diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index d51e13f..98c0229 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -41,8 +41,10 @@ jobs: registry-url: "https://registry.npmjs.org" # Upgrade npm for trusted publishing (OIDC provenance requires npm 11+). - # Node.js 22's bundled npm has a missing promise-retry module that breaks - # `npm install -g`, so we install it first as a workaround. + # Workaround: Node.js 22's bundled npm is missing the `promise-retry` + # module which breaks `npm install -g`. We install it first then upgrade + # npm itself. Revisit once Node.js 22 ships a fixed npm. + # Ref: https://github.com/npm/cli/issues/7657 - name: Upgrade npm for provenance publishing run: | npm install -g promise-retry 2>/dev/null || true diff --git a/.husky/pre-commit b/.husky/pre-commit index fcd87e7..bde5f37 100644 --- a/.husky/pre-commit +++ b/.husky/pre-commit @@ -1,10 +1,7 @@ #!/bin/sh -# Exit on any error -set -e +# Note: we handle errors explicitly so FORMAT_EXIT_CODE is reachable. -bun test --pass-with-no-tests - -# ultracite +bun test --pass-with-no-tests || exit $? # Check if there are any staged files STAGED_FILES=$(git diff --cached --name-only --diff-filter=ACMR) @@ -13,7 +10,7 @@ if [ -z "$STAGED_FILES" ]; then exit 0 fi -# Run formatter +# Run formatter (don't abort on non-zero so we can report unfixable issues) bun x ultracite fix FORMAT_EXIT_CODE=$? diff --git a/apps/docs-smoke/content/docs/index.mdx b/apps/docs-smoke/content/docs/index.mdx index da3b758..4f8b69c 100644 --- a/apps/docs-smoke/content/docs/index.mdx +++ b/apps/docs-smoke/content/docs/index.mdx @@ -6,7 +6,7 @@ description: "Welcome to the Smoke SDK docs. Exercise @inth/docs end-to-end." 
# Smoke SDK - This is a fixture page that mirrors how the monorepo apps consume `@inth/optin-docs`. + This is a fixture page that mirrors how consumer apps integrate with `@inth/docs`. Pick your path below. diff --git a/apps/docs-smoke/scripts/bench.ts b/apps/docs-smoke/scripts/bench.ts index 8f2ff6a..47cf45e 100644 --- a/apps/docs-smoke/scripts/bench.ts +++ b/apps/docs-smoke/scripts/bench.ts @@ -14,7 +14,18 @@ import { convertAllMdx } from "@inth/docs/convert"; import { generateLLMFullFiles, generateLLMSummaries } from "@inth/docs/llm"; import { defaultRemarkPlugins, remarkInclude } from "@inth/docs/remark"; -const RUNS = Number(process.env.BENCH_RUNS ?? 3); +const DEFAULT_RUNS = 3; +const parsedRuns = Number.parseInt( + process.env.BENCH_RUNS ?? String(DEFAULT_RUNS), + 10 +); +if (!Number.isInteger(parsedRuns) || parsedRuns < 1) { + process.stderr.write( + `BENCH_RUNS must be a positive integer, got ${JSON.stringify(process.env.BENCH_RUNS)}\n` + ); + process.exit(2); +} +const RUNS = parsedRuns; const FIXTURE_DIR = join(process.cwd(), "content-fixtures", "c15t"); const SRC_DIR = join(FIXTURE_DIR, "docs"); const OUT_DIR = join(process.cwd(), "public-bench"); diff --git a/apps/docs-smoke/scripts/setup-real-content.ts b/apps/docs-smoke/scripts/setup-real-content.ts index 5c2da84..8edab82 100644 --- a/apps/docs-smoke/scripts/setup-real-content.ts +++ b/apps/docs-smoke/scripts/setup-real-content.ts @@ -11,18 +11,25 @@ import { join } from "node:path"; import { $ } from "bun"; const REPO = "https://github.com/c15t/c15t.git"; +// Pin to a specific ref so bench/test numbers are reproducible. Override with +// C15T_REF= to test against upstream changes. +const FIXTURE_REF = process.env.C15T_REF ?? 
"main"; const FIXTURE_DIR = join(process.cwd(), "content-fixtures", "c15t"); await mkdir(join(process.cwd(), "content-fixtures"), { recursive: true }); if (existsSync(join(FIXTURE_DIR, ".git"))) { - process.stdout.write(`Updating existing clone at ${FIXTURE_DIR}\n`); - await $`git -C ${FIXTURE_DIR} fetch --depth=1 origin`.quiet(); - await $`git -C ${FIXTURE_DIR} reset --hard origin/HEAD`.quiet(); + process.stdout.write( + `Updating existing clone at ${FIXTURE_DIR} → ${FIXTURE_REF}\n` + ); + await $`git -C ${FIXTURE_DIR} fetch --depth=1 origin ${FIXTURE_REF}`.quiet(); + await $`git -C ${FIXTURE_DIR} reset --hard FETCH_HEAD`.quiet(); } else { - process.stdout.write(`Cloning ${REPO} → ${FIXTURE_DIR}\n`); + process.stdout.write(`Cloning ${REPO} @ ${FIXTURE_REF} → ${FIXTURE_DIR}\n`); await $`git clone --depth=1 --filter=blob:none --sparse ${REPO} ${FIXTURE_DIR}`.quiet(); await $`git -C ${FIXTURE_DIR} sparse-checkout set docs`.quiet(); + await $`git -C ${FIXTURE_DIR} fetch --depth=1 origin ${FIXTURE_REF}`.quiet(); + await $`git -C ${FIXTURE_DIR} reset --hard FETCH_HEAD`.quiet(); } process.stdout.write("Real content ready.\n"); diff --git a/apps/docs-smoke/scripts/test-real.ts b/apps/docs-smoke/scripts/test-real.ts index e208c93..7b9b2cf 100644 --- a/apps/docs-smoke/scripts/test-real.ts +++ b/apps/docs-smoke/scripts/test-real.ts @@ -7,7 +7,7 @@ */ import { existsSync } from "node:fs"; -import { readdir } from "node:fs/promises"; +import { readdir, rm } from "node:fs/promises"; import { join } from "node:path"; import { convertAllMdx } from "@inth/docs/convert"; import { lintDocs } from "@inth/docs/lint"; @@ -49,6 +49,9 @@ async function countFiles(dir: string, ext: string): Promise { } process.stdout.write(`Converting real c15t docs from ${SRC_DIR}\n`); +// Start from a clean output directory so a prior run can't mask dropped pages. 
+await rm(OUT_DIR, { recursive: true, force: true }); + const start = Date.now(); await convertAllMdx({ srcDir: SRC_DIR, @@ -62,9 +65,9 @@ const mdxCount = await countFiles(SRC_DIR, ".mdx"); const mdCount = await countFiles(OUT_DIR, ".md"); process.stdout.write(` ${mdxCount} .mdx → ${mdCount} .md in ${elapsed}ms\n`); -if (mdCount < mdxCount * 0.9) { +if (mdCount !== mdxCount) { process.stderr.write( - `FAIL: expected at least ${Math.floor(mdxCount * 0.9)} markdown files, got ${mdCount}\n` + `FAIL: expected ${mdxCount} markdown files, got ${mdCount}\n` ); process.exit(1); } @@ -75,4 +78,13 @@ process.stdout.write( ` ${result.summary.filesScanned} files scanned — ${result.summary.errors} error(s), ${result.summary.warnings} warning(s)\n` ); +// Lint findings reflect the fixture repo's content, not our pipeline — so +// they're informational. The hard pass/fail signal above (mdCount === mdxCount) +// is what gates CI. +if (result.summary.errors > 0) { + process.stdout.write( + " (lint errors above are issues in c15t's content, not @inth/docs)\n" + ); +} + process.stdout.write("\nReal-content test passed.\n"); diff --git a/packages/docs/src/components/package-command-tabs.tsx b/packages/docs/src/components/package-command-tabs.tsx index 57b5f4a..00b6744 100644 --- a/packages/docs/src/components/package-command-tabs.tsx +++ b/packages/docs/src/components/package-command-tabs.tsx @@ -41,15 +41,17 @@ export function PackageCommandTabs({ return (
    -
    + {/* Plain button group — intentionally not using role="tablist" / + role="tab" since we don't implement the full tabs keyboard pattern + (roving tabindex, ArrowLeft/Right, associated tabpanel). */} +
    {MANAGERS.map((manager) => (
    + {resolved ? (
               {resolved}
    diff --git a/packages/docs/src/components/tabs.tsx b/packages/docs/src/components/tabs.tsx
    index 5b1d131..1953ce7 100644
    --- a/packages/docs/src/components/tabs.tsx
    +++ b/packages/docs/src/components/tabs.tsx
    @@ -2,6 +2,7 @@
     
     import {
       createContext,
    +  type KeyboardEvent,
       type ReactNode,
       useContext,
       useId,
    @@ -30,6 +31,19 @@ function normalize(value: string): string {
       return value.toLowerCase().replace(/\s+/g, "-");
     }
     
    +/**
+ * Build a stable, unique id for a tab trigger. The index is included so two
+ * items that normalize to the same string (e.g. "Tab 1" and "tab 1") still
+ * get distinct ids; `panelId` below builds the matching panel id.
    + */
+function triggerId(groupId: string, normalized: string, index: number): string { // Id for a tab trigger button; per commit message, panels reference this via aria-labelledby.
+  return `${groupId}-trigger-${normalized}-${index}`; // groupId comes from useId(), so ids are unique per Tabs instance.
+}
    +
+function panelId(groupId: string, normalized: string, index: number): string { // Matching id for the tab panel; pairs with triggerId for aria-controls/aria-labelledby wiring.
+  return `${groupId}-panel-${normalized}-${index}`; // Same uniqueness rationale as triggerId: index disambiguates normalize() collisions.
+}
    +
     export type TabsProps = {
       items?: string[];
       defaultIndex?: number;
    @@ -46,23 +60,59 @@ export function Tabs({ items = [], defaultIndex = 0, children }: TabsProps) {
         [items, activeValue, groupId]
       );
     
+  const handleKeyDown = (event: KeyboardEvent): void => { // Arrow/Home/End keyboard navigation for the tab triggers (roving focus).
+    if (items.length === 0) { // No tabs rendered — nothing to navigate.
+      return;
+    }
+    const currentIndex = items.findIndex( // Index of the active tab; -1 if activeValue matches no item.
+      (item) => normalize(item) === activeValue
+    );
+    let nextIndex = currentIndex;
+
+    if (event.key === "ArrowRight" || event.key === "ArrowDown") {
+      nextIndex = (currentIndex + 1) % items.length; // Wrap forward past the last tab. NOTE(review): if currentIndex is -1 this lands on 0 — looks like an acceptable fallback, confirm.
+    } else if (event.key === "ArrowLeft" || event.key === "ArrowUp") {
+      nextIndex = (currentIndex - 1 + items.length) % items.length; // Wrap backward; "+ items.length" avoids a negative modulo result.
+    } else if (event.key === "Home") {
+      nextIndex = 0; // Jump to first tab.
+    } else if (event.key === "End") {
+      nextIndex = items.length - 1; // Jump to last tab.
+    } else {
+      return; // Any other key: no preventDefault, let the browser handle it.
+    }
+
+    event.preventDefault();
+    const nextItem = items[nextIndex];
+    if (nextItem === undefined) { // Guard for noUncheckedIndexedAccess; unreachable for the indices computed above.
+      return;
+    }
+    const nextNormalized = normalize(nextItem);
+    setActiveValue(nextNormalized);
+    // Move focus to the newly active trigger.
+    const nextId = triggerId(groupId, nextNormalized, nextIndex);
+    document.getElementById(nextId)?.focus(); // Optional chain: no-op if the trigger isn't in the DOM yet.
+  };
    +
       return (
         
    {items.length > 0 ? (
    - {items.map((item) => { + {items.map((item, index) => { const normalized = normalize(item); const isActive = normalized === activeValue; return (