diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 8ed82589..b47837ca 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -9,6 +9,7 @@ jobs:
     runs-on: ubuntu-latest
     permissions:
       contents: write
+      packages: write

     steps:
       - uses: actions/checkout@v4
@@ -63,3 +64,57 @@ jobs:
           body_path: notes.md
           prerelease: ${{ steps.version.outputs.prerelease == 'true' }}
           generate_release_notes: false
+
+  publish-opentypebb:
+    runs-on: ubuntu-latest
+    needs: release
+    if: needs.release.result == 'success'
+    permissions:
+      contents: read
+      packages: write
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - uses: pnpm/action-setup@v4
+
+      - uses: actions/setup-node@v4
+        with:
+          node-version: 22
+          registry-url: https://npm.pkg.github.com
+          cache: pnpm
+
+      - name: Check if version already published
+        id: check
+        working-directory: packages/opentypebb
+        run: |
+          VERSION=$(node -p "require('./package.json').version")
+          echo "version=$VERSION" >> "$GITHUB_OUTPUT"
+          if npm view "@traderalice/opentypebb@$VERSION" version --registry=https://npm.pkg.github.com 2>/dev/null; then
+            echo "exists=true" >> "$GITHUB_OUTPUT"
+          else
+            echo "exists=false" >> "$GITHUB_OUTPUT"
+          fi
+        env:
+          NODE_AUTH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Build
+        if: steps.check.outputs.exists == 'false'
+        working-directory: packages/opentypebb
+        run: |
+          pnpm install --frozen-lockfile
+          pnpm build
+
+      - name: Publish to GitHub Packages
+        if: steps.check.outputs.exists == 'false'
+        working-directory: packages/opentypebb
+        run: npm publish
+        env:
+          NODE_AUTH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Publish to npmjs
+        if: steps.check.outputs.exists == 'false'
+        working-directory: packages/opentypebb
+        run: npm publish --registry=https://registry.npmjs.org
+        env:
+          NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
diff --git a/src/skills/createDocx.md b/data/default/skills/createDocx.md
similarity index 100%
rename from src/skills/createDocx.md
rename to data/default/skills/createDocx.md
diff --git a/package.json b/package.json
index c303b77c..3db2f6b1 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
 {
   "name": "open-alice",
-  "version": "0.9.0-beta.1",
+  "version": "0.9.0-beta.2",
   "description": "File-based trading agent engine",
   "type": "module",
   "scripts": {
@@ -47,7 +47,7 @@
     "grammy": "^1.40.0",
     "hono": "^4.12.5",
     "json5": "^2.2.3",
-    "opentypebb": "link:./packages/opentypebb",
+    "@traderalice/opentypebb": "link:./packages/opentypebb",
     "pino": "^10.3.1",
     "playwright-core": "1.58.2",
     "sharp": "^0.34.5",
diff --git a/packages/opentypebb/package.json b/packages/opentypebb/package.json
index a9155d93..85d6804f 100644
--- a/packages/opentypebb/package.json
+++ b/packages/opentypebb/package.json
@@ -1,5 +1,5 @@
 {
-  "name": "opentypebb",
+  "name": "@traderalice/opentypebb",
   "version": "0.1.0",
   "description": "TypeScript port of OpenBB Platform — financial data infrastructure",
   "type": "module",
@@ -13,6 +13,27 @@
       "types": "./src/server.ts"
     }
   },
+  "files": [
+    "dist"
+  ],
+  "repository": {
+    "type": "git",
+    "url": "git+https://github.com/TraderAlice/OpenAlice.git",
+    "directory": "packages/opentypebb"
+  },
+  "publishConfig": {
+    "registry": "https://npm.pkg.github.com",
+    "exports": {
+      ".": {
+        "import": "./dist/index.js",
+        "types": "./dist/index.d.ts"
+      },
+      "./server": {
+        "import": "./dist/server.js",
+        "types": "./dist/server.d.ts"
+      }
+    }
+  },
   "scripts": {
     "dev": "tsx src/server.ts",
     "build": "tsup",
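
With the scoped name and the publishConfig exports above, the package resolves from npmjs directly, or from GitHub Packages given an .npmrc entry such as `@traderalice:registry=https://npm.pkg.github.com`. A minimal consumer-side sketch in TypeScript (these entry points are the same ones this repo imports from the package later in this diff):

    // Root export resolves to dist/index.js per the publishConfig exports map.
    import { createExecutor, loadAllRouters, type QueryExecutor } from '@traderalice/opentypebb'

    const executor: QueryExecutor = createExecutor()
    loadAllRouters() // router inventory; its return shape is not shown in this diff
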
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index 773857fa..0b0de3db 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -35,6 +35,9 @@ importers:
       '@sinclair/typebox':
         specifier: 0.34.48
         version: 0.34.48
+      '@traderalice/opentypebb':
+        specifier: link:./packages/opentypebb
+        version: link:packages/opentypebb
       ai:
         specifier: ^6.0.86
         version: 6.0.86(zod@4.3.6)
@@ -65,9 +68,6 @@ importers:
       json5:
         specifier: ^2.2.3
         version: 2.2.3
-      opentypebb:
-        specifier: link:./packages/opentypebb
-        version: link:packages/opentypebb
       pino:
         specifier: ^10.3.1
         version: 10.3.1
diff --git a/src/ai-providers/agent-sdk/agent-sdk-provider.ts b/src/ai-providers/agent-sdk/agent-sdk-provider.ts
index a9322f2a..689c3539 100644
--- a/src/ai-providers/agent-sdk/agent-sdk-provider.ts
+++ b/src/ai-providers/agent-sdk/agent-sdk-provider.ts
@@ -10,8 +10,7 @@
 import { resolve } from 'node:path'

 import type { Tool } from 'ai'
-import type { ProviderResult, ProviderEvent } from '../../core/ai-provider.js'
-import type { GenerateProvider, GenerateInput, GenerateOpts } from '../../core/ai-provider.js'
+import type { ProviderResult, ProviderEvent, GenerateProvider, GenerateInput, GenerateOpts } from '../types.js'
 import type { AgentSdkConfig, AgentSdkOverride } from './query.js'
 import { readAgentConfig } from '../../core/config.js'
 import { extractMediaFromToolResultContent } from '../../core/media.js'
diff --git a/src/ai-providers/agent-sdk/tool-bridge.ts b/src/ai-providers/agent-sdk/tool-bridge.ts
index ebe898d1..9b424801 100644
--- a/src/ai-providers/agent-sdk/tool-bridge.ts
+++ b/src/ai-providers/agent-sdk/tool-bridge.ts
@@ -1,7 +1,7 @@
 /**
  * Tool bridge — converts ToolCenter's Vercel AI SDK tools to an Agent SDK MCP server.
  *
- * Reuses the same pattern as `src/plugins/mcp.ts` (extract .shape, wrap execute),
+ * Reuses the same pattern as `src/server/mcp.ts` (extract .shape, wrap execute),
  * but targets `createSdkMcpServer()` instead of `@modelcontextprotocol/sdk McpServer`.
  */
diff --git a/src/ai-providers/claude-code/claude-code-provider.ts b/src/ai-providers/claude-code/claude-code-provider.ts
index e97b4e82..5ce5482b 100644
--- a/src/ai-providers/claude-code/claude-code-provider.ts
+++ b/src/ai-providers/claude-code/claude-code-provider.ts
@@ -9,8 +9,7 @@
  */

 import { resolve } from 'node:path'
-import type { ProviderResult, ProviderEvent } from '../../core/ai-provider.js'
-import type { GenerateProvider, GenerateInput, GenerateOpts } from '../../core/ai-provider.js'
+import type { ProviderResult, ProviderEvent, GenerateProvider, GenerateInput, GenerateOpts } from '../types.js'
 import type { ClaudeCodeConfig } from './types.js'
 import { readAgentConfig } from '../../core/config.js'
 import { extractMediaFromToolResultContent } from '../../core/media.js'
diff --git a/src/ai-providers/types.ts b/src/ai-providers/types.ts
new file mode 100644
index 00000000..c18bc9d8
--- /dev/null
+++ b/src/ai-providers/types.ts
@@ -0,0 +1,64 @@
+import type { SessionStore, SDKModelMessage } from '../core/session.js'
+import type { CompactionConfig, CompactionResult } from '../core/compaction.js'
+import type { MediaAttachment } from '../core/types.js'
+
+// ==================== Provider Events ====================
+
+/** Streaming event emitted by AI providers during generation. */
+export type ProviderEvent =
+  | { type: 'tool_use'; id: string; name: string; input: unknown }
+  | { type: 'tool_result'; tool_use_id: string; content: string }
+  | { type: 'text'; text: string }
+  | { type: 'done'; result: ProviderResult }
+
+// ==================== Types ====================
+
+export interface ProviderResult {
+  text: string
+  media: MediaAttachment[]
+  mediaUrls?: string[]
+}
+
+// ==================== GenerateProvider ====================
+
+/**
+ * Input prepared by AgentCenter, dispatched by provider.inputKind.
+ *
+ * - 'text': Claude Code / Agent SDK — single string prompt with history baked in.
+ * - 'messages': Vercel AI SDK — structured ModelMessage[] (history carried natively).
+ */
+export type GenerateInput =
+  | { kind: 'text'; prompt: string; systemPrompt?: string }
+  | { kind: 'messages'; messages: SDKModelMessage[]; systemPrompt?: string }
+
+/** Per-request options passed through to the underlying provider. */
+export interface GenerateOpts {
+  disabledTools?: string[]
+  vercelAiSdk?: { provider: string; model: string; baseUrl?: string; apiKey?: string }
+  agentSdk?: { model?: string; apiKey?: string; baseUrl?: string }
+}
+
+/**
+ * Slim provider interface — pure data-source adapter.
+ *
+ * Does NOT touch session management. AgentCenter prepares the input,
+ * the provider calls the backend and yields ProviderEvents.
+ */
+export interface GenerateProvider {
+  /** Which input format this provider expects. */
+  readonly inputKind: 'text' | 'messages'
+  /** Session log provenance tag. */
+  readonly providerTag: 'vercel-ai' | 'claude-code' | 'agent-sdk'
+  /** Stateless one-shot prompt (used for compaction summarization, etc.). */
+  ask(prompt: string): Promise<string>
+  /** Stream events from the backend. Yields tool_use/tool_result/text, then done. */
+  generate(input: GenerateInput, opts?: GenerateOpts): AsyncIterable<ProviderEvent>
+  /**
+   * Optional: custom compaction strategy. If implemented, AgentCenter delegates
+   * compaction to the provider instead of using the default compactIfNeeded.
+   *
+   * Use case: providers with native server-side compaction (e.g. Anthropic API
+   * compact-2026-01-12) can bypass the local JSONL-based summarization.
+   */
+  compact?(session: SessionStore, config: CompactionConfig): Promise<CompactionResult>
+}
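
To make the extracted contract concrete, here is a minimal in-memory implementation (a sketch, not part of this PR; it assumes the generic return types shown above, e.g. ask() resolving to plain text, and borrows a real providerTag since that union is closed):

    import type {
      GenerateProvider, GenerateInput, GenerateOpts,
      ProviderEvent, ProviderResult,
    } from './types.js'

    class EchoProvider implements GenerateProvider {
      readonly inputKind = 'text' as const
      readonly providerTag = 'vercel-ai' as const

      // Stateless one-shot prompt: just echo it back.
      async ask(prompt: string): Promise<string> {
        return `echo: ${prompt}`
      }

      // Yield one text event, then the authoritative done event.
      async *generate(input: GenerateInput, _opts?: GenerateOpts): AsyncIterable<ProviderEvent> {
        const text = input.kind === 'text' ? input.prompt : JSON.stringify(input.messages)
        yield { type: 'text', text }
        const result: ProviderResult = { text, media: [] }
        yield { type: 'done', result }
      }
    }
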
diff --git a/src/core/model-factory.ts b/src/ai-providers/vercel-ai-sdk/model-factory.ts
similarity index 97%
rename from src/core/model-factory.ts
rename to src/ai-providers/vercel-ai-sdk/model-factory.ts
index 8326bf85..447cc0eb 100644
--- a/src/core/model-factory.ts
+++ b/src/ai-providers/vercel-ai-sdk/model-factory.ts
@@ -7,7 +7,7 @@
  */

 import type { LanguageModel } from 'ai'
-import { readAIProviderConfig } from './config.js'
+import { readAIProviderConfig } from '../../core/config.js'

 /** Result includes the model plus a cache key for change detection. */
 export interface ModelFromConfig {
diff --git a/src/ai-providers/vercel-ai-sdk/vercel-provider.ts b/src/ai-providers/vercel-ai-sdk/vercel-provider.ts
index 539eee2c..31e09190 100644
--- a/src/ai-providers/vercel-ai-sdk/vercel-provider.ts
+++ b/src/ai-providers/vercel-ai-sdk/vercel-provider.ts
@@ -7,12 +7,11 @@
  */

 import type { ModelMessage, Tool } from 'ai'
-import type { ProviderResult, ProviderEvent } from '../../core/ai-provider.js'
-import type { GenerateProvider, GenerateInput, GenerateOpts } from '../../core/ai-provider.js'
+import type { ProviderResult, ProviderEvent, GenerateProvider, GenerateInput, GenerateOpts } from '../types.js'
 import type { Agent } from './agent.js'
 import type { MediaAttachment } from '../../core/types.js'
 import { extractMediaFromToolOutput } from '../../core/media.js'
-import { createModelFromConfig, type ModelOverride } from '../../core/model-factory.js'
+import { createModelFromConfig, type ModelOverride } from './model-factory.js'
 import { createAgent } from './agent.js'
 import { createChannel } from '../../core/async-channel.js'
diff --git a/src/connectors/telegram/telegram-plugin.ts b/src/connectors/telegram/telegram-plugin.ts
index fa5a2811..fdb6b757 100644
--- a/src/connectors/telegram/telegram-plugin.ts
+++ b/src/connectors/telegram/telegram-plugin.ts
@@ -10,8 +10,9 @@
 import { askClaudeCode } from '../../ai-providers/claude-code/index.js'
 import type { ClaudeCodeConfig } from '../../ai-providers/claude-code/index.js'
 import { SessionStore } from '../../core/session'
 import { forceCompact } from '../../core/compaction'
-import { readAIConfig, writeAIConfig, type AIBackend } from '../../core/ai-config'
-import type { ConnectorCenter, Connector } from '../../core/connector-center.js'
+import { readAIBackend, writeAIBackend, type AIBackend } from '../../core/config'
+import type { ConnectorCenter } from '../../core/connector-center.js'
+import type { Connector } from '../types.js'

 const MAX_MESSAGE_LENGTH = 4096
@@ -82,7 +83,7 @@
     // ── Commands ──
     bot.command('status', async (ctx) => {
-      const aiConfig = await readAIConfig()
+      const aiConfig = await readAIBackend()
       await this.sendReply(ctx.chat.id, `Engine is running. Provider: ${BACKEND_LABELS[aiConfig.backend]}`)
     })
@@ -106,7 +107,7 @@
       try {
         if (data.startsWith('provider:')) {
           const backend = data.slice('provider:'.length) as AIBackend
-          await writeAIConfig(backend)
+          await writeAIBackend(backend)
           await ctx.answerCallbackQuery({ text: `Switched to ${BACKEND_LABELS[backend]}` })

           // Edit the original settings message in-place
@@ -170,7 +171,7 @@
     // ── Initialize and get bot info ──
     await bot.init()
-    const aiConfig = await readAIConfig()
+    const aiConfig = await readAIBackend()
     console.log(`telegram plugin: connected as @${bot.botInfo.username} (backend: ${aiConfig.backend})`)

     // ── Register connector for outbound delivery (heartbeat / cron responses) ──
@@ -320,7 +321,7 @@
   }

   private async sendSettingsMenu(chatId: number) {
-    const aiConfig = await readAIConfig()
+    const aiConfig = await readAIBackend()
     const ccLabel = aiConfig.backend === 'claude-code' ? '> Claude Code' : 'Claude Code'
     const aiLabel = aiConfig.backend === 'vercel-ai-sdk' ? '> Vercel AI SDK' : 'Vercel AI SDK'
     const sdkLabel = aiConfig.backend === 'agent-sdk' ? '> Agent SDK' : 'Agent SDK'
diff --git a/src/connectors/types.ts b/src/connectors/types.ts
new file mode 100644
index 00000000..9482902a
--- /dev/null
+++ b/src/connectors/types.ts
@@ -0,0 +1,58 @@
+import type { MediaAttachment } from '../core/types.js'
+import type { StreamableResult } from '../core/ai-provider.js'
+
+// ==================== Send Types ====================
+
+/** Structured payload for outbound send (heartbeat, cron, manual, etc.). */
+export interface SendPayload {
+  /** Whether this is a chat message or a notification. */
+  kind: 'message' | 'notification'
+  /** The text content to send. */
+  text: string
+  /** Media attachments (e.g. screenshots from tools). */
+  media?: MediaAttachment[]
+  /** Where this payload originated from. */
+  source?: 'heartbeat' | 'cron' | 'manual'
+}
+
+/** Result of a send() call. */
+export interface SendResult {
+  /** Whether the message was actually sent (false for pull-based connectors). */
+  delivered: boolean
+}
+
+// ==================== Connector Interface ====================
+
+/** Discoverable capabilities a connector may support. */
+export interface ConnectorCapabilities {
+  /** Can push messages proactively (heartbeat/cron). False for pull-based. */
+  push: boolean
+  /** Can send media attachments (images). */
+  media: boolean
+}
+
+/**
+ * A connector that can send outbound messages to a user.
+ *
+ * Each plugin (Telegram, Web, MCP-ask) implements this interface and
+ * registers itself with the ConnectorCenter.
+ */
+export interface Connector {
+  /** Channel identifier, e.g. "telegram", "web", "mcp-ask". */
+  readonly channel: string
+  /** Recipient identifier (chat id, "default", session id, etc.). */
+  readonly to: string
+  /** What this connector can do. */
+  readonly capabilities: ConnectorCapabilities
+  /** Send a structured payload through this connector. */
+  send(payload: SendPayload): Promise<SendResult>
+  /**
+   * Optional: stream AI response events to the client in real-time.
+   * Connectors that support this can push ProviderEvents (tool_use, tool_result, text)
+   * as they arrive, then deliver the final result at the end.
+   *
+   * If not implemented, ConnectorCenter falls back to draining the stream
+   * and calling send() with the completed result.
+   */
+  sendStream?(stream: StreamableResult, meta?: Pick<SendPayload, 'kind' | 'source'>): Promise<SendResult>
+}
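
The connector contract gets the same treatment as the provider contract. For illustration, a minimal push-capable connector (a sketch, not part of this PR; the reconstructed Promise and Pick generics above are inferred from the surrounding definitions); since sendStream is omitted here, ConnectorCenter would drain the stream and fall back to send():

    import type { Connector, ConnectorCapabilities, SendPayload, SendResult } from './types.js'

    class ConsoleConnector implements Connector {
      readonly channel = 'console'
      readonly to = 'default'
      readonly capabilities: ConnectorCapabilities = { push: true, media: false }

      // Print the payload and report it as delivered.
      async send(payload: SendPayload): Promise<SendResult> {
        console.log(`[${payload.kind}]`, payload.text)
        return { delivered: true }
      }
    }
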
diff --git a/src/connectors/web/routes/config.ts b/src/connectors/web/routes/config.ts
index 83688aee..6e3c55d2 100644
--- a/src/connectors/web/routes/config.ts
+++ b/src/connectors/web/routes/config.ts
@@ -1,6 +1,5 @@
 import { Hono } from 'hono'
-import { loadConfig, writeConfigSection, readAIProviderConfig, readOpenbbConfig, validSections, type ConfigSection } from '../../../core/config.js'
-import { readAIConfig, writeAIConfig, type AIBackend } from '../../../core/ai-config.js'
+import { loadConfig, writeConfigSection, readAIProviderConfig, readOpenbbConfig, validSections, writeAIBackend, type ConfigSection, type AIBackend } from '../../../core/config.js'

 interface ConfigRouteOpts {
   onConnectorsChange?: () => Promise<void>
@@ -26,7 +25,7 @@
       if (backend !== 'claude-code' && backend !== 'vercel-ai-sdk' && backend !== 'agent-sdk') {
         return c.json({ error: 'Invalid backend. Must be "claude-code", "vercel-ai-sdk", or "agent-sdk".' }, 400)
       }
-      await writeAIConfig(backend as AIBackend)
+      await writeAIBackend(backend as AIBackend)
       return c.json({ backend })
     } catch (err) {
       return c.json({ error: String(err) }, 500)
diff --git a/src/connectors/web/web-plugin.ts b/src/connectors/web/web-plugin.ts
index 3c1baec4..11b55435 100644
--- a/src/connectors/web/web-plugin.ts
+++ b/src/connectors/web/web-plugin.ts
@@ -5,7 +5,7 @@
 import { serveStatic } from '@hono/node-server/serve-static'
 import { resolve } from 'node:path'
 import type { Plugin, EngineContext } from '../../core/types.js'
 import { SessionStore, type ContentBlock } from '../../core/session.js'
-import type { Connector, SendPayload } from '../../core/connector-center.js'
+import type { Connector, SendPayload } from '../types.js'
 import type { StreamableResult } from '../../core/ai-provider.js'
 import { persistMedia } from '../../core/media-store.js'
 import { readWebSubchannels } from '../../core/config.js'
diff --git a/src/core/__tests__/pipeline/persistence.spec.ts b/src/core/__tests__/pipeline/persistence.spec.ts
index 9bfddb91..396473c3 100644
--- a/src/core/__tests__/pipeline/persistence.spec.ts
+++ b/src/core/__tests__/pipeline/persistence.spec.ts
@@ -309,7 +309,7 @@
     )
   })

-  it('A12: multiple consecutive text events all buffered in intermediate flush', async () => {
+  it('A12: multiple consecutive text events persisted once via final result', async () => {
     const provider = new FakeProvider([
       textEvent('first '),
       textEvent('second '),
       textEvent('third'),
@@ -323,19 +323,9 @@
     const assistantWrites = session.writes.filter(w => w.method === 'appendAssistant')

-    const intermediateFlush = assistantWrites.find(w => {
-      const content = w.content as ContentBlock[]
-      return Array.isArray(content) && content.filter(b => b.type === 'text').length === 3
-    })
-    expect(intermediateFlush).toBeDefined()
-    expect(intermediateFlush!.content).toEqual([
-      { type: 'text', text: 'first ' },
-      { type: 'text', text: 'second ' },
-      { type: 'text', text: 'third' },
-    ])
-
-    const finalWrite = assistantWrites[assistantWrites.length - 1]
-    expect(finalWrite.content).toEqual([{ type: 'text', text: 'first second third' }])
+    // Only one assistant write — the authoritative final text (no duplicate intermediate flush)
+    expect(assistantWrites).toHaveLength(1)
+    expect(assistantWrites[0].content).toEqual([{ type: 'text', text: 'first second third' }])
   })

   it('A13: tool_use with complex nested input preserves structure', async () => {
diff --git a/src/core/engine.spec.ts b/src/core/agent-center.spec.ts
similarity index 97%
rename from src/core/engine.spec.ts
rename to src/core/agent-center.spec.ts
index 92bf0d97..7fb28870 100644
--- a/src/core/engine.spec.ts
+++ b/src/core/agent-center.spec.ts
@@ -5,7 +5,7 @@
 import { AgentCenter } from './agent-center.js'
 import { GenerateRouter } from './ai-provider.js'
 import { DEFAULT_COMPACTION_CONFIG, type CompactionConfig } from './compaction.js'
 import { VercelAIProvider } from '../ai-providers/vercel-ai-sdk/vercel-provider.js'
-import { createModelFromConfig } from './model-factory.js'
+import { createModelFromConfig } from '../ai-providers/vercel-ai-sdk/model-factory.js'
 import type { SessionStore, SessionEntry } from './session.js'

 // ==================== Helpers ====================
@@ -43,7 +43,7 @@
   const compaction = overrides.compaction ?? DEFAULT_COMPACTION_CONFIG

   vi.mocked(createModelFromConfig).mockResolvedValue({ model, key: 'test:mock-model' })
-  const provider = new VercelAIProvider(() => tools, instructions, maxSteps)
+  const provider = new VercelAIProvider(async () => tools, instructions, maxSteps)
   const router = new GenerateRouter(provider, null)

   return new AgentCenter({ router, compaction })
@@ -88,7 +88,7 @@ function makeSessionMock(entries: SessionEntry[] = []): SessionStore {
 }

 // ==================== Mock model-factory ====================

-vi.mock('./model-factory.js', () => ({
+vi.mock('../ai-providers/vercel-ai-sdk/model-factory.js', () => ({
   createModelFromConfig: vi.fn(),
 }))
diff --git a/src/core/agent-center.ts b/src/core/agent-center.ts
index ae4e2444..5f696fdc 100644
--- a/src/core/agent-center.ts
+++ b/src/core/agent-center.ts
@@ -165,10 +165,9 @@
       }
     }

-    // Flush any remaining intermediate blocks
-    if (currentAssistantBlocks.length > 0) {
-      intermediateMessages.push({ role: 'assistant', content: currentAssistantBlocks })
-    }
+    // Flush any remaining user blocks (defensive — tool_result already flushes)
+    // NOTE: Do NOT flush trailing assistant text blocks here — the authoritative
+    // final text comes from the done event and is persisted once in step 8.
     if (currentUserBlocks.length > 0) {
       intermediateMessages.push({ role: 'user', content: currentUserBlocks })
     }
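
The deleted assistant flush and the rewritten A12 test pin down the same invariant: streamed text deltas are never persisted as an intermediate assistant message; the done event carries the authoritative final text, written exactly once. A compact sketch of that rule (illustrative only, not the AgentCenter code):

    import type { ProviderEvent } from '../ai-providers/types.js'

    /** The only assistant text a turn persists is the done event's result. */
    function persistedAssistantText(events: ProviderEvent[]): string | undefined {
      for (const ev of events) {
        if (ev.type === 'done') return ev.result.text
      }
      return undefined // stream ended without done: nothing is persisted as final text
    }

    // Text events 'first ', 'second ', 'third' plus a done event whose result.text
    // is 'first second third' produce exactly one write of the latter.
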
diff --git a/src/core/ai-config.ts b/src/core/ai-config.ts
deleted file mode 100644
index d142b15e..00000000
--- a/src/core/ai-config.ts
+++ /dev/null
@@ -1,19 +0,0 @@
-import { readFile, writeFile, mkdir } from 'node:fs/promises'
-import { resolve } from 'node:path'
-import { readAIProviderConfig } from './config.js'
-
-export type AIBackend = 'claude-code' | 'vercel-ai-sdk' | 'agent-sdk'
-
-const CONFIG_PATH = resolve('data/config/ai-provider.json')
-
-export async function readAIConfig() {
-  const config = await readAIProviderConfig()
-  return { backend: config.backend }
-}
-
-export async function writeAIConfig(backend: AIBackend): Promise<void> {
-  const current = await readAIProviderConfig()
-  const updated = { ...current, backend }
-  await mkdir(resolve('data/config'), { recursive: true })
-  await writeFile(CONFIG_PATH, JSON.stringify(updated, null, 2) + '\n')
-}
diff --git a/src/core/ai-provider.ts b/src/core/ai-provider.ts
index c5bde42e..b8f69911 100644
--- a/src/core/ai-provider.ts
+++ b/src/core/ai-provider.ts
@@ -1,27 +1,18 @@
 /**
- * AI Provider abstraction — GenerateProvider + GenerateRouter.
+ * AI Provider abstraction — StreamableResult + GenerateRouter.
  *
- * GenerateProvider is a slim data-source adapter: each backend (Vercel AI SDK,
- * Claude Code CLI, Agent SDK) implements `ask()` and `generate()`.
- * Session management lives in AgentCenter, not here.
- *
- * GenerateRouter reads runtime config and resolves to the correct provider.
+ * Provider interface types (GenerateProvider, ProviderEvent, etc.) live in
+ * ai-providers/types.ts alongside the implementations. This file holds the
+ * core infrastructure that orchestrates providers.
  */

-import type { SessionStore } from './session.js'
-import type { SDKModelMessage } from './session.js'
-import type { CompactionConfig, CompactionResult } from './compaction.js'
-import type { MediaAttachment } from './types.js'
 import { readAIProviderConfig } from './config.js'
+import type { ProviderEvent, ProviderResult, GenerateProvider } from '../ai-providers/types.js'

-// ==================== Provider Events ====================
-
-/** Streaming event emitted by AI providers during generation. */
-export type ProviderEvent =
-  | { type: 'tool_use'; id: string; name: string; input: unknown }
-  | { type: 'tool_result'; tool_use_id: string; content: string }
-  | { type: 'text'; text: string }
-  | { type: 'done'; result: ProviderResult }
+export type {
+  ProviderEvent, ProviderResult, GenerateProvider,
+  GenerateInput, GenerateOpts,
+} from '../ai-providers/types.js'

 // ==================== StreamableResult ====================
@@ -140,56 +131,6 @@ export interface AskOptions {
   }
 }

-export interface ProviderResult {
-  text: string
-  media: MediaAttachment[]
-  mediaUrls?: string[]
-}
-
-// ==================== GenerateProvider ====================
-
-/**
- * Input prepared by AgentCenter, dispatched by provider.inputKind.
- *
- * - 'text': Claude Code / Agent SDK — single string prompt with history baked in.
- * - 'messages': Vercel AI SDK — structured ModelMessage[] (history carried natively).
- */
-export type GenerateInput =
-  | { kind: 'text'; prompt: string; systemPrompt?: string }
-  | { kind: 'messages'; messages: SDKModelMessage[]; systemPrompt?: string }
-
-/** Per-request options passed through to the underlying provider. */
-export interface GenerateOpts {
-  disabledTools?: string[]
-  vercelAiSdk?: { provider: string; model: string; baseUrl?: string; apiKey?: string }
-  agentSdk?: { model?: string; apiKey?: string; baseUrl?: string }
-}
-
-/**
- * Slim provider interface — pure data-source adapter.
- *
- * Does NOT touch session management. AgentCenter prepares the input,
- * the provider calls the backend and yields ProviderEvents.
- */
-export interface GenerateProvider {
-  /** Which input format this provider expects. */
-  readonly inputKind: 'text' | 'messages'
-  /** Session log provenance tag. */
-  readonly providerTag: 'vercel-ai' | 'claude-code' | 'agent-sdk'
-  /** Stateless one-shot prompt (used for compaction summarization, etc.). */
-  ask(prompt: string): Promise<string>
-  /** Stream events from the backend. Yields tool_use/tool_result/text, then done. */
-  generate(input: GenerateInput, opts?: GenerateOpts): AsyncIterable<ProviderEvent>
-  /**
-   * Optional: custom compaction strategy. If implemented, AgentCenter delegates
-   * compaction to the provider instead of using the default compactIfNeeded.
-   *
-   * Use case: providers with native server-side compaction (e.g. Anthropic API
-   * compact-2026-01-12) can bypass the local JSONL-based summarization.
-   */
-  compact?(session: SessionStore, config: CompactionConfig): Promise<CompactionResult>
-}
-
 // ==================== GenerateRouter ====================

 /** Reads runtime AI config and resolves to the correct GenerateProvider. */
diff --git a/src/core/config.ts b/src/core/config.ts
index 7fa71fbe..a8605b7f 100644
--- a/src/core/config.ts
+++ b/src/core/config.ts
@@ -518,6 +518,24 @@ export async function readToolsConfig() {
   }
 }

+// ==================== AI Backend Helpers ====================
+
+export type AIBackend = 'claude-code' | 'vercel-ai-sdk' | 'agent-sdk'
+
+/** Read the current AI backend from ai-provider.json. */
+export async function readAIBackend(): Promise<{ backend: AIBackend }> {
+  const config = await readAIProviderConfig()
+  return { backend: config.backend }
+}
+
+/** Switch the AI backend in ai-provider.json (preserves other fields). */
+export async function writeAIBackend(backend: AIBackend): Promise<void> {
+  const current = await readAIProviderConfig()
+  const updated = { ...current, backend }
+  await mkdir(CONFIG_DIR, { recursive: true })
+  await writeFile(resolve(CONFIG_DIR, 'ai-provider.json'), JSON.stringify(updated, null, 2) + '\n')
+}
+
 // ==================== Writer ====================

 export type ConfigSection = keyof Config
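
The two helpers replace the deleted core/ai-config.ts API one-for-one (readAIConfig becomes readAIBackend, writeAIConfig becomes writeAIBackend). A usage sketch; top-level await is valid here since the package is "type": "module":

    import { readAIBackend, writeAIBackend } from './config.js'

    const { backend } = await readAIBackend() // e.g. 'vercel-ai-sdk'
    if (backend !== 'agent-sdk') {
      // Rewrites ai-provider.json while preserving its other fields.
      await writeAIBackend('agent-sdk')
    }
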
diff --git a/src/core/connector-center.ts b/src/core/connector-center.ts
index 0df07f37..241c7638 100644
--- a/src/core/connector-center.ts
+++ b/src/core/connector-center.ts
@@ -10,65 +10,12 @@
  * replies go to whichever channel the user most recently interacted through.
  */

-import type { MediaAttachment } from './types.js'
 import type { EventLog } from './event-log.js'
+import type { MediaAttachment } from './types.js'
 import type { StreamableResult } from './ai-provider.js'
+import type { Connector, SendPayload, SendResult } from '../connectors/types.js'

-// ==================== Send Types ====================
-
-/** Structured payload for outbound send (heartbeat, cron, manual, etc.). */
-export interface SendPayload {
-  /** Whether this is a chat message or a notification. */
-  kind: 'message' | 'notification'
-  /** The text content to send. */
-  text: string
-  /** Media attachments (e.g. screenshots from tools). */
-  media?: MediaAttachment[]
-  /** Where this payload originated from. */
-  source?: 'heartbeat' | 'cron' | 'manual'
-}
-
-/** Result of a send() call. */
-export interface SendResult {
-  /** Whether the message was actually sent (false for pull-based connectors). */
-  delivered: boolean
-}
-
-// ==================== Connector Interface ====================
-
-/** Discoverable capabilities a connector may support. */
-export interface ConnectorCapabilities {
-  /** Can push messages proactively (heartbeat/cron). False for pull-based. */
-  push: boolean
-  /** Can send media attachments (images). */
-  media: boolean
-}
-
-/**
- * A connector that can send outbound messages to a user.
- *
- * Each plugin (Telegram, Web, MCP-ask) implements this interface and
- * registers itself with the ConnectorCenter.
- */
-export interface Connector {
-  /** Channel identifier, e.g. "telegram", "web", "mcp-ask". */
-  readonly channel: string
-  /** Recipient identifier (chat id, "default", session id, etc.). */
-  readonly to: string
-  /** What this connector can do. */
-  readonly capabilities: ConnectorCapabilities
-  /** Send a structured payload through this connector. */
-  send(payload: SendPayload): Promise<SendResult>
-  /**
-   * Optional: stream AI response events to the client in real-time.
-   * Connectors that support this can push ProviderEvents (tool_use, tool_result, text)
-   * as they arrive, then deliver the final result at the end.
-   *
-   * If not implemented, ConnectorCenter falls back to draining the stream
-   * and calling send() with the completed result.
-   */
-  sendStream?(stream: StreamableResult, meta?: Pick<SendPayload, 'kind' | 'source'>): Promise<SendResult>
-}
+export type { Connector, SendPayload, SendResult, ConnectorCapabilities } from '../connectors/types.js'

 // ==================== Notify Types ====================
diff --git a/src/main.ts b/src/main.ts
index 169ffb24..ceae7be4 100644
--- a/src/main.ts
+++ b/src/main.ts
@@ -3,7 +3,7 @@
 import { resolve, dirname } from 'path'
 // Engine removed — AgentCenter is the top-level AI entry point
 import { loadConfig, loadTradingConfig } from './core/config.js'
 import type { Plugin, EngineContext, ReconnectResult } from './core/types.js'
-import { McpPlugin } from './plugins/mcp.js'
+import { McpPlugin } from './server/mcp.js'
 import { TelegramPlugin } from './connectors/telegram/index.js'
 import { WebPlugin } from './connectors/web/index.js'
 import { McpAskPlugin } from './connectors/mcp-ask/index.js'
@@ -31,7 +31,7 @@
 import { OpenBBEquityClient } from './openbb/equity/client.js'
 import { OpenBBCryptoClient } from './openbb/crypto/client.js'
 import { OpenBBCurrencyClient } from './openbb/currency/client.js'
 import { OpenBBNewsClient } from './openbb/news/client.js'
-import { startEmbeddedOpenBBServer } from './openbb/api-server.js'
+import { startEmbeddedOpenBBServer } from './server/opentypebb.js'
 import { createMarketSearchTools } from './extension/market/index.js'
 import { createNewsTools } from './extension/news/index.js'
 import { createAnalysisTools } from './extension/analysis-kit/index.js'
diff --git a/src/openbb/sdk/base-client.ts b/src/openbb/sdk/base-client.ts
index 1d717bab..fbfea082 100644
--- a/src/openbb/sdk/base-client.ts
+++ b/src/openbb/sdk/base-client.ts
@@ -11,7 +11,7 @@
  * → executor.execute('fmp', 'EquityQuote', params, credentials)
  */

-import type { QueryExecutor } from 'opentypebb'
+import type { QueryExecutor } from '@traderalice/opentypebb'

 export class SDKBaseClient {
   constructor(
diff --git a/src/openbb/sdk/executor.ts b/src/openbb/sdk/executor.ts
index 8b87cc97..18aadc12 100644
--- a/src/openbb/sdk/executor.ts
+++ b/src/openbb/sdk/executor.ts
@@ -6,7 +6,7 @@
 * without HTTP overhead.
 */

-import { createExecutor, type QueryExecutor } from 'opentypebb'
+import { createExecutor, type QueryExecutor } from '@traderalice/opentypebb'

 let _executor: QueryExecutor | null = null
diff --git a/src/openbb/sdk/route-map.ts b/src/openbb/sdk/route-map.ts
index fa4615df..c04a2543 100644
--- a/src/openbb/sdk/route-map.ts
+++ b/src/openbb/sdk/route-map.ts
@@ -8,7 +8,7 @@
 * to call for each API path, providing a drop-in replacement for HTTP routing.
 */

-import { loadAllRouters } from 'opentypebb'
+import { loadAllRouters } from '@traderalice/opentypebb'

 let _routeMap: Map | null = null
diff --git a/src/plugins/mcp.ts b/src/server/mcp.ts
similarity index 100%
rename from src/plugins/mcp.ts
rename to src/server/mcp.ts
diff --git a/src/openbb/api-server.ts b/src/server/opentypebb.ts
similarity index 91%
rename from src/openbb/api-server.ts
rename to src/server/opentypebb.ts
index c05904b8..c4f28886 100644
--- a/src/openbb/api-server.ts
+++ b/src/server/opentypebb.ts
@@ -9,7 +9,7 @@
 import { Hono } from 'hono'
 import { cors } from 'hono/cors'
 import { serve } from '@hono/node-server'
-import { createExecutor, loadAllRouters } from 'opentypebb'
+import { createExecutor, loadAllRouters } from '@traderalice/opentypebb'

 export function startEmbeddedOpenBBServer(port: number): void {
   const executor = createExecutor()
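
For reference, the in-process call path these renamed imports serve, following the call shape documented in src/openbb/sdk/base-client.ts above (a sketch: the params object and credential key are placeholders, not a contract from this diff):

    import { createExecutor } from '@traderalice/opentypebb'

    const executor = createExecutor()
    // provider, model name, params, credentials, as in the base-client doc comment.
    const quote = await executor.execute('fmp', 'EquityQuote', { symbol: 'AAPL' }, { apiKey: process.env.FMP_API_KEY })
    console.log(quote)
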