Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@ report.[0-9]_.[0-9]_.[0-9]_.[0-9]_.json
.eslintcache
.cache
*.tsbuildinfo
.bun-cache/

# IntelliJ based IDEs
.idea
Expand Down
11 changes: 10 additions & 1 deletion bun.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions package.json
Original file line number Diff line number Diff line change
Expand Up @@ -58,6 +58,7 @@
"typescript": "^5"
},
"dependencies": {
"@anthropic-ai/sdk": "^0.87.0",
"commander": "^14.0.3",
"gray-matter": "^4.0.3",
"ink": "^6.8.0",
Expand Down
7 changes: 4 additions & 3 deletions src/cli/bootstrap/model-wizard.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -9,11 +9,12 @@ import { currentTheme } from "../tui/themes";

type Step = "provider" | "apiKey" | "modelName" | "baseURL" | "confirm";

function buildModelEntry(baseURL: string, apiKey: string, modelName: string): ModelEntry {
function buildModelEntry(baseURL: string, apiKey: string, modelName: string, provider: ModelEntry["provider"]): ModelEntry {
return {
name: modelName.trim(),
baseURL: baseURL.trim(),
APIKey: apiKey.trim(),
provider,
};
}

Expand Down Expand Up @@ -75,7 +76,7 @@ function ModelWizard({ onComplete, onAbort }: ModelWizardProps) {

const finishWithBaseURL = (url: string) => {
setCustomBaseURL(url);
const entry = buildModelEntry(url, apiKey, modelName);
const entry = buildModelEntry(url, apiKey, modelName, selectedProvider.providerType);
setPendingEntry(entry);
setStep("confirm");
};
Expand All @@ -84,7 +85,7 @@ function ModelWizard({ onComplete, onAbort }: ModelWizardProps) {
if (!selectedProvider.baseURL) {
setStep("baseURL");
} else {
const entry = buildModelEntry(selectedProvider.baseURL, apiKey, modelName);
const entry = buildModelEntry(selectedProvider.baseURL, apiKey, modelName, selectedProvider.providerType);
setPendingEntry(entry);
setStep("confirm");
}
Expand Down
2 changes: 2 additions & 0 deletions src/cli/config/schema.ts
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,8 @@ export const modelEntrySchema = z.object({
name: z.string().min(1),
baseURL: z.string().min(1),
APIKey: z.string().min(1),
/** Provider type: "openai" (default) or "anthropic". */
provider: z.enum(["openai", "anthropic"]).optional().default("openai"),
});

export const helixentConfigSchema = z.object({
Expand Down
18 changes: 14 additions & 4 deletions src/cli/index.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,9 @@ import { validateIntegrity } from "@/cli/bootstrap";
import { registerCommands } from "@/cli/commands";
import { loadConfig } from "@/cli/config";
import { createCodingAgent, globalApprovalManager } from "@/coding";
import { AnthropicModelProvider } from "@/community/anthropic";
import { OpenAIModelProvider } from "@/community/openai";
import type { ModelProvider } from "@/foundation";
import { Model } from "@/foundation";

import { App } from "./tui";
Expand Down Expand Up @@ -38,10 +40,18 @@ if (args.length > 0) {
throw new Error("No models configured. Run `helixent config model add` to add one.");
}

const provider = new OpenAIModelProvider({
baseURL: entry.baseURL,
apiKey: entry.APIKey,
});
let provider: ModelProvider;
if (entry.provider === "anthropic") {
provider = new AnthropicModelProvider({
baseURL: entry.baseURL,
apiKey: entry.APIKey,
});
} else {
provider = new OpenAIModelProvider({
baseURL: entry.baseURL,
apiKey: entry.APIKey,
});
}

const model = new Model(entry.name, provider, {
max_tokens: 16 * 1024,
Expand Down
23 changes: 14 additions & 9 deletions src/cli/model-providers.ts
Original file line number Diff line number Diff line change
@@ -1,22 +1,27 @@
/** Which SDK wire protocol a model endpoint speaks. */
export type ProviderType = "openai" | "anthropic";

/** Static catalog entry describing one selectable model-provider preset. */
export type ModelProviderConfig = {
  label: string;
  id: string;
  baseURL: string;
  providerType: ProviderType;
};

/**
 * Presets offered by the model wizard.
 *
 * Every OpenAI-compatible endpoint uses providerType "openai"; only
 * Anthropic's native API uses "anthropic". The "Other" preset has an empty
 * baseURL — the wizard prompts the user for one in that case.
 */
export const MODEL_PROVIDERS: ModelProviderConfig[] = [
  { label: "Anthropic (Claude)", id: "anthropic", baseURL: "https://api.anthropic.com", providerType: "anthropic" },
  { label: "OpenAI", id: "openai", baseURL: "https://api.openai.com/v1", providerType: "openai" },
  { label: "Volcengine - General", id: "volcengine", baseURL: "https://ark.cn-beijing.volces.com/api/v3", providerType: "openai" },
  {
    label: "Volcengine - Coding Plan",
    id: "volcengine_coding_plan",
    baseURL: "https://ark.cn-beijing.volces.com/api/coding/v3",
    providerType: "openai",
  },
  { label: "Qwen (Aliyun)", id: "qwen", baseURL: "https://dashscope.aliyuncs.com/compatible-mode/v1", providerType: "openai" },
  { label: "Minimax (Domestic)", id: "minimax_cn", baseURL: "https://api.minimaxi.com/v1", providerType: "openai" },
  { label: "Minimax (Global)", id: "minimax_global", baseURL: "https://api.minimax.io/v1", providerType: "openai" },
  { label: "GLM (Zhipu AI)", id: "glm", baseURL: "https://open.bigmodel.cn/api/paas/v4", providerType: "openai" },
  { label: "Kimi (Moonshot)", id: "kimi", baseURL: "https://api.moonshot.cn/v1", providerType: "openai" },
  { label: "DeepSeek (OpenAI compatible)", id: "deepseek", baseURL: "https://api.deepseek.com/v1", providerType: "openai" },
  { label: "Other", id: "other", baseURL: "", providerType: "openai" },
];
1 change: 1 addition & 0 deletions src/community/anthropic/index.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
export * from "./model-provider";
88 changes: 88 additions & 0 deletions src/community/anthropic/model-provider.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,88 @@
import Anthropic from "@anthropic-ai/sdk";

import type { AssistantMessage, ModelProvider, ModelProviderInvokeParams, TokenUsage } from "@/foundation";

import { StreamAccumulator } from "./stream-utils";
import {
convertToAnthropicMessages,
convertToAnthropicTools,
extractSystemPrompt,
parseAssistantMessage,
} from "./utils";

/**
 * A provider for the Anthropic API (Claude models).
 */
export class AnthropicModelProvider implements ModelProvider {
  _client: Anthropic;

  constructor({ baseURL, apiKey }: { baseURL?: string; apiKey?: string } = {}) {
    // Only pass baseURL if it differs from the SDK default, so the SDK's
    // own URL construction logic is used for the standard Anthropic endpoint.
    const isDefaultURL = !baseURL || baseURL === "https://api.anthropic.com";
    this._client = new Anthropic({
      ...(isDefaultURL ? {} : { baseURL }),
      apiKey,
    });
  }

  /**
   * Performs a single non-streaming request and converts the response into
   * the foundation AssistantMessage shape (with token usage attached).
   */
  async invoke(params: ModelProviderInvokeParams) {
    const response = await this._client.messages.create(this._baseMessageParams(params), {
      signal: params.signal,
    });
    return parseAssistantMessage(response, toTokenUsage(response.usage));
  }

  /**
   * Streams a response, yielding a progressively-accumulated AssistantMessage
   * snapshot after each SSE event.
   */
  async *stream(params: ModelProviderInvokeParams): AsyncGenerator<AssistantMessage> {
    const response = await this._client.messages.create(
      { ...this._baseMessageParams(params), stream: true },
      { signal: params.signal },
    );

    const acc = new StreamAccumulator();
    for await (const event of response) {
      acc.push(event);
      yield acc.snapshot();
    }
  }

  /**
   * Builds the shared `messages.create` request body. The system prompt is
   * lifted out of `messages` (Anthropic takes it as a top-level field), and
   * caller-supplied `options` are spread last so they can override the
   * `max_tokens: 8192` default.
   */
  private _baseMessageParams({
    model,
    messages,
    tools,
    options,
  }: ModelProviderInvokeParams): Anthropic.MessageCreateParamsNonStreaming {
    const system = extractSystemPrompt(messages);
    const anthropicMessages = convertToAnthropicMessages(messages);
    const anthropicTools = tools ? convertToAnthropicTools(tools) : undefined;

    // Normalize options for Anthropic's API.
    // When thinking is enabled, Anthropic requires `budget_tokens`.
    // Default the budget to 80% of max_tokens, leaving the rest for the reply.
    const normalizedOptions = { ...options };
    const thinking = normalizedOptions.thinking as { type: string; budget_tokens?: number } | undefined;
    if (thinking?.type === "enabled" && !thinking.budget_tokens) {
      const maxTokens = (normalizedOptions.max_tokens as number | undefined) ?? 8192;
      // Copy rather than mutate: after the shallow spread above, `thinking`
      // still aliases the caller's nested options.thinking object, and
      // writing budget_tokens into it would leak back into the caller.
      normalizedOptions.thinking = { ...thinking, budget_tokens: Math.floor(maxTokens * 0.8) };
    }

    return {
      model,
      max_tokens: 8192,
      messages: anthropicMessages,
      ...(system ? { system } : {}),
      ...(anthropicTools && anthropicTools.length > 0 ? { tools: anthropicTools } : {}),
      ...normalizedOptions,
    };
  }
}

/**
 * Maps the Anthropic SDK's usage block onto the foundation TokenUsage shape;
 * missing counters are treated as zero. Returns undefined when the SDK
 * supplied no usage at all.
 */
function toTokenUsage(usage?: Anthropic.Usage): TokenUsage | undefined {
  if (!usage) return undefined;
  const promptTokens = usage.input_tokens ?? 0;
  const completionTokens = usage.output_tokens ?? 0;
  return {
    promptTokens,
    completionTokens,
    totalTokens: promptTokens + completionTokens,
  };
}
Loading
Loading