30 changes: 30 additions & 0 deletions packages/types/src/providers/zai.ts
```diff
@@ -120,6 +120,21 @@ export const internationalZAiModels = {
 		description:
 			"GLM-4.7 is Zhipu's latest model with built-in thinking capabilities enabled by default. It provides enhanced reasoning for complex tasks while maintaining fast response times.",
 	},
+	"glm-5": {
+		maxTokens: 16_384,
+		contextWindow: 202_752,
+		supportsImages: false,
+		supportsPromptCache: true,
+		supportsReasoningEffort: ["disable", "medium"],
+		reasoningEffort: "medium",
+		preserveReasoning: true,
+		inputPrice: 0.6,
+		outputPrice: 2.2,
+		cacheWritesPrice: 0,
+		cacheReadsPrice: 0.11,
+		description:
+			"GLM-5 is Zhipu's next-generation model with a 202k context window and built-in thinking capabilities. It delivers state-of-the-art reasoning, coding, and agentic performance.",
+	},
 	"glm-4.7-flash": {
 		maxTokens: 16_384,
 		contextWindow: 200_000,
@@ -281,6 +296,21 @@ export const mainlandZAiModels = {
 		description:
 			"GLM-4.7 is Zhipu's latest model with built-in thinking capabilities enabled by default. It provides enhanced reasoning for complex tasks while maintaining fast response times.",
 	},
+	"glm-5": {
+		maxTokens: 16_384,
+		contextWindow: 202_752,
+		supportsImages: false,
+		supportsPromptCache: true,
+		supportsReasoningEffort: ["disable", "medium"],
+		reasoningEffort: "medium",
+		preserveReasoning: true,
+		inputPrice: 0.29,
+		outputPrice: 1.14,
+		cacheWritesPrice: 0,
+		cacheReadsPrice: 0.057,
+		description:
+			"GLM-5 is Zhipu's next-generation model with a 202k context window and built-in thinking capabilities. It delivers state-of-the-art reasoning, coding, and agentic performance.",
+	},
 	"glm-4.7-flash": {
 		maxTokens: 16_384,
 		contextWindow: 204_800,
```
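Both tables register the same `glm-5` entry; only the prices differ between the international and mainland coding endpoints. As a quick sketch of how these tables are typically consumed (the `zaiModelsFor` helper and the import path below are illustrative assumptions, not part of this PR; the table names and API-line identifiers come from the diff and its tests):

```ts
// Sketch only: resolve the Z.AI model table for a configured API line.
// The import path and helper name are assumptions for illustration.
import { internationalZAiModels, mainlandZAiModels } from "@roo-code/types"

type ZAiApiLine = "international_coding" | "china_coding"

function zaiModelsFor(line: ZAiApiLine) {
	return line === "china_coding" ? mainlandZAiModels : internationalZAiModels
}

// The new entry carries the thinking metadata the handler checks later:
const glm5 = zaiModelsFor("international_coding")["glm-5"]
console.log(glm5.contextWindow) // 202_752
console.log(glm5.supportsReasoningEffort) // ["disable", "medium"]
```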
100 changes: 100 additions & 0 deletions src/api/providers/__tests__/zai.spec.ts
```diff
@@ -121,6 +121,22 @@ describe("ZAiHandler", () => {
 			expect(model.info.preserveReasoning).toBe(true)
 		})
 
+		it("should return GLM-5 international model with thinking support", () => {
+			const testModelId: InternationalZAiModelId = "glm-5"
+			const handlerWithModel = new ZAiHandler({
+				apiModelId: testModelId,
+				zaiApiKey: "test-zai-api-key",
+				zaiApiLine: "international_coding",
+			})
+			const model = handlerWithModel.getModel()
+			expect(model.id).toBe(testModelId)
+			expect(model.info).toEqual(internationalZAiModels[testModelId])
+			expect(model.info.contextWindow).toBe(202_752)
+			expect(model.info.supportsReasoningEffort).toEqual(["disable", "medium"])
+			expect(model.info.reasoningEffort).toBe("medium")
+			expect(model.info.preserveReasoning).toBe(true)
+		})
+
 		it("should return GLM-4.5v international model with vision support", () => {
 			const testModelId: InternationalZAiModelId = "glm-4.5v"
 			const handlerWithModel = new ZAiHandler({
@@ -203,6 +219,22 @@ describe("ZAiHandler", () => {
 			expect(model.info.reasoningEffort).toBe("medium")
 			expect(model.info.preserveReasoning).toBe(true)
 		})
+
+		it("should return GLM-5 China model with thinking support", () => {
+			const testModelId: MainlandZAiModelId = "glm-5"
+			const handlerWithModel = new ZAiHandler({
+				apiModelId: testModelId,
+				zaiApiKey: "test-zai-api-key",
+				zaiApiLine: "china_coding",
+			})
+			const model = handlerWithModel.getModel()
+			expect(model.id).toBe(testModelId)
+			expect(model.info).toEqual(mainlandZAiModels[testModelId])
+			expect(model.info.contextWindow).toBe(202_752)
+			expect(model.info.supportsReasoningEffort).toEqual(["disable", "medium"])
+			expect(model.info.reasoningEffort).toBe("medium")
+			expect(model.info.preserveReasoning).toBe(true)
+		})
 	})
 
 	describe("International API", () => {
@@ -508,6 +540,74 @@ describe("ZAiHandler", () => {
 		})
 	})
 
+	describe("GLM-5 Thinking Mode", () => {
+		it("should enable thinking by default for GLM-5 (default reasoningEffort is medium)", async () => {
+			const handlerWithModel = new ZAiHandler({
+				apiModelId: "glm-5",
+				zaiApiKey: "test-zai-api-key",
+				zaiApiLine: "international_coding",
+			})
+
+			async function* mockFullStream() {
+				yield { type: "text-delta", text: "response" }
+			}
+
+			mockStreamText.mockReturnValue({
+				fullStream: mockFullStream(),
+				usage: Promise.resolve({ inputTokens: 0, outputTokens: 0 }),
+			})
+
+			const stream = handlerWithModel.createMessage("system prompt", [])
+			for await (const _chunk of stream) {
+				// drain
+			}
+
+			expect(mockStreamText).toHaveBeenCalledWith(
+				expect.objectContaining({
+					providerOptions: {
+						zhipu: {
+							thinking: { type: "enabled" },
+						},
+					},
+				}),
+			)
+		})
+
+		it("should disable thinking for GLM-5 when reasoningEffort is set to disable", async () => {
+			const handlerWithModel = new ZAiHandler({
+				apiModelId: "glm-5",
+				zaiApiKey: "test-zai-api-key",
+				zaiApiLine: "international_coding",
+				enableReasoningEffort: true,
+				reasoningEffort: "disable",
+			})
+
+			async function* mockFullStream() {
+				yield { type: "text-delta", text: "response" }
+			}
+
+			mockStreamText.mockReturnValue({
+				fullStream: mockFullStream(),
+				usage: Promise.resolve({ inputTokens: 0, outputTokens: 0 }),
+			})
+
+			const stream = handlerWithModel.createMessage("system prompt", [])
+			for await (const _chunk of stream) {
+				// drain
+			}
+
+			expect(mockStreamText).toHaveBeenCalledWith(
+				expect.objectContaining({
+					providerOptions: {
+						zhipu: {
+							thinking: { type: "disabled" },
+						},
+					},
+				}),
+			)
+		})
+	})
+
 	describe("completePrompt", () => {
 		it("should complete a prompt using generateText", async () => {
 			mockGenerateText.mockResolvedValue({
```
4 changes: 2 additions & 2 deletions src/api/providers/zai.ts
```diff
@@ -115,8 +115,8 @@ export class ZAiHandler extends BaseProvider implements SingleCompletionHandler
 			toolChoice: mapToolChoice(metadata?.tool_choice),
 		}
 
-		// GLM-4.7 thinking mode: pass thinking parameter via providerOptions
-		const isThinkingModel = modelId === "glm-4.7" && Array.isArray(info.supportsReasoningEffort)
+		// Thinking mode: pass thinking parameter via providerOptions for models that support it (e.g. GLM-4.7, GLM-5)
+		const isThinkingModel = Array.isArray(info.supportsReasoningEffort)
 
 		if (isThinkingModel) {
 			const useReasoning = shouldUseReasoningEffort({ model: info, settings: this.options })
```
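Condensing the change: the handler no longer keys thinking mode off the `glm-4.7` model id; any model whose info declares a `supportsReasoningEffort` array gets the `zhipu.thinking` provider option, toggled by `shouldUseReasoningEffort`. Below is a minimal self-contained sketch of that decision. The types are simplified and `shouldUseReasoningEffort` is stubbed here on the assumption that an effort of "disable" turns thinking off and anything else leaves it on; the real helper may differ.

```ts
// Simplified types standing in for the real ModelInfo and provider settings.
type Effort = "disable" | "medium"
interface ModelInfo {
	supportsReasoningEffort?: Effort[]
	reasoningEffort?: Effort
}
interface Settings {
	enableReasoningEffort?: boolean
	reasoningEffort?: Effort
}

// Stub for the helper referenced in the diff (assumption: user settings win
// when enableReasoningEffort is set, otherwise the model default applies).
function shouldUseReasoningEffort({ model, settings }: { model: ModelInfo; settings: Settings }): boolean {
	const effort = settings.enableReasoningEffort
		? (settings.reasoningEffort ?? model.reasoningEffort)
		: model.reasoningEffort
	return effort !== undefined && effort !== "disable"
}

function thinkingProviderOptions(info: ModelInfo, settings: Settings) {
	// Was: modelId === "glm-4.7" && Array.isArray(...). Now the metadata alone
	// marks a thinking model, so GLM-5 picks this behavior up automatically.
	if (!Array.isArray(info.supportsReasoningEffort)) return undefined
	const useReasoning = shouldUseReasoningEffort({ model: info, settings })
	return { zhipu: { thinking: { type: useReasoning ? "enabled" : "disabled" } } }
}

// Matches the tests above:
// thinkingProviderOptions({ supportsReasoningEffort: ["disable", "medium"], reasoningEffort: "medium" }, {})
//   -> { zhipu: { thinking: { type: "enabled" } } }
// With settings { enableReasoningEffort: true, reasoningEffort: "disable" }
//   -> { zhipu: { thinking: { type: "disabled" } } }
```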