diff --git a/AGENTS.md b/AGENTS.md index 274e0df..df5d520 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -58,6 +58,7 @@ cat ~/.config/DankMaterialShell/plugin_settings.json | jq .aiAssistant - OpenAI (gpt-5.2 models) - Anthropic (claude-4.5 models) - Google Gemini (gemini-2.5-flash, gemini-3-flash-preview) +- Inception / Mercury 2: OpenAI-compatible chat completions plus [API parameters](https://docs.inceptionlabs.ai/get-started/api-parameters) (`reasoning_effort`, `reasoning_summary`, `reasoning_summary_wait`); [streaming](https://docs.inceptionlabs.ai/capabilities/streaming) - Custom (OpenAI-compatible endpoints) **Custom Provider Notes**: diff --git a/AIApiAdapters.js b/AIApiAdapters.js index 2b19ed8..49231d5 100644 --- a/AIApiAdapters.js +++ b/AIApiAdapters.js @@ -53,6 +53,8 @@ function buildRequest(provider, payload, apiKey) { return anthropicRequest(payload, apiKey); case "gemini": return geminiRequest(payload, apiKey); + case "inception": + return inceptionRequest(payload, apiKey); case "custom": return customRequest(payload, apiKey); default: @@ -73,6 +75,34 @@ function openaiRequest(payload, apiKey) { return { url, headers, body: JSON.stringify(body) }; } +function inceptionRequest(payload, apiKey) { + // Mercury 2 params: https://docs.inceptionlabs.ai/get-started/api-parameters + const url = openaiChatCompletionsUrl(payload.baseUrl || "https://api.inceptionlabs.ai/v1"); + const headers = ["-H", "Content-Type: application/json", "-H", "Authorization: Bearer " + apiKey]; + const maxTok = payload.max_tokens; + const mt = (typeof maxTok === "number" && maxTok > 0) ? Math.min(50000, maxTok) : 8192; + let t = (typeof payload.temperature === "number") ? 
payload.temperature : 0.75; + if (t < 0.5) + t = 0.5; + if (t > 1.0) + t = 1.0; + const body = { + model: payload.model, + messages: payload.messages, + max_tokens: mt, + temperature: t, + stream: true + }; + const efforts = ["instant", "low", "medium", "high"]; + const effort = String(payload.inceptionReasoningEffort || "medium").toLowerCase(); + if (efforts.indexOf(effort) >= 0) + body.reasoning_effort = effort; + body.reasoning_summary = payload.inceptionReasoningSummary !== false; + if (payload.inceptionReasoningSummaryWait === true) + body.reasoning_summary_wait = true; + return { url, headers, body: JSON.stringify(body) }; +} + function anthropicRequest(payload, apiKey) { const url = (payload.baseUrl || "https://api.anthropic.com") + "/v1/messages"; const headers = [ diff --git a/AIAssistantService.qml b/AIAssistantService.qml index a6c0b55..66a2817 100644 --- a/AIAssistantService.qml +++ b/AIAssistantService.qml @@ -47,6 +47,9 @@ Item { property string sessionApiKey: "" // In-memory key property string apiKeyEnvVar: "" property bool useMonospace: false + property string inceptionReasoningEffort: "medium" + property bool inceptionReasoningSummary: true + property bool inceptionReasoningSummaryWait: false readonly property bool debugEnabled: (Quickshell.env("DMS_LOG_LEVEL") || "").toLowerCase() === "debug" @@ -56,6 +59,20 @@ Item { function defaultsForProvider(id) { switch (id) { + case "inception": + return { + baseUrl: "https://api.inceptionlabs.ai/v1", + model: "mercury-2", + apiKey: "", + saveApiKey: false, + apiKeyEnvVar: "", + temperature: 0.75, + maxTokens: 8192, + timeout: 30, + inceptionReasoningEffort: "medium", + inceptionReasoningSummary: true, + inceptionReasoningSummaryWait: false + }; case "anthropic": return { baseUrl: "https://api.anthropic.com", @@ -106,7 +123,7 @@ Item { function normalizedProfile(id, raw) { const defaults = defaultsForProvider(id); const p = raw || {}; - return { + const profile = { baseUrl: String(p.baseUrl || 
defaults.baseUrl).trim(), model: String(p.model || defaults.model).trim(), apiKey: String(p.apiKey || "").trim(), @@ -116,6 +133,14 @@ Item { maxTokens: (typeof p.maxTokens === "number") ? p.maxTokens : defaults.maxTokens, timeout: (typeof p.timeout === "number") ? p.timeout : defaults.timeout }; + if (id === "inception") { + const efforts = ["instant", "low", "medium", "high"]; + let eff = String(p.inceptionReasoningEffort || defaults.inceptionReasoningEffort || "medium").toLowerCase(); + profile.inceptionReasoningEffort = efforts.indexOf(eff) >= 0 ? eff : "medium"; + profile.inceptionReasoningSummary = (typeof p.inceptionReasoningSummary === "boolean") ? p.inceptionReasoningSummary : (defaults.inceptionReasoningSummary !== false); + profile.inceptionReasoningSummaryWait = !!p.inceptionReasoningSummaryWait; + } + return profile; } function mergedProviders(rawProviders) { @@ -123,13 +148,14 @@ Item { openai: normalizedProfile("openai", null), anthropic: normalizedProfile("anthropic", null), gemini: normalizedProfile("gemini", null), + inception: normalizedProfile("inception", null), custom: normalizedProfile("custom", null) }; if (!rawProviders || typeof rawProviders !== "object") return base; - const ids = ["openai", "anthropic", "gemini", "custom"]; + const ids = ["openai", "anthropic", "gemini", "inception", "custom"]; for (let i = 0; i < ids.length; i++) { const id = ids[i]; if (rawProviders[id] && typeof rawProviders[id] === "object") { @@ -154,7 +180,7 @@ Item { function loadSettings() { suppressConfigChange = true const selectedProvider = String(PluginService.loadPluginData(pluginId, "provider", "openai")).trim() || "openai" - const providerId = ["openai", "anthropic", "gemini", "custom"].includes(selectedProvider) ? selectedProvider : "openai" + const providerId = ["openai", "anthropic", "gemini", "inception", "custom"].includes(selectedProvider) ? 
selectedProvider : "openai" const rawProviders = PluginService.loadPluginData(pluginId, "providers", null) let nextProviders = mergedProviders(rawProviders) @@ -186,6 +212,11 @@ Item { apiKey = active.apiKey saveApiKey = active.saveApiKey apiKeyEnvVar = active.apiKeyEnvVar + if (provider === "inception") { + inceptionReasoningEffort = active.inceptionReasoningEffort || "medium"; + inceptionReasoningSummary = active.inceptionReasoningSummary !== false; + inceptionReasoningSummaryWait = !!active.inceptionReasoningSummaryWait; + } useMonospace = PluginService.loadPluginData(pluginId, "useMonospace", false) suppressConfigChange = false @@ -348,6 +379,8 @@ Item { return Quickshell.env("DMS_ANTHROPIC_API_KEY") || ""; case "gemini": return Quickshell.env("DMS_GEMINI_API_KEY") || ""; + case "inception": + return Quickshell.env("DMS_INCEPTION_API_KEY") || ""; case "custom": return Quickshell.env("DMS_CUSTOM_API_KEY") || ""; default: @@ -361,6 +394,8 @@ Item { return Quickshell.env("ANTHROPIC_API_KEY") || ""; case "gemini": return Quickshell.env("GEMINI_API_KEY") || ""; + case "inception": + return Quickshell.env("INCEPTION_API_KEY") || ""; case "custom": return ""; default: @@ -563,7 +598,7 @@ Item { } msgs.push({ role: "user", content: latestText }); - return { + const payload = { provider: provider, baseUrl: baseUrl, model: model, @@ -573,6 +608,12 @@ Item { stream: true, timeout: timeout }; + if (provider === "inception") { + payload.inceptionReasoningEffort = inceptionReasoningEffort; + payload.inceptionReasoningSummary = inceptionReasoningSummary; + payload.inceptionReasoningSummaryWait = inceptionReasoningSummaryWait; + } + return payload; } function buildCurlCommand(payload) { diff --git a/AIAssistantSettings.qml b/AIAssistantSettings.qml index bc3d3ba..13033aa 100644 --- a/AIAssistantSettings.qml +++ b/AIAssistantSettings.qml @@ -28,6 +28,9 @@ Item { property real temperature: 0.7 property int maxTokens: 4096 property bool useMonospace: false + property string 
inceptionReasoningEffort: "medium" + property bool inceptionReasoningSummary: true + property bool inceptionReasoningSummaryWait: false function save(key, value) { PluginService.savePluginData(pluginId, key, value) @@ -36,6 +39,20 @@ Item { function defaultsForProvider(id) { switch (id) { + case "inception": + return { + baseUrl: "https://api.inceptionlabs.ai/v1", + model: "mercury-2", + apiKey: "", + saveApiKey: false, + apiKeyEnvVar: "", + temperature: 0.75, + maxTokens: 8192, + timeout: 30, + inceptionReasoningEffort: "medium", + inceptionReasoningSummary: true, + inceptionReasoningSummaryWait: false + }; case "anthropic": return { baseUrl: "https://api.anthropic.com", @@ -86,7 +103,7 @@ Item { function normalizedProfile(id, raw) { const d = defaultsForProvider(id) const p = raw || {} - return { + const profile = { baseUrl: String(p.baseUrl || d.baseUrl).trim(), model: String(p.model || d.model).trim(), apiKey: String(p.apiKey || "").trim(), @@ -96,6 +113,14 @@ Item { maxTokens: (typeof p.maxTokens === "number") ? p.maxTokens : d.maxTokens, timeout: (typeof p.timeout === "number") ? p.timeout : d.timeout } + if (id === "inception") { + const efforts = ["instant", "low", "medium", "high"] + let eff = String(p.inceptionReasoningEffort || d.inceptionReasoningEffort || "medium").toLowerCase() + profile.inceptionReasoningEffort = efforts.indexOf(eff) >= 0 ? eff : "medium" + profile.inceptionReasoningSummary = (typeof p.inceptionReasoningSummary === "boolean") ? 
p.inceptionReasoningSummary : (d.inceptionReasoningSummary !== false) + profile.inceptionReasoningSummaryWait = !!p.inceptionReasoningSummaryWait + } + return profile } function mergedProviders(rawProviders) { @@ -103,12 +128,13 @@ Item { openai: normalizedProfile("openai", null), anthropic: normalizedProfile("anthropic", null), gemini: normalizedProfile("gemini", null), + inception: normalizedProfile("inception", null), custom: normalizedProfile("custom", null) } if (!rawProviders || typeof rawProviders !== "object") return next - const ids = ["openai", "anthropic", "gemini", "custom"] + const ids = ["openai", "anthropic", "gemini", "inception", "custom"] for (let i = 0; i < ids.length; i++) { const id = ids[i] if (rawProviders[id] && typeof rawProviders[id] === "object") { @@ -132,6 +158,11 @@ Item { apiKeyEnvVar = active.apiKeyEnvVar temperature = active.temperature maxTokens = active.maxTokens + if (provider === "inception") { + inceptionReasoningEffort = active.inceptionReasoningEffort || "medium" + inceptionReasoningSummary = active.inceptionReasoningSummary !== false + inceptionReasoningSummaryWait = !!active.inceptionReasoningSummaryWait + } } function setProvider(providerId) { @@ -164,7 +195,7 @@ Item { function load() { const selectedProvider = String(PluginService.loadPluginData(pluginId, "provider", "openai")).trim() || "openai" - provider = ["openai", "anthropic", "gemini", "custom"].includes(selectedProvider) ? selectedProvider : "openai" + provider = ["openai", "anthropic", "gemini", "inception", "custom"].includes(selectedProvider) ? 
selectedProvider : "openai" const rawProviders = PluginService.loadPluginData(pluginId, "providers", null) let nextProviders = mergedProviders(rawProviders) @@ -302,7 +333,7 @@ Item { } DankDropdown { width: parent.width - options: ["openai", "anthropic", "gemini", "custom"] + options: ["openai", "anthropic", "gemini", "inception", "custom"] currentValue: root.provider onValueChanged: value => setProvider(value) } @@ -332,6 +363,82 @@ Item { placeholderText: "gpt-5.2" onEditingFinished: saveActiveField("model", text.trim()) } + + StyledText { + width: parent.width + visible: root.provider === "inception" + text: I18n.tr("Mercury 2: temperature 0.5–1.0, max_tokens 1–50000 (see Inception API parameters).") + font.pixelSize: Theme.fontSizeSmall + color: Theme.surfaceVariantText + wrapMode: Text.WordWrap + } + + StyledText { + text: I18n.tr("Reasoning effort") + font.pixelSize: Theme.fontSizeSmall + color: Theme.surfaceVariantText + visible: root.provider === "inception" + } + DankDropdown { + width: parent.width + visible: root.provider === "inception" + options: ["instant", "low", "medium", "high"] + currentValue: root.inceptionReasoningEffort + onValueChanged: value => saveActiveField("inceptionReasoningEffort", value) + } + + RowLayout { + width: parent.width + spacing: Theme.spacingM + visible: root.provider === "inception" + Column { + Layout.fillWidth: true + spacing: Theme.spacingXS + StyledText { + text: I18n.tr("Reasoning summary") + font.pixelSize: Theme.fontSizeMedium + color: Theme.surfaceText + } + StyledText { + text: I18n.tr("Return a summary of the model's reasoning.") + font.pixelSize: Theme.fontSizeSmall + color: Theme.surfaceVariantText + wrapMode: Text.WordWrap + width: parent.width + } + } + DankToggle { + checked: root.inceptionReasoningSummary + onToggled: checked => saveActiveField("inceptionReasoningSummary", checked) + } + } + + RowLayout { + width: parent.width + spacing: Theme.spacingM + visible: root.provider === "inception" + Column { + 
Layout.fillWidth: true + spacing: Theme.spacingXS + StyledText { + text: I18n.tr("Wait for reasoning summary") + font.pixelSize: Theme.fontSizeMedium + color: Theme.surfaceText + } + StyledText { + text: I18n.tr("Delay final response until the reasoning summary is ready.") + font.pixelSize: Theme.fontSizeSmall + color: Theme.surfaceVariantText + wrapMode: Text.WordWrap + width: parent.width + } + } + DankToggle { + checked: root.inceptionReasoningSummaryWait + onToggled: checked => saveActiveField("inceptionReasoningSummaryWait", checked) + } + } + } } } @@ -473,7 +580,7 @@ Item { width: parent.width - parent.spacing - Theme.iconSize StyledText { - text: I18n.tr("Temperature: %1").arg(root.temperature.toFixed(1)) + text: I18n.tr("Temperature: %1").arg(root.temperature.toFixed(2)) font.pixelSize: Theme.fontSizeLarge font.weight: Font.Medium color: Theme.surfaceText @@ -493,10 +600,11 @@ Item { width: parent.width height: 32 minimum: 0 - maximum: 20 - value: Math.round(root.temperature * 10) + maximum: 200 + step: 1 + value: Math.round(root.temperature * 100) showValue: false - onSliderValueChanged: newValue => saveActiveField("temperature", newValue / 10) + onSliderValueChanged: newValue => saveActiveField("temperature", newValue / 100) } } } diff --git a/CHANGELOG.md b/CHANGELOG.md index 9ead77d..080161c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,19 +5,28 @@ All notable changes to the AI Assistant plugin will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [Unreleased] + +### Added + +- **Inception** / Mercury 2: [API parameters](https://docs.inceptionlabs.ai/get-started/api-parameters) in settings (`reasoning_effort`, `reasoning_summary`, `reasoning_summary_wait`). 
Temperature is clamped to 0.5–1.0 per the Mercury 2 API docs. + ## [1.4.0] - 2026-03-01 ### Changed + - Replaced custom markdown parser with [marked.js v1.2.9](https://github.com/markedjs/marked) (MIT License), inlined as a self-contained UMD bundle with a Qt Rich Text-compatible custom renderer - Proper handling of all standard GFM constructs: nested lists, loose list items, setext headings, link definitions, strikethrough, task lists, and more ### Fixed + - Code blocks and blockquotes inside ordered list items no longer cause numbering to reset; code block tables are hoisted outside `