From 13e9709d0eb8636eeb1c05ab4a0594a7e40af609 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Sun, 15 Mar 2026 17:56:08 +0800 Subject: [PATCH 01/78] docs: add LLM Service introduction --- docs/llm-service/introduction.md | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100644 docs/llm-service/introduction.md diff --git a/docs/llm-service/introduction.md b/docs/llm-service/introduction.md new file mode 100644 index 00000000..b6233320 --- /dev/null +++ b/docs/llm-service/introduction.md @@ -0,0 +1,19 @@ +# Welcome to LLM Service + +## About LLM Service + +LLM Service is a professional AI service module within the Bank of AI ecosystem, built on top-tier blockchain infrastructure. It is dedicated to providing users with an efficient, user-friendly, and creative AI interaction experience. As a core AI service infrastructure of Bank of AI, this service leverages the decentralization, security, and high efficiency of blockchain technology to introduce a brand-new AI service model. + +The service's core features include: + +* **Multi-Model AI Chat:** We integrate various industry-leading Large Language Models (LLMs), allowing users to select the most suitable model based on their specific needs. +* **Powerful Integrated AI Services:** We offer comprehensive AI-related API services, enabling users to access and integrate them rapidly and easily within the Bank of AI framework. +* **Web3 Native Experience:** Through seamless integration with mainstream Web3 wallets, we provide an end-to-end native experience, from login to payment. + +## Why Choose LLM Service? + +Choosing our LLM Service means enjoying the unique advantages of a secure blockchain ecosystem alongside meticulously designed features. + +* **Multi-chain Ecosystem Advantages:** As part of the Bank of AI ecosystem, users can make payments using mainstream tokens on supported chains, benefiting from fast transaction confirmations and low fees. 
+* **Low Cost & High Efficiency:** By optimizing resources and ensuring efficient on-chain interactions, we deliver highly cost-effective AI services to users. +* **Security & Privacy Protection:** We utilize a decentralized login method. Users can complete authentication simply by signing with their Web3 wallet, ensuring greater security and privacy for all AI interactions. From 3c381543d414ec0d74e841597b7e81a26a2ddab9 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Sun, 15 Mar 2026 18:31:42 +0800 Subject: [PATCH 02/78] docs: add Quick Start for LLM Service --- docs/llm-service/quick-start.md | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 docs/llm-service/quick-start.md diff --git a/docs/llm-service/quick-start.md b/docs/llm-service/quick-start.md new file mode 100644 index 00000000..250895be --- /dev/null +++ b/docs/llm-service/quick-start.md @@ -0,0 +1,32 @@ +# Quick Start + +This chapter will guide you through the initial setup of the LLM Service within the Bank of AI platform. Follow these steps to kickstart your AI journey. + +## 1. Connect Wallet +LLM Service utilizes a decentralized login method. You can use supported Web3 wallets to connect and authorize the login. This method is secure and convenient, eliminating the need to remember complex usernames and passwords. + +### How to Login: +1. Visit the [Bank of AI Chat platform](https://chat.bankofai.io/chat). +2. Click the **Log in** button in the top right corner of the page. +3. In the pop-up window, select your wallet provider and authorize the connection. +4. Confirm the signature request within your wallet to complete the login. + +Upon successful login, your wallet address will be displayed in the top right corner, indicating that you have successfully accessed the platform. + +*Note: Please ensure you have a compatible Web3 wallet installed in your browser or mobile device. Always store your mnemonic phrase and private key securely.* + +## 2. 
Start Your First Conversation +Once inside the platform, you can immediately start interacting with the AI through our LLM Service. + +* **Select AI Model:** In the chat interface, you will see the current default AI model. Click the model name to expand the list and select the model (e.g., ChatGPT-5.2, Claude Sonnet 4.6, Gemini 3.1 Pro) you wish to use. +* **Send Message:** In the input box at the bottom of the page, enter your prompt or task, then click the send button or press Enter. +* **Contextual Interaction:** The AI's response will be presented in a dialogue format. You can engage in multi-turn conversations, and the AI will respond based on the context. + +## 3. Credits and Usage +The LLM Service operates on a credit-based system. You can obtain credits to use AI services through seamless on-chain transactions. + +* **Top-up Interface:** Navigate to the **Top up** section in the dashboard to manage your balance. +* **Purchase Credits:** The platform supports mainstream tokens on supported blockchains. Simply confirm the transaction in your wallet to transfer tokens to the designated address. +* **Automatic Crediting:** Once the transaction is confirmed on the blockchain, the system will automatically credit the corresponding value to your account balance. + +After completing these steps, you can enjoy streamlined access to various industry-leading AI models. 
From d801547caa59035e8c42010ea6c059ee1c855aad Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Sun, 15 Mar 2026 18:41:58 +0800 Subject: [PATCH 03/78] Create pricing-and-usage.md --- docs/llm-service/pricing-and-usage.md | 49 +++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) create mode 100644 docs/llm-service/pricing-and-usage.md diff --git a/docs/llm-service/pricing-and-usage.md b/docs/llm-service/pricing-and-usage.md new file mode 100644 index 00000000..11020dc6 --- /dev/null +++ b/docs/llm-service/pricing-and-usage.md @@ -0,0 +1,49 @@ +# Pricing and Usage + +## Credits & Pricing + +The Bank of AI platform utilizes a unified credit system to measure and settle usage for all AI services. + +* **Credit Calculation Rules:** The number of tokens consumed in each interaction with the AI is converted into corresponding credits based on the pricing standards of different models and deducted from your account balance. +* **Token Consumption Details:** In the AI response details, the platform displays the breakdown of token consumption, helping you understand the specific sources of credit usage and optimize future usage patterns. +* **Pricing for Different Models:** Pricing varies due to differences in capabilities and computational costs among AI models. Generally, more capable models consume more credits. The web search feature incurs an additional fee and is charged on a pay-per-use basis. Models that do not support web search are marked with "-" in the pricing table. 
+ +### Specific Model Pricing + +| Model | Input (Credits/Token) | Output (Credits/Token) | Web Search (Credits/Use) | +| :--- | :--- | :--- | :--- | +| ChatGPT-5.2 | 1.75 | 14.00 | 10,000 | +| ChatGPT-5-mini | 0.25 | 2.00 | 10,000 | +| ChatGPT-5-nano | 0.05 | 0.40 | - | +| Claude Opus 4.6 | 5.00 | 25.00 | 10,000 | +| Claude Opus 4.5 | 5.00 | 25.00 | 10,000 | +| Claude Sonnet 4.6 | 3.00 | 15.00 | 10,000 | +| Claude Sonnet 4.5 | 3.00 | 15.00 | 10,000 | +| Claude Haiku 4.5 | 1.00 | 5.00 | 10,000 | +| Gemini 3.1 Pro | 2.00 | 12.00 | 14,000 | +| Gemini 3 Flash | 0.50 | 3.00 | 14,000 | + +> **Calculation Example:** If you use a model with a rate of 1.25 (Input) and 10.00 (Output) to ask a question (10 input tokens) and receive a response (50 output tokens), the dialogue consumes **512.5 credits** (10 × 1.25 + 50 × 10). You can check the specific usage by hovering over the model name in the bottom right corner of the chat. + +## Usage Information + +You can view detailed data regarding all your consumption on the **Usage** page via the left navigation bar. + +* **Usage Overview:** Displays your credit balance and total consumption for the current month. +* **Monthly Usage Chart:** An intuitive bar chart to track usage fluctuations over the past year. +* **Usage Detail:** Every record corresponds precisely to a single AI interaction, including creation time, model used, token usage, credits consumed, and response time. + +## Deposit + +Bank of AI operates on a pre-paid model. Leveraging secure blockchain technology, the platform offers a convenient deposit experience. + +* **Deposit Process:** On the **Top up** page, the platform will guide you to pay using your connected Web3 wallet. Simply confirm the transaction in the wallet pop-up window to complete it. +* **Supported Token Types:** The platform supports various mainstream tokens on supported networks (including TRON and BNB Chain). 
+* **Arrival Time:** Once the transaction is confirmed on the blockchain, the system will automatically issue the equivalent value of credits to your account, typically within a few minutes. + +## Billing & Invoices + +View your complete deposit history under the **History** tab on the **Top up** page. + +* **Deposit Records:** Displays creation time, type, transaction hash, and token information for each deposit. +* **Transparency:** You can click the transaction hash to verify details on the corresponding blockchain explorer (e.g., TRONSCAN or BscScan). From ef887574d7851806e491ce75d75d5b01f66a9d74 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Sun, 15 Mar 2026 19:10:24 +0800 Subject: [PATCH 04/78] Create chatgpt-5-2.md --- docs/llm-service/models/chatgpt-5-2.md | 30 ++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 docs/llm-service/models/chatgpt-5-2.md diff --git a/docs/llm-service/models/chatgpt-5-2.md b/docs/llm-service/models/chatgpt-5-2.md new file mode 100644 index 00000000..a098e9c1 --- /dev/null +++ b/docs/llm-service/models/chatgpt-5-2.md @@ -0,0 +1,30 @@ +# ChatGPT-5.2 + +## Overview +ChatGPT-5.2 is the latest generation of the flagship large language model developed by OpenAI. Building upon the powerful capabilities of the 5.1 version, it further optimizes the speed of multimodal processing and the execution efficiency of complex tasks, making it the ideal choice for professional users seeking ultimate performance and efficiency. + +## Key Features +* **Efficient Multimodal Processing:** Significantly improves the parsing and generation speed of image and video content compared to 5.1, achieving a smoother multimodal interaction experience. +* **Enhanced Task Execution Efficiency:** Optimizes the internal reasoning engine, allowing for faster and more accurate conclusions when handling long-chain, multi-step complex tasks. 
+* **Stronger Interference Resistance:** Exhibits greater robustness and accuracy when processing inputs containing significant noise or ambiguous instructions. + +## Best Use Cases +* **Real-time Data Analysis and Visualization:** Capable of quickly processing real-time data streams and generating complex charts and visualization reports. +* **Complex Project Management and Planning:** Assists with task decomposition, resource allocation, and risk assessment for efficient decision support. +* **High-Frequency, High-Precision Professional Consulting:** Suitable for professional fields requiring fast and accurate responses, such as financial trading analysis and legal document retrieval. + +## Capabilities and Limitations + +| Capability | Detailed Description | +| :--- | :--- | +| **Reasoning Ability** | Extremely Strong. Maintains a leading position in complex logical reasoning and scientific computation, with improved efficiency. | +| **Creative Ability** | Extremely Strong. Can generate high-quality, in-depth content, particularly excelling in structured and professional texts. | +| **Multimodal Ability** | Comprehensive and Efficient. Supports input and understanding of images, videos, and audio, and can quickly generate high-quality image content. | +| **Response Speed** | Medium to Slow. Improved compared to 5.1, but still a deep analysis model, not suitable for extremely low-latency scenarios. | +| **Context Window** | Huge. Supports a context window of millions of tokens. 
| + +## Credits and Pricing + +| Model | Input (Credits/Token) | Output (Credits/Token) | +| :--- | :--- | :--- | +| **ChatGPT-5.2** | 1.75 | 14.00 | From 7d5c48c3bd1277b0f541300ef4ecf0420d27d81b Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Sun, 15 Mar 2026 19:12:56 +0800 Subject: [PATCH 05/78] Create chatgpt-5-mini.md --- docs/llm-service/models/chatgpt-5-mini.md | 30 +++++++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 docs/llm-service/models/chatgpt-5-mini.md diff --git a/docs/llm-service/models/chatgpt-5-mini.md b/docs/llm-service/models/chatgpt-5-mini.md new file mode 100644 index 00000000..2cfa3b82 --- /dev/null +++ b/docs/llm-service/models/chatgpt-5-mini.md @@ -0,0 +1,30 @@ +# ChatGPT-5-mini + +## Overview +ChatGPT-5-mini is an efficient and economical lightweight language model. It is optimized for fast, smooth daily conversations and general tasks, making it a premier choice for cost-effective AI interaction within the Bank of AI ecosystem. + +## Key Features +* **Extremely Fast Response:** Deeply optimized for low response latency, providing a near real-time conversation experience. +* **High Cost-Effectiveness:** While ensuring high-quality output, its computational cost is significantly lower than flagship models, achieving a balance between performance and cost. +* **Strong General Capabilities:** Covers a wide range of daily application scenarios, from quick Q&A to text organization, with stable and reliable performance. + +## Best Use Cases +* **Daily Conversation and Quick Q&A:** Acts as a smart assistant for quickly answering factual questions and engaging in casual chat. +* **Text Processing:** Summarizing, polishing, formatting, and extracting keywords from emails, articles, and documents. +* **Initial Draft Generation:** Quickly generating drafts for social media posts, product descriptions, and blog articles. 
+ +## Capabilities and Limitations + +| Capability | Detailed Description | +| :--- | :--- | +| **Reasoning Ability** | **Medium.** Can handle simple logical reasoning, but may falter on multi-step complex problems. | +| **Creative Ability** | **Medium.** Generates fluent and coherent text, but is relatively limited in depth and professional creativity. | +| **Multimodal Ability** | **Not Supported.** This model focuses on text processing and does not have image or audio understanding capabilities. | +| **Response Speed** | **Fast.** One of the fastest responding models on the platform. | +| **Context Window** | **Standard.** Supports tens of thousands of tokens, sufficient for most daily conversation scenarios. | + +## Credits and Pricing + +| Model | Input (Credits/Token) | Output (Credits/Token) | +| :--- | :--- | :--- | +| **ChatGPT-5-mini** | 0.25 | 2.00 | From 0589a75c8bd0812940d48a3d3c807e8d15f52c62 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Sun, 15 Mar 2026 19:15:43 +0800 Subject: [PATCH 06/78] Create chatgpt-5-nano.md --- docs/llm-service/models/chatgpt-5-nano.md | 30 +++++++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 docs/llm-service/models/chatgpt-5-nano.md diff --git a/docs/llm-service/models/chatgpt-5-nano.md b/docs/llm-service/models/chatgpt-5-nano.md new file mode 100644 index 00000000..38dd9559 --- /dev/null +++ b/docs/llm-service/models/chatgpt-5-nano.md @@ -0,0 +1,30 @@ +# ChatGPT-5-nano + +## Overview +ChatGPT-5-nano is an advanced language model that strikes an excellent balance between performance, speed, and cost. It is designed to provide near-professional AI capabilities at a moderate cost within the Bank of AI ecosystem. + +## Key Features +* **Enhanced Reasoning Ability:** Nano shows significant improvements in logical reasoning, code generation, and multilingual processing compared to lighter models. 
+* **Efficient Performance:** Carefully tuned to maintain high output quality while sustaining a fast response speed. +* **Multifunctional Integration:** Capable of handling a diverse range of tasks, making it a powerful assistant for developers and content creators. + +## Best Use Cases +* **Code Assistance and Debugging:** Understanding and generating code across multiple programming languages, assisting with debugging and documentation. +* **Multilingual Translation and Writing:** Providing high-quality cross-language translation and creating content in authentic language styles. +* **Structured Content Generation:** Generating well-formatted reports, technical documents, tutorials, and other structured content. + +## Capabilities and Limitations + +| Capability | Detailed Description | +| :--- | :--- | +| **Reasoning Ability** | **Strong.** Can handle complex logical problems and programming tasks, performing well in specific domains. | +| **Creative Ability** | **Strong.** Generates creative and in-depth text content, meeting high writing requirements. | +| **Multimodal Ability** | **Limited Support.** Can understand and describe simple image content, but does not support deep multimodal analysis. | +| **Response Speed** | **Medium.** Faster than flagship models, though slightly slower than the mini model. | +| **Context Window** | **Large.** Supports a context window of hundreds of thousands of tokens for long document processing. 
| + +## Credits and Pricing + +| Model | Input (Credits/Token) | Output (Credits/Token) | +| :--- | :--- | :--- | +| **ChatGPT-5-nano** | 0.05 | 0.40 | From 059956886f91b36d0cc357a078dbbc2a9df5b3c5 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Sun, 15 Mar 2026 19:20:18 +0800 Subject: [PATCH 07/78] Create claude-opus-4-6.md --- docs/llm-service/models/claude-opus-4-6.md | 32 ++++++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 docs/llm-service/models/claude-opus-4-6.md diff --git a/docs/llm-service/models/claude-opus-4-6.md b/docs/llm-service/models/claude-opus-4-6.md new file mode 100644 index 00000000..96809673 --- /dev/null +++ b/docs/llm-service/models/claude-opus-4-6.md @@ -0,0 +1,32 @@ +# Claude Opus 4.6 + +## Overview +Claude Opus 4.6 is the latest iteration of Anthropic's flagship AI model series. Building upon the 4.5 version's exceptional intelligence and profound ethical considerations, it further enhances complex reasoning capabilities, multimodal understanding, and performance in specific professional domains, aiming to provide a more powerful and precise AI experience within the Bank of AI ecosystem. + +## Key Features +* **Enhanced Complex Reasoning:** Deeply optimized for multi-step, cross-domain, and abstract concept reasoning, performing exceptionally well in solving complex scientific, engineering, and legal problems. +* **Refined Multimodal Understanding:** Improved detail capture and contextual correlation for images, charts, and videos, enabling more accurate analysis of complex multimodal inputs. +* **Deepened Professional Knowledge:** Expanded breadth and depth in specific fields such as advanced financial analysis, drug discovery, and complex system design. +* **Distinction from 4.5:** Version 4.6 focuses on **depth and precision** improvements, providing more reliable and detailed outputs for tasks requiring ultimate accuracy. 
+ +## Best Use Cases +* **Cutting-Edge Scientific Research:** Assisting in complex data analysis, theoretical validation, and exploration of new discoveries. +* **Advanced Strategic Consulting:** Providing strategic decision support based on vast information and deep analytical insights. +* **Legal and Compliance Review:** Processing complex legal texts, performing risk assessments, and compliance analysis. +* **Innovative Content Generation:** Creating research reports, technical whitepapers, and high-end market analysis requiring logical rigor. + +## Capabilities and Limitations + +| Capability | Detailed Description | +| :--- | :--- | +| **Reasoning Ability** | **Top-tier.** Further enhanced beyond 4.5, particularly adept at problems requiring deep thought and abstract logic. | +| **Creative Ability** | **Extremely Strong.** Generates logically rigorous, insightful, and elegantly styled professional texts. | +| **Multimodal Ability** | **Refined.** Enhanced ability to extract insights from complex visual materials and subtle multimodal information. | +| **Response Speed** | **Slow.** Prioritizes quality and depth; suitable for tasks that are not time-sensitive. | +| **Context Window** | **Huge.** Supports an ultra-long context window for processing massive documents and codebases. 
| + +## Credits and Pricing + +| Model | Input (Credits/Token) | Output (Credits/Token) | +| :--- | :--- | :--- | +| **Claude Opus 4.6** | 5.00 | 25.00 | From 19d6d35ae315a194893f9ebcbcaf327e3c197492 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Sun, 15 Mar 2026 19:21:34 +0800 Subject: [PATCH 08/78] Create claude-opus-4-5.md --- docs/llm-service/models/claude-opus-4-5.md | 30 ++++++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 docs/llm-service/models/claude-opus-4-5.md diff --git a/docs/llm-service/models/claude-opus-4-5.md b/docs/llm-service/models/claude-opus-4-5.md new file mode 100644 index 00000000..004e1da9 --- /dev/null +++ b/docs/llm-service/models/claude-opus-4-5.md @@ -0,0 +1,30 @@ +# Claude Opus 4.5 + +## Overview +Claude Opus 4.5 is the flagship AI model developed by Anthropic, integrated into the Bank of AI platform. It is renowned for its superior intelligence, top-tier performance, and profound ethical considerations, setting new benchmarks for professional AI applications. + +## Key Features +* **Unparalleled Intelligence:** Sets industry benchmarks in graduate-level reasoning, mathematics, and coding, capable of solving extremely complex and open-ended problems. +* **Powerful Vision Understanding:** Possesses top-tier visual analysis capabilities, accurately interpreting complex charts, scientific illustrations, and multi-image documents. +* **Extra-Long Context Processing:** Supports a massive context window, effortlessly handling and analyzing documents or entire codebases with hundreds of thousands of words. + +## Best Use Cases +* **Complex System Analysis:** Understanding complex flowcharts and API documentation, and autonomously writing code to automate cross-system tasks. +* **High-End Scientific Research:** Analyzing complex scientific papers, interpreting experimental data, and proposing innovative research hypotheses. 
+* **Enterprise Strategic Planning:** Deeply analyzing market trends, financial reports, and legal contracts to provide high-quality decision support. + +## Capabilities and Limitations + +| Capability | Detailed Description | +| :--- | :--- | +| **Reasoning Ability** | **Top-tier.** Outperforms other models in benchmark tests, especially adept at handling problems requiring deep thought. | +| **Creative Ability** | **Extremely Strong.** Generates logically coherent, insightful, and elegantly styled text across all genres. | +| **Multimodal Ability** | **Powerful.** Skilled at extracting insights from complex visual materials and visual data analysis. | +| **Response Speed** | **Slow.** Prioritizes the highest quality and depth over response time. | +| **Context Window** | **Huge.** Supports a context window of over 200K tokens, with potential for further expansion. | + +## Credits and Pricing + +| Model | Input (Credits/Token) | Output (Credits/Token) | +| :--- | :--- | :--- | +| **Claude Opus 4.5** | 5.00 | 25.00 | From 10c93872390575b26df3559108872f064167fd80 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Sun, 15 Mar 2026 19:25:05 +0800 Subject: [PATCH 09/78] Create claude-sonnet-4-6.md --- docs/llm-service/models/claude-sonnet-4-6.md | 32 ++++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 docs/llm-service/models/claude-sonnet-4-6.md diff --git a/docs/llm-service/models/claude-sonnet-4-6.md b/docs/llm-service/models/claude-sonnet-4-6.md new file mode 100644 index 00000000..fa6c270d --- /dev/null +++ b/docs/llm-service/models/claude-sonnet-4-6.md @@ -0,0 +1,32 @@ +# Claude Sonnet 4.6 + +## Overview +Claude Sonnet 4.6 is a major upgrade to Anthropic's Sonnet series, designed to deliver performance close to the flagship Opus model at a more economical cost. It achieves comprehensive improvements in coding, computer use, long-context reasoning, and knowledge work. 
For the first time in the Sonnet series, it introduces a **1,000,000 token context window (beta)**, making it an ideal choice for enterprise-level automation and complex task processing. + +## Key Features +* **Near-Opus Performance:** In multiple benchmarks, especially for processing enterprise documents (charts, PDFs, tables), its performance is comparable to the Opus 4.6. +* **1M Token Context Window:** Brings a million-token context window to the Sonnet series for the first time, capable of processing entire codebases or massive research papers. +* **Excellent Coding & Computer Use:** Demonstrates outstanding ability to use real software, reaching or exceeding human-level performance in complex code fixes and multi-step web workflows. +* **Distinction from 4.5:** A comprehensive skill upgrade over 4.5 with stronger instruction-following, fewer hallucinations, and more reliable multi-step task execution. + +## Best Use Cases +* **Enterprise Document Analysis:** Efficiently processing and reasoning with complex PDF documents containing charts and tables. +* **Large-Scale Codebase Refactoring:** Ideal for codebase-level analysis and modification thanks to its million-token context. +* **Cost-Effective Agentic Workflows:** Providing top-tier performance for automated tasks requiring multi-step planning. +* **Knowledge Work & Design:** Excelling in high-quality content generation and front-end design with fewer iterations required. + +## Capabilities and Limitations + +| Capability | Detailed Description | +| :--- | :--- | +| **Reasoning Ability** | **Strong.** Performance in long-context reasoning and complex problem-solving is close to the Opus level. | +| **Creative Ability** | **Strong.** Particularly adept at generating high-quality code and front-end pages with a good sense of design. | +| **Multimodal Ability** | **Supported.** Capable of processing and understanding multimodal inputs such as images and PDFs. 
| +| **Response Speed** | **Medium.** Achieves an excellent balance between performance and speed. | +| **Context Window** | **1,000,000 Tokens (beta).** Supports a max output of 128,000 tokens. | + +## Credits and Pricing + +| Model | Input (Credits/Token) | Output (Credits/Token) | +| :--- | :--- | :--- | +| **Claude Sonnet 4.6** | 3.00 | 15.00 | From bcee033af213cfe3e10916be991f1ef46d3f0d3d Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Sun, 15 Mar 2026 19:26:49 +0800 Subject: [PATCH 10/78] Create claude-sonnet-4-5.md --- docs/llm-service/models/claude-sonnet-4-5.md | 30 ++++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 docs/llm-service/models/claude-sonnet-4-5.md diff --git a/docs/llm-service/models/claude-sonnet-4-5.md b/docs/llm-service/models/claude-sonnet-4-5.md new file mode 100644 index 00000000..b5626474 --- /dev/null +++ b/docs/llm-service/models/claude-sonnet-4-5.md @@ -0,0 +1,30 @@ +# Claude Sonnet 4.5 + +## Overview +Claude Sonnet 4.5 is a balanced AI model developed by Anthropic, integrated into the Bank of AI platform. It is designed to provide near-flagship intelligence at a lower cost, making it the ideal choice for large-scale enterprise AI deployment. + +## Key Features +* **Ideal Intelligence & Speed:** Far surpasses similar models in intelligence while offering twice the response speed of the Opus model series. +* **Enterprise-Grade Optimization:** Specifically tuned for core business scenarios such as knowledge retrieval, sales automation, and complex data analysis. +* **Extra-Long Context & High Recall:** Supports a massive context window while maintaining extremely high information recall accuracy even in very long documents. + +## Best Use Cases +* **Enterprise Knowledge Management:** Quickly and accurately retrieving information from vast documentation to provide precise answers for customers or employees. 
+* **Sales & Marketing Automation:** Analyzing market data, generating personalized marketing copy, and automatically processing sales leads. +* **Code Quality Control:** Efficiently generating and reviewing code to help development teams improve both speed and code quality. + +## Capabilities and Limitations + +| Capability | Detailed Description | +| :--- | :--- | +| **Reasoning Ability** | **Strong.** Performs well in most reasoning and coding tasks, meeting the needs of professional business applications. | +| **Creative Ability** | **Strong.** Generates high-quality text that complies with rigorous business and professional standards. | +| **Multimodal Ability** | **Supported.** Strong image understanding capabilities for processing documents and reports containing charts. | +| **Response Speed** | **Medium.** Strikes an excellent balance between intelligence and speed, ideal for interactive applications. | +| **Context Window** | **Huge.** Shares the extra-long context processing capability with the flagship Opus model. | + +## Credits and Pricing + +| Model | Input (Credits/Token) | Output (Credits/Token) | +| :--- | :--- | :--- | +| **Claude Sonnet 4.5** | 3.00 | 15.00 | From b628118430c6ff08b480bfdd1dbf7500ec7af779 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Sun, 15 Mar 2026 19:28:05 +0800 Subject: [PATCH 11/78] Create claude-haiku-4-5.md --- docs/llm-service/models/claude-haiku-4-5.md | 30 +++++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 docs/llm-service/models/claude-haiku-4-5.md diff --git a/docs/llm-service/models/claude-haiku-4-5.md b/docs/llm-service/models/claude-haiku-4-5.md new file mode 100644 index 00000000..78fcf1f0 --- /dev/null +++ b/docs/llm-service/models/claude-haiku-4-5.md @@ -0,0 +1,30 @@ +# Claude Haiku 4.5 + +## Overview +Claude Haiku 4.5 is the fastest and most compact AI model developed by Anthropic, integrated into the Bank of AI platform. 
It is designed to provide near-instantaneous responses, making it the premier choice for building seamless, real-time AI experiences. + +## Key Features +* **Unmatched Response Speed:** As the fastest model in its intelligence class, it delivers extremely low latency, ideal for applications requiring immediate interaction. +* **Ultimate Cost-Effectiveness:** Highly competitive pricing makes it the most viable option for deploying AI at scale across massive user scenarios. +* **Enterprise-Grade Robustness:** Undergone rigorous security testing to ensure the reliability and safety required for professional enterprise applications. + +## Best Use Cases +* **Real-time Chatbots & Moderation:** Providing smooth, natural conversation experiences and quickly moderating user-generated content. +* **Mobile AI Applications:** Optimized for mobile environments where latency and resource consumption are critical factors. +* **Workflow Streamlining:** Automating routine tasks such as email classification, meeting summarization, and form data extraction to improve daily efficiency. + +## Capabilities and Limitations + +| Capability | Detailed Description | +| :--- | :--- | +| **Reasoning Ability** | **Medium.** Capable of handling general tasks, but limited in solving highly complex or multi-step reasoning problems. | +| **Creative Ability** | **Medium.** Generates concise and fluent text, best suited for information delivery rather than deep creative writing. | +| **Multimodal Ability** | **Supported.** Basic image understanding capabilities to identify and describe objects within visual inputs. | +| **Response Speed** | **Extremely Fast.** The fastest responding model on the platform, enabling near-instantaneous interaction. | +| **Context Window** | **Huge.** Supports an extra-long context window, capable of handling large documents and extensive conversation histories. 
| + +## Credits and Pricing + +| Model | Input (Credits/Token) | Output (Credits/Token) | +| :--- | :--- | :--- | +| **Claude Haiku 4.5** | 1.00 | 5.00 | From 5c05d991f37488500bdbf7234ec5176073fdd088 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Sun, 15 Mar 2026 19:30:10 +0800 Subject: [PATCH 12/78] Create gemini-3-1-pro.md --- docs/llm-service/models/gemini-3-1-pro.md | 31 +++++++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100644 docs/llm-service/models/gemini-3-1-pro.md diff --git a/docs/llm-service/models/gemini-3-1-pro.md b/docs/llm-service/models/gemini-3-1-pro.md new file mode 100644 index 00000000..b0ef042d --- /dev/null +++ b/docs/llm-service/models/gemini-3-1-pro.md @@ -0,0 +1,31 @@ +# Gemini 3.1 pro + +## Overview +Gemini 3.1 pro is a significant upgrade to Google's flagship multimodal model series. It is designed to deliver enhanced reasoning, more reliable output, and greater efficiency. While retaining its native multimodal capabilities, it significantly improves accuracy in handling complex tasks and resolves the output truncation issues present in previous versions. + +## Key Features +* **Doubled Reasoning Capability:** Achieved a groundbreaking score on the ARC-AGI-2 benchmark, demonstrating a performance increase of more than double in novel pattern recognition compared to its predecessor. +* **Ultra-Long Context & Output:** Supports a context window of up to **1,000,000 tokens** and can generate outputs of up to **64,000 tokens** in a single run, ensuring long responses are never truncated. +* **Native Multimodality:** Seamlessly processes and understands information across various formats, including text, images, audio, and video, within a single model architecture. +* **Higher Operational Efficiency:** Delivers more reliable results with optimized token usage, enhancing overall performance for complex workflows. 
+ +## Best Use Cases +* **Complex Code Generation:** Ideal for software development tasks that require deep logical reasoning and complete, non-truncated code blocks. +* **Large-Scale Data Analysis:** Capable of processing and deeply analyzing vast amounts of documents or datasets in a single pass. +* **Creative Long-Form Writing:** Drafting detailed reports, scripts, or technical whitepapers that require sustained logical coherence over thousands of words. + +## Capabilities and Limitations + +| Capability | Detailed Description | +| :--- | :--- | +| **Reasoning Ability** | **Extremely Strong.** Significant improvements in abstract reasoning and novel problem-solving. | +| **Creative Ability** | **Extremely Strong.** Capable of generating high-quality, lengthy, and logically coherent content. | +| **Multimodal Ability** | **Native & Comprehensive.** Supports mixed inputs of text, images, audio, and video seamlessly. | +| **Response Speed** | **Medium to Slow.** Prioritizes the quality, depth, and completeness of professional-grade output. | +| **Context Window** | **1,000,000 Tokens.** (Supports a maximum output of 64,000 tokens). 
| + +## Credits and Pricing + +| Model | Input (Credits/Token) | Output (Credits/Token) | +| :--- | :--- | :--- | +| **Gemini 3.1 pro** | 2.00 | 12.00 | From ca45afc5d2c3861ee25d40ebf7ca8d6fb66d6e2a Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Sun, 15 Mar 2026 19:31:25 +0800 Subject: [PATCH 13/78] Create gemini-3-flash.md --- docs/llm-service/models/gemini-3-flash.md | 30 +++++++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 docs/llm-service/models/gemini-3-flash.md diff --git a/docs/llm-service/models/gemini-3-flash.md b/docs/llm-service/models/gemini-3-flash.md new file mode 100644 index 00000000..78a88384 --- /dev/null +++ b/docs/llm-service/models/gemini-3-flash.md @@ -0,0 +1,30 @@ +# Gemini 3 flash + +## Overview +Gemini 3 flash is the fastest and most efficient model in the Gemini 3 series released by Google. It is designed for applications requiring rapid response and high throughput, retaining the native multimodal capabilities of the series while significantly optimizing for speed and cost. + +## Key Features +* **Rapid Response & High Throughput:** Optimized for low-latency and high-concurrency scenarios, making it the preferred choice for real-time AI applications. +* **Efficient Multimodality:** Inherits native multimodal capabilities to quickly process and understand information like images and audio, with lower computational requirements than the Pro version. +* **Excellent Cost-Effectiveness:** Ensures high speed and multimodal versatility while significantly reducing operating costs within the Bank of AI ecosystem. + +## Best Use Cases +* **Real-time Chatbots:** Providing smooth, instant, and multimodal interactive experiences for customer service and support. +* **Content Moderation:** Quickly identifying and filtering non-compliant content in both text and image formats. +* **Mobile & Edge Applications:** Ideal for latency-sensitive scenarios that require quick feedback and efficient resource usage. 
+ +## Capabilities and Limitations + +| Capability | Detailed Description | +| :--- | :--- | +| **Reasoning Ability** | **Strong.** Handles most general and complex reasoning tasks, though less depth than the Pro version for niche professional problems. | +| **Creative Ability** | **Strong.** Quickly generates high-quality text and detailed multimodal content descriptions. | +| **Multimodal Ability** | **Native & Efficient.** Possesses strong multimodal understanding, optimized for speed over deep exhaustive analysis. | +| **Response Speed** | **Extremely Fast.** One of the fastest models on the platform, enabling near-instantaneous interaction. | +| **Context Window** | **Huge.** Supports an extremely long context window, consistent with the flagship Pro version. | + +## Credits and Pricing + +| Model | Input (Credits/Token) | Output (Credits/Token) | +| :--- | :--- | :--- | +| **Gemini 3 flash** | 0.50 | 3.00 | From fedd20ce2213b58e729ae1928290188ac8a15670 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Sun, 15 Mar 2026 22:06:59 +0800 Subject: [PATCH 14/78] Create chat-completion.md --- docs/llm-service/api/chat-completion.md | 1 + 1 file changed, 1 insertion(+) create mode 100644 docs/llm-service/api/chat-completion.md diff --git a/docs/llm-service/api/chat-completion.md b/docs/llm-service/api/chat-completion.md new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/docs/llm-service/api/chat-completion.md @@ -0,0 +1 @@ + From 2c2caa7d1edcbb23312e65ae6d521e0a2f25d016 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Sun, 15 Mar 2026 22:27:54 +0800 Subject: [PATCH 15/78] Create integration-guide.md --- docs/llm-service/openclaw/integration-guide.md | 1 + 1 file changed, 1 insertion(+) create mode 100644 docs/llm-service/openclaw/integration-guide.md diff --git a/docs/llm-service/openclaw/integration-guide.md b/docs/llm-service/openclaw/integration-guide.md new file mode 100644 index 00000000..8b137891 --- /dev/null +++ 
b/docs/llm-service/openclaw/integration-guide.md @@ -0,0 +1 @@ + From ef2d68b0f9f555c980387faf3751d07a52dc6183 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Sun, 15 Mar 2026 22:35:14 +0800 Subject: [PATCH 16/78] Update integration-guide.md --- .../llm-service/openclaw/integration-guide.md | 308 ++++++++++++++++++ 1 file changed, 308 insertions(+) diff --git a/docs/llm-service/openclaw/integration-guide.md b/docs/llm-service/openclaw/integration-guide.md index 8b137891..1fcf7b2f 100644 --- a/docs/llm-service/openclaw/integration-guide.md +++ b/docs/llm-service/openclaw/integration-guide.md @@ -1 +1,309 @@ +# Integrating OpenClaw with an LLM Gateway +## Overview + +From zero to a private AI agent in about 15 minutes. + +OpenClaw (formerly ClawdBot or Moltbot) is an open-source personal AI assistant that runs locally on your own machine. Instead of relying on a cloud-only SaaS workflow, it gives you more control over your data, memory, files, and automations. + +You can connect it to messaging platforms such as WhatsApp, Telegram, Lark, and DingTalk, and use it to handle email, manage calendars, write code, or automate everyday tasks. + +OpenClaw is more than a chatbot. It acts like a real agent with persistent memory, local file system access, internet access, and expandable skills. + +This guide walks through downloading, installing, and configuring OpenClaw, then connecting it to an **LLM Gateway** through a compatible API. + +--- + +# Step 1: Obtain an API Key + +Before integrating OpenClaw with the **LLM Gateway**, you need to generate an API key. + +1. Visit the API key management page + https://chat.bankofai.io/key + +2. Sign in to your account. + +3. Create or copy your **API Key**. + +You will use this key later when configuring the model provider inside OpenClaw. + +Keep your API key secure and do not share it publicly. + +--- + +# Step 2: Prepare Your System + +Before installing OpenClaw, make sure your system meets these requirements. 
+ +| Requirement | Details | +|---|---| +| Node.js | Version 22 or higher | +| Operating System | macOS, Linux, or Windows via WSL2 | +| Package Manager | npm recommended | + +Check your Node.js version: + +```bash +node -v +``` + +![Check Node.js version](https://files.readme.io/ac27744855c7066d117a856e7005166662e707462312b1925c4e368f5c9c7427-1.png) + +If your version is lower than **v22.0.0**, install or upgrade Node.js first. + +--- + +# Step 3: Install OpenClaw + +For beginners, the easiest installation method is: + +```bash +npm install -g openclaw +``` + +Verify the installation: + +```bash +openclaw --version +``` + +![Verify OpenClaw installation](https://files.readme.io/6f39989ed49f307a5168bd934c4ffaf2a79db8a2268c6959c3bb731f43138598-2.png) + +### Troubleshooting + +#### Sharp module error + +Run: + +```bash +npm install -g openclaw --force +``` + +#### openclaw command not found + +Find npm global path: + +```bash +npm config get prefix +``` + +Add it to your PATH if necessary. + +--- + +# Step 4: Complete the Initialization Wizard + +Run: + +```bash +openclaw onboard +``` + +The wizard includes three parts. + +--- + +## 4.1 Model Configuration + +When asked to choose a model provider, select: + +**Skip for now** + +We will configure the LLM Gateway manually. + +![Skip model provider](https://files.readme.io/458a23b58f79ec97f4fee7ed6668a6a799cb37f95be57ddfe96830ad77bb2bb5-4.png) + +--- + +## 4.2 Communication Channels + +Choose messaging platforms. + +You may skip for now. + +![Skip channels](https://files.readme.io/178a9aded9bb4934dcaf7445c0c4edf4569836183ac6019b8c8d08e2917b4549-5.png) + +--- + +## 4.3 Skills + +For beginners select: + +**No** + +![Skip skills](https://files.readme.io/d4155d69eb1dd69c1586fa5ab844c602914b0b5a1934265bd97e1f8444b40b5a-6.png) + +--- + +# Step 5: Configure the LLM Gateway + +Open the configuration file: + +```bash +~/.openclaw/openclaw.json +``` + +Add a provider configuration. 
+ +```json +{ + "models": { + "mode": "merge", + "providers": { + "llm-gateway": { + "baseUrl": "https://api.example.com/v1/", + "apiKey": "{YOUR_API_KEY}", + "api": "openai-completions", + "models": [ + { "id": "gpt-5.2", "name": "gpt-5.2" }, + { "id": "gpt-5-mini", "name": "gpt-5-mini" }, + { "id": "gpt-5-nano", "name": "gpt-5-nano" }, + { "id": "claude-opus-4.6", "name": "claude-opus-4.6" }, + { "id": "claude-sonnet-4.6", "name": "claude-sonnet-4.6" }, + { "id": "claude-haiku-4.5", "name": "claude-haiku-4.5" } + ] + } + } + } +} +``` + +Replace: + +- `baseUrl` +- `apiKey` + +with your real values. + +--- + +## Set Default Model + +In the same file: + +```json +{ + "agents": { + "default": { + "model": "llm-gateway/gpt-5-nano" + } + } +} +``` + +--- + +## Restart Gateway + +```bash +openclaw gateway restart +``` + +--- + +## Test the Connection + +```bash +openclaw agent --agent main --message "How are you doing today?" +``` + +If successful, the agent will respond. + +![Successful response](https://files.readme.io/312c6feb51ea9f4071b75efd3182ef0507a6981baebd3bf7e8d57dec33978efd-7.png) + +--- + +# Step 6: Gateway Commands + +| Action | Command | +|---|---| +| Install | `openclaw gateway install` | +| Start | `openclaw gateway start` | +| Stop | `openclaw gateway stop` | +| Restart | `openclaw gateway restart` | +| Status | `openclaw gateway status` | + +--- + +# Diagnostic Commands + +```bash +openclaw doctor +``` + +Runs a health check. 
+ +--- + +# Step 7: Launch OpenClaw + +## Web Dashboard + +```bash +openclaw ui +``` + +Default: + +``` +http://127.0.0.1:18789 +``` + +--- + +## Terminal UI + +```bash +openclaw tui +``` + +![OpenClaw TUI](https://files.readme.io/9ca9be0db1c21be12488934030c8ef3076c17fbb4fc26cfdb0e71425e46d121d-8.png) + +Useful commands: + +| Command | Description | +|---|---| +| `/status` | system status | +| `/session` | switch session | +| `/model` | change model | +| `/help` | command list | + +--- + +# Step 8: Useful CLI Commands + +Check models: + +```bash +openclaw models status +``` + +List channels: + +```bash +openclaw channels list +``` + +Search memory: + +```bash +openclaw memory search "keyword" +``` + +Open documentation: + +```bash +openclaw docs +``` + +--- + +# Next Steps + +You can now: + +- Add Telegram or other messaging channels +- Enable skills +- Connect additional APIs +- Customize models +- Build automated AI workflows From b46cff4a59e0646a5367a9a1995d8affdad4afca Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Sun, 15 Mar 2026 22:43:25 +0800 Subject: [PATCH 17/78] Update integration-guide.md --- .../llm-service/openclaw/integration-guide.md | 166 ++++++++++-------- 1 file changed, 94 insertions(+), 72 deletions(-) diff --git a/docs/llm-service/openclaw/integration-guide.md b/docs/llm-service/openclaw/integration-guide.md index 1fcf7b2f..0c25886e 100644 --- a/docs/llm-service/openclaw/integration-guide.md +++ b/docs/llm-service/openclaw/integration-guide.md @@ -1,47 +1,73 @@ # Integrating OpenClaw with an LLM Gateway -## Overview +## Table of Contents + +- [Overview](#overview) +- [Step 1: Obtain an API Key](#step-1-obtain-an-api-key) +- [Step 2: Prepare Your System](#step-2-prepare-your-system) +- [Step 3: Install OpenClaw](#step-3-install-openclaw) +- [Step 4: Run the Initialization Wizard](#step-4-run-the-initialization-wizard) +- [Step 5: Configure the LLM Gateway](#step-5-configure-the-llm-gateway) +- [Step 6: Gateway 
Commands](#step-6-gateway-commands) +- [Step 7: Launch OpenClaw](#step-7-launch-openclaw) +- [Step 8: Useful CLI Commands](#step-8-useful-cli-commands) +- [Next Steps](#next-steps) -From zero to a private AI agent in about 15 minutes. +--- + +# Overview + +OpenClaw (formerly ClawdBot or Moltbot) is an open-source personal AI assistant that runs locally on your machine. -OpenClaw (formerly ClawdBot or Moltbot) is an open-source personal AI assistant that runs locally on your own machine. Instead of relying on a cloud-only SaaS workflow, it gives you more control over your data, memory, files, and automations. +Unlike cloud-only AI assistants, OpenClaw gives you control over: -You can connect it to messaging platforms such as WhatsApp, Telegram, Lark, and DingTalk, and use it to handle email, manage calendars, write code, or automate everyday tasks. +- Local memory +- Filesystem access +- External tools +- Automation workflows -OpenClaw is more than a chatbot. It acts like a real agent with persistent memory, local file system access, internet access, and expandable skills. +It can connect to messaging platforms such as: -This guide walks through downloading, installing, and configuring OpenClaw, then connecting it to an **LLM Gateway** through a compatible API. +- Telegram +- WhatsApp +- Lark +- DingTalk + +This guide explains how to install OpenClaw and connect it to an **LLM Gateway** that exposes an **OpenAI-compatible API**. + +Once connected, OpenClaw can use external large language models through the gateway. --- # Step 1: Obtain an API Key -Before integrating OpenClaw with the **LLM Gateway**, you need to generate an API key. +Before integrating OpenClaw with the **LLM Gateway**, you need an API key. -1. Visit the API key management page - https://chat.bankofai.io/key +Visit the API key management page: -2. Sign in to your account. +https://chat.bankofai.io/key -3. Create or copy your **API Key**. 
+Steps: -You will use this key later when configuring the model provider inside OpenClaw. +1. Sign in to your account. +2. Generate or copy your **API Key**. +3. Store it securely. -Keep your API key secure and do not share it publicly. +You will use this key when configuring the gateway provider inside OpenClaw. --- # Step 2: Prepare Your System -Before installing OpenClaw, make sure your system meets these requirements. +Make sure your environment meets the following requirements. | Requirement | Details | |---|---| -| Node.js | Version 22 or higher | -| Operating System | macOS, Linux, or Windows via WSL2 | -| Package Manager | npm recommended | +| Node.js | Version **20 LTS or higher** | +| Operating System | macOS / Linux / Windows (WSL2 recommended) | +| Package Manager | npm | -Check your Node.js version: +Check Node.js version: ```bash node -v @@ -49,19 +75,19 @@ node -v ![Check Node.js version](https://files.readme.io/ac27744855c7066d117a856e7005166662e707462312b1925c4e368f5c9c7427-1.png) -If your version is lower than **v22.0.0**, install or upgrade Node.js first. +If your version is below 20, install or upgrade Node.js. --- # Step 3: Install OpenClaw -For beginners, the easiest installation method is: +Install OpenClaw globally: ```bash npm install -g openclaw ``` -Verify the installation: +Verify installation: ```bash openclaw --version @@ -69,9 +95,11 @@ openclaw --version ![Verify OpenClaw installation](https://files.readme.io/6f39989ed49f307a5168bd934c4ffaf2a79db8a2268c6959c3bb731f43138598-2.png) -### Troubleshooting +--- -#### Sharp module error +## Troubleshooting Installation + +### Sharp module error Run: @@ -79,7 +107,7 @@ Run: npm install -g openclaw --force ``` -#### openclaw command not found +### openclaw command not found Find npm global path: @@ -87,29 +115,29 @@ Find npm global path: npm config get prefix ``` -Add it to your PATH if necessary. +Ensure the path is included in your shell `PATH`. 
--- -# Step 4: Complete the Initialization Wizard +# Step 4: Run the Initialization Wizard -Run: +Start the onboarding wizard: ```bash openclaw onboard ``` -The wizard includes three parts. +The wizard will guide you through several setup steps. --- -## 4.1 Model Configuration +## 4.1 Model Provider When asked to choose a model provider, select: **Skip for now** -We will configure the LLM Gateway manually. +We skip this step because the **LLM Gateway will be configured manually in the configuration file**. ![Skip model provider](https://files.readme.io/458a23b58f79ec97f4fee7ed6668a6a799cb37f95be57ddfe96830ad77bb2bb5-4.png) @@ -117,9 +145,9 @@ We will configure the LLM Gateway manually. ## 4.2 Communication Channels -Choose messaging platforms. +Choose messaging channels if desired. -You may skip for now. +You can skip this step and add them later. ![Skip channels](https://files.readme.io/178a9aded9bb4934dcaf7445c0c4edf4569836183ac6019b8c8d08e2917b4549-5.png) @@ -127,10 +155,12 @@ You may skip for now. ## 4.3 Skills -For beginners select: +For beginners, select: **No** +You can enable skills later. + ![Skip skills](https://files.readme.io/d4155d69eb1dd69c1586fa5ab844c602914b0b5a1934265bd97e1f8444b40b5a-6.png) --- @@ -143,7 +173,9 @@ Open the configuration file: ~/.openclaw/openclaw.json ``` -Add a provider configuration. +Add the gateway provider configuration. + +Example: ```json { @@ -151,8 +183,8 @@ Add a provider configuration. "mode": "merge", "providers": { "llm-gateway": { - "baseUrl": "https://api.example.com/v1/", - "apiKey": "{YOUR_API_KEY}", + "base_url": "https://api.example.com/v1", + "api_key": "{YOUR_API_KEY}", "api": "openai-completions", "models": [ { "id": "gpt-5.2", "name": "gpt-5.2" }, @@ -170,16 +202,16 @@ Add a provider configuration. Replace: -- `baseUrl` -- `apiKey` +- `base_url` +- `api_key` -with your real values. +with your real gateway endpoint and API key. 
--- ## Set Default Model -In the same file: +In the same configuration file: ```json { @@ -207,7 +239,7 @@ openclaw gateway restart openclaw agent --agent main --message "How are you doing today?" ``` -If successful, the agent will respond. +If the gateway is configured correctly, the agent will respond. ![Successful response](https://files.readme.io/312c6feb51ea9f4071b75efd3182ef0507a6981baebd3bf7e8d57dec33978efd-7.png) @@ -217,21 +249,11 @@ If successful, the agent will respond. | Action | Command | |---|---| -| Install | `openclaw gateway install` | -| Start | `openclaw gateway start` | -| Stop | `openclaw gateway stop` | -| Restart | `openclaw gateway restart` | -| Status | `openclaw gateway status` | - ---- - -# Diagnostic Commands - -```bash -openclaw doctor -``` - -Runs a health check. +| Install Gateway | `openclaw gateway install` | +| Start Gateway | `openclaw gateway start` | +| Stop Gateway | `openclaw gateway stop` | +| Restart Gateway | `openclaw gateway restart` | +| Check Status | `openclaw gateway status` | --- @@ -239,20 +261,20 @@ Runs a health check. ## Web Dashboard +Start the web interface: + ```bash openclaw ui ``` -Default: - -``` -http://127.0.0.1:18789 -``` +OpenClaw will display the **local access URL** in the terminal. 
--- ## Terminal UI +Launch the terminal interface: + ```bash openclaw tui ``` @@ -263,16 +285,16 @@ Useful commands: | Command | Description | |---|---| -| `/status` | system status | -| `/session` | switch session | -| `/model` | change model | -| `/help` | command list | +| `/status` | View system status | +| `/session` | Switch session | +| `/model` | Change model | +| `/help` | Show command list | --- # Step 8: Useful CLI Commands -Check models: +Check model status: ```bash openclaw models status @@ -300,10 +322,10 @@ openclaw docs # Next Steps -You can now: +You can now extend your OpenClaw setup by: -- Add Telegram or other messaging channels -- Enable skills -- Connect additional APIs -- Customize models -- Build automated AI workflows +- Adding messaging channels (e.g. Telegram) +- Enabling skills +- Connecting external APIs +- Customizing model routing +- Building automated AI workflows From b26284f57eac304772e4da234369336b36aac081 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Sun, 15 Mar 2026 22:51:13 +0800 Subject: [PATCH 18/78] Create ne-click-script-tutorial.md --- .../openclaw/ne-click-script-tutorial.md | 193 ++++++++++++++++++ 1 file changed, 193 insertions(+) create mode 100644 docs/llm-service/openclaw/ne-click-script-tutorial.md diff --git a/docs/llm-service/openclaw/ne-click-script-tutorial.md b/docs/llm-service/openclaw/ne-click-script-tutorial.md new file mode 100644 index 00000000..375e5331 --- /dev/null +++ b/docs/llm-service/openclaw/ne-click-script-tutorial.md @@ -0,0 +1,193 @@ +# OpenClaw Integration with LLM Gateway (One-Click Script Tutorial) + +## Overview + +This tutorial shows how to quickly integrate **OpenClaw** with an **LLM Gateway** using a one-click setup script. + +OpenClaw is a local AI agent framework that runs on your machine and connects to external large language models through APIs. With a gateway that exposes an **OpenAI-compatible API**, OpenClaw can easily interact with various LLM providers. 
+ +The one-click script simplifies the setup process by automatically installing dependencies, configuring the model provider, and initializing OpenClaw. + +--- + +# Step 1: Obtain an API Key + +Before running the integration script, you need an API key for the **LLM Gateway**. + +Visit the API key management page: + +https://chat.bankofai.io/key + +Steps: + +1. Sign in to your account +2. Generate an **API Key** +3. Copy and store the key securely + +You will use this key during the setup process. + +--- + +# Step 2: Run the One-Click Setup Script + +Run the following command in your terminal: + +```bash +curl -fsSL https://example.com/openclaw-install.sh | bash +``` + +This script will automatically: + +- Install OpenClaw +- Configure the LLM Gateway provider +- Install required dependencies +- Initialize OpenClaw configuration + +After the script finishes, OpenClaw will be ready to use. + +--- + +# Step 3: Verify the Installation + +Check the OpenClaw version: + +```bash +openclaw --version +``` + +If OpenClaw was installed correctly, the command will display the current version. + +![OpenClaw version check](https://files.readme.io/6f39989ed49f307a5168bd934c4ffaf2a79db8a2268c6959c3bb731f43138598-2.png) + +--- + +# Step 4: Configure the LLM Gateway + +Open the configuration file: + +```bash +~/.openclaw/openclaw.json +``` + +Add your gateway configuration. + +Example: + +```json +{ + "models": { + "mode": "merge", + "providers": { + "llm-gateway": { + "base_url": "https://api.example.com/v1", + "api_key": "YOUR_API_KEY", + "api": "openai-completions", + "models": [ + { "id": "gpt-5-mini", "name": "gpt-5-mini" }, + { "id": "gpt-5-nano", "name": "gpt-5-nano" }, + { "id": "claude-sonnet", "name": "claude-sonnet" } + ] + } + } + } +} +``` + +Replace: + +- `base_url` +- `api_key` + +with your real gateway endpoint and API key. 
+ +--- + +# Step 5: Restart the Gateway + +Restart OpenClaw after editing the configuration: + +```bash +openclaw gateway restart +``` + +--- + +# Step 6: Test the Integration + +Run a test command: + +```bash +openclaw agent --agent main --message "Hello OpenClaw" +``` + +If the integration is successful, the agent will respond using the configured LLM. + +![Successful test response](https://files.readme.io/312c6feb51ea9f4071b75efd3182ef0507a6981baebd3bf7e8d57dec33978efd-7.png) + +--- + +# Step 7: Launch the Web Interface + +Start the OpenClaw dashboard: + +```bash +openclaw ui +``` + +OpenClaw will print a local access URL in the terminal. + +Example: + +``` +http://127.0.0.1:18789 +``` + +--- + +# Step 8: Launch the Terminal Interface + +You can also use the terminal UI: + +```bash +openclaw tui +``` + +![OpenClaw TUI](https://files.readme.io/9ca9be0db1c21be12488934030c8ef3076c17fbb4fc26cfdb0e71425e46d121d-8.png) + +Useful commands: + +| Command | Description | +|---|---| +| `/status` | view system status | +| `/session` | switch chat session | +| `/model` | change model | +| `/help` | show available commands | + +--- + +# Troubleshooting + +### Gateway Not Responding + +Run a health check: + +```bash +openclaw doctor +``` + +Check gateway status: + +```bash +openclaw gateway status +``` + +--- + +# Next Steps + +After completing the integration, you can: + +- Add messaging channels (Telegram, Slack, etc.) 
+- Enable agent skills +- Connect external APIs +- Build automated AI workflows From d34d9357e2862aac272b196a93621ffbfebb5c29 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Sun, 15 Mar 2026 22:51:33 +0800 Subject: [PATCH 19/78] Rename ne-click-script-tutorial.md to one-click-script-tutorial.md --- .../{ne-click-script-tutorial.md => one-click-script-tutorial.md} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename docs/llm-service/openclaw/{ne-click-script-tutorial.md => one-click-script-tutorial.md} (100%) diff --git a/docs/llm-service/openclaw/ne-click-script-tutorial.md b/docs/llm-service/openclaw/one-click-script-tutorial.md similarity index 100% rename from docs/llm-service/openclaw/ne-click-script-tutorial.md rename to docs/llm-service/openclaw/one-click-script-tutorial.md From 3db8666620c69dd7280f57bf3ae306b385c82b11 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Sun, 15 Mar 2026 22:53:47 +0800 Subject: [PATCH 20/78] Update one-click-script-tutorial.md --- .../openclaw/one-click-script-tutorial.md | 192 ------------------ 1 file changed, 192 deletions(-) diff --git a/docs/llm-service/openclaw/one-click-script-tutorial.md b/docs/llm-service/openclaw/one-click-script-tutorial.md index 375e5331..8b137891 100644 --- a/docs/llm-service/openclaw/one-click-script-tutorial.md +++ b/docs/llm-service/openclaw/one-click-script-tutorial.md @@ -1,193 +1 @@ -# OpenClaw Integration with LLM Gateway (One-Click Script Tutorial) -## Overview - -This tutorial shows how to quickly integrate **OpenClaw** with an **LLM Gateway** using a one-click setup script. - -OpenClaw is a local AI agent framework that runs on your machine and connects to external large language models through APIs. With a gateway that exposes an **OpenAI-compatible API**, OpenClaw can easily interact with various LLM providers. - -The one-click script simplifies the setup process by automatically installing dependencies, configuring the model provider, and initializing OpenClaw. 
- ---- - -# Step 1: Obtain an API Key - -Before running the integration script, you need an API key for the **LLM Gateway**. - -Visit the API key management page: - -https://chat.bankofai.io/key - -Steps: - -1. Sign in to your account -2. Generate an **API Key** -3. Copy and store the key securely - -You will use this key during the setup process. - ---- - -# Step 2: Run the One-Click Setup Script - -Run the following command in your terminal: - -```bash -curl -fsSL https://example.com/openclaw-install.sh | bash -``` - -This script will automatically: - -- Install OpenClaw -- Configure the LLM Gateway provider -- Install required dependencies -- Initialize OpenClaw configuration - -After the script finishes, OpenClaw will be ready to use. - ---- - -# Step 3: Verify the Installation - -Check the OpenClaw version: - -```bash -openclaw --version -``` - -If OpenClaw was installed correctly, the command will display the current version. - -![OpenClaw version check](https://files.readme.io/6f39989ed49f307a5168bd934c4ffaf2a79db8a2268c6959c3bb731f43138598-2.png) - ---- - -# Step 4: Configure the LLM Gateway - -Open the configuration file: - -```bash -~/.openclaw/openclaw.json -``` - -Add your gateway configuration. - -Example: - -```json -{ - "models": { - "mode": "merge", - "providers": { - "llm-gateway": { - "base_url": "https://api.example.com/v1", - "api_key": "YOUR_API_KEY", - "api": "openai-completions", - "models": [ - { "id": "gpt-5-mini", "name": "gpt-5-mini" }, - { "id": "gpt-5-nano", "name": "gpt-5-nano" }, - { "id": "claude-sonnet", "name": "claude-sonnet" } - ] - } - } - } -} -``` - -Replace: - -- `base_url` -- `api_key` - -with your real gateway endpoint and API key. 
- ---- - -# Step 5: Restart the Gateway - -Restart OpenClaw after editing the configuration: - -```bash -openclaw gateway restart -``` - ---- - -# Step 6: Test the Integration - -Run a test command: - -```bash -openclaw agent --agent main --message "Hello OpenClaw" -``` - -If the integration is successful, the agent will respond using the configured LLM. - -![Successful test response](https://files.readme.io/312c6feb51ea9f4071b75efd3182ef0507a6981baebd3bf7e8d57dec33978efd-7.png) - ---- - -# Step 7: Launch the Web Interface - -Start the OpenClaw dashboard: - -```bash -openclaw ui -``` - -OpenClaw will print a local access URL in the terminal. - -Example: - -``` -http://127.0.0.1:18789 -``` - ---- - -# Step 8: Launch the Terminal Interface - -You can also use the terminal UI: - -```bash -openclaw tui -``` - -![OpenClaw TUI](https://files.readme.io/9ca9be0db1c21be12488934030c8ef3076c17fbb4fc26cfdb0e71425e46d121d-8.png) - -Useful commands: - -| Command | Description | -|---|---| -| `/status` | view system status | -| `/session` | switch chat session | -| `/model` | change model | -| `/help` | show available commands | - ---- - -# Troubleshooting - -### Gateway Not Responding - -Run a health check: - -```bash -openclaw doctor -``` - -Check gateway status: - -```bash -openclaw gateway status -``` - ---- - -# Next Steps - -After completing the integration, you can: - -- Add messaging channels (Telegram, Slack, etc.) 
-- Enable agent skills -- Connect external APIs -- Build automated AI workflows From a9b0079fe747fb80adfce8f291cbf87841ad89af Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Mon, 16 Mar 2026 14:38:45 +0800 Subject: [PATCH 21/78] Add files via upload --- docs/llm-service/api/ai_studio_code.yaml | 98 ++++++++++++++++++++++++ 1 file changed, 98 insertions(+) create mode 100644 docs/llm-service/api/ai_studio_code.yaml diff --git a/docs/llm-service/api/ai_studio_code.yaml b/docs/llm-service/api/ai_studio_code.yaml new file mode 100644 index 00000000..a52e3d1e --- /dev/null +++ b/docs/llm-service/api/ai_studio_code.yaml @@ -0,0 +1,98 @@ +openapi: 3.0.3 +info: + title: 用户管理 API 示例 + description: 这是一个用于在 GitHub 上展示的 OpenAPI 规范示例,包含了基础的用户查询接口。 + version: 1.0.0 + contact: + name: 你的名字/团队名 + url: https://github.com/你的用户名 + email: your-email@example.com + +# 定义 API 的基础路径 (可配置多个,如下方包含生产环境和测试环境) +servers: + - url: https://api.yoursite.com/v1 + description: 生产环境服务器 + - url: https://staging-api.yoursite.com/v1 + description: 测试环境服务器 + +# 定义所有的接口路径 +paths: + /users/{id}: + get: + tags: + - Users # 用于在 Swagger UI 中对接口进行分类 + summary: 获取用户信息 + description: 根据用户的唯一 ID 获取该用户的详细信息。 + operationId: getUserById + + # 定义请求参数 + parameters: + - name: id + in: path # 参数位置:可以是在 path(路径), query(问号后面), header, cookie + description: 用户的唯一 ID + required: true + schema: + type: integer + format: int64 + example: 1 + + # 定义所有可能的响应结果 + responses: + '200': + description: 请求成功,返回用户信息 + content: + application/json: + schema: + $ref: '#/components/schemas/UserResponse' # 引用下方定义的数据模型 + '400': + description: 请求参数错误 (例如 ID 格式不对) + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + '404': + description: 用户不存在 + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + +# 定义可复用的组件 (如数据模型、安全认证等) +components: + schemas: + # 成功响应的 User 数据模型 + UserResponse: + type: object + properties: + code: + type: integer + example: 200 + data: + type: object + 
properties: + id: + type: integer + format: int64 + example: 1 + name: + type: string + example: GitHub User + email: + type: string + format: email + example: user@example.com + createdAt: + type: string + format: date-time + example: "2023-10-01T12:00:00Z" + + # 失败响应的 Error 数据模型 + ErrorResponse: + type: object + properties: + code: + type: integer + example: 404 + message: + type: string + example: "未找到该用户" \ No newline at end of file From c2b0274e5e2af36c748cabdab5103047179fa798 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Mon, 16 Mar 2026 15:04:21 +0800 Subject: [PATCH 22/78] Update sidebars.js --- sidebars.js | 60 ++++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 55 insertions(+), 5 deletions(-) diff --git a/sidebars.js b/sidebars.js index b37e251a..096e1508 100644 --- a/sidebars.js +++ b/sidebars.js @@ -69,12 +69,15 @@ const sidebars = { type: 'doc', id: '8004/SupportedNetworks', }, - { type: 'category', label: 'Usage', collapsed: false, - items: ['8004/Usage/Install', '8004/Usage/ConfigureAgents', '8004/Usage/RegistrationHTTP'], + items: [ + '8004/Usage/Install', + '8004/Usage/ConfigureAgents', + '8004/Usage/RegistrationHTTP' + ], }, ], }, @@ -103,7 +106,6 @@ const sidebars = { 'McpServer-Skills/MCP/TRONMCPServer/OfficialServerAccess', 'McpServer-Skills/MCP/TRONMCPServer/LocalPrivatizedDeployment', 'McpServer-Skills/MCP/TRONMCPServer/API', - ], }, { @@ -115,7 +117,6 @@ const sidebars = { 'McpServer-Skills/MCP/SUNMCPServer/OfficialServerAccess', 'McpServer-Skills/MCP/SUNMCPServer/LocalPrivatizedDeployment', 'McpServer-Skills/MCP/SUNMCPServer/API', - ], }, { @@ -146,7 +147,56 @@ const sidebars = { type: 'category', label: 'Openclaw Extension', collapsed: false, - items: ['Openclaw-extension/Overview', 'Openclaw-extension/Setup-use'], + items: [ + 'Openclaw-extension/Overview', + 'Openclaw-extension/Setup-use' + ], + }, + + /* --- 这里是为你修正后的 LLM Service 部分 --- */ + { + type: 'category', + label: 'LLM Service', + collapsed: false, + items: 
[ + { type: 'doc', id: 'llm-service/introduction', label: 'Introduction' }, + { type: 'doc', id: 'llm-service/quick-start', label: 'Quick Start' }, + { type: 'doc', id: 'llm-service/pricing-and-usage', label: 'Pricing and Usage' }, + { + type: 'category', + label: 'Models', + collapsed: true, + items: [ + 'llm-service/models/chatgpt-5-2', + 'llm-service/models/chatgpt-5-mini', + 'llm-service/models/chatgpt-5-nano', + 'llm-service/models/claude-haiku-4-5', + 'llm-service/models/claude-opus-4-5', + 'llm-service/models/claude-opus-4-6', + 'llm-service/models/claude-sonnet-4-5', + 'llm-service/models/claude-sonnet-4-6', + 'llm-service/models/gemini-3-1-pro', + 'llm-service/models/gemini-3-flash', + ], + }, + { + type: 'category', + label: 'OpenClaw', + collapsed: true, + items: [ + 'llm-service/openclaw/integration-guide', + 'llm-service/openclaw/one-click-script-tutorial', + ], + }, + { + type: 'category', + label: 'API', + collapsed: true, + items: [ + 'llm-service/api/chat-completion', + ], + }, + ], }, ], } From b711bc941590ce8de7a45b54aba357b6e8af4156 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Mon, 16 Mar 2026 16:09:13 +0800 Subject: [PATCH 23/78] Add files via upload --- docs/llm-service/api/ai_studio_code.md | 47 ++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) create mode 100644 docs/llm-service/api/ai_studio_code.md diff --git a/docs/llm-service/api/ai_studio_code.md b/docs/llm-service/api/ai_studio_code.md new file mode 100644 index 00000000..550aed28 --- /dev/null +++ b/docs/llm-service/api/ai_studio_code.md @@ -0,0 +1,47 @@ +# 用户管理 API 示例 +这是一个用于在 GitHub 上展示的 OpenAPI 规范示例,包含了基础的用户查询接口。 + +## Version: 1.0.0 + +**Contact information:** +你的名字/团队名 +https://github.com/你的用户名 +your-email@example.com + +--- + +### [GET] /users/{id} +**获取用户信息** + +根据用户的唯一 ID 获取该用户的详细信息。 + +#### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| id | path | 用户的唯一 ID | Yes | long | + +#### 
Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | 请求成功,返回用户信息 | **application/json**: [UserResponse](#userresponse)
| +| 400 | 请求参数错误 (例如 ID 格式不对) | **application/json**: [ErrorResponse](#errorresponse)
| +| 404 | 用户不存在 | **application/json**: [ErrorResponse](#errorresponse)
| + +--- +### Schemas + +#### UserResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code | integer | *Example:* `200` | No | +| data | { **"id"**: long, **"name"**: string, **"email"**: string (email), **"createdAt"**: dateTime } | | No | + +#### ErrorResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code | integer | *Example:* `404` | No | +| message | string | *Example:* `"未找到该用户"` | No | From 3f3bd9efa3d0a3ab4c21a1d75f3874b3729c3ecf Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Mon, 16 Mar 2026 17:10:36 +0800 Subject: [PATCH 24/78] Add files via upload --- docs/llm-service/api/swagger.json | 390 ++++++++++++++++++++++++++++++ 1 file changed, 390 insertions(+) create mode 100644 docs/llm-service/api/swagger.json diff --git a/docs/llm-service/api/swagger.json b/docs/llm-service/api/swagger.json new file mode 100644 index 00000000..5b9ea1c6 --- /dev/null +++ b/docs/llm-service/api/swagger.json @@ -0,0 +1,390 @@ +{ + "schemes": [ + "https" + ], + "swagger": "2.0", + "info": { + "description": "OpenAPI spec for /v1/models and /v1/chat/completions (OpenAI format).", + "title": "AI API (OpenAI Compatible)", + "contact": {}, + "version": "1.0" + }, + "host": "api.ainft.com", + "basePath": "/", + "paths": { + "/v1/chat/completions": { + "post": { + "description": "Chat completion. Auth: Bearer token. Non-stream: JSON with choices[].content. Stream: SSE chunks with choices[].delta.content.", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Chat Completions" + ], + "summary": "Create chat completion (OpenAI compatible)", + "parameters": [ + { + "type": "string", + "description": "Bearer \u0026lt;token\u0026gt;, e.g. 
Bearer sk-xxx", + "name": "Authorization", + "in": "header", + "required": true + }, + { + "description": "Request body (model, messages required; stream, max_tokens, temperature, top_p, stop, n optional)", + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/main.ChatCompletionsRequest" + } + } + ], + "responses": { + "200": { + "description": "Non-stream: choices[].content. Stream (SSE): each chunk is ChatCompletionsStreamChunk with choices[].delta.content.", + "schema": { + "$ref": "#/definitions/main.ChatCompletionsResponse" + } + }, + "401": { + "description": "Authentication failed", + "schema": { + "type": "object" + } + } + } + } + }, + "/v1/models": { + "get": { + "description": "List available models. Auth: Bearer token. Response: object, success, data.", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Model List" + ], + "summary": "List models (OpenAI compatible)", + "parameters": [ + { + "type": "string", + "description": "Bearer \u0026lt;token\u0026gt;, e.g. Bearer sk-xxx", + "name": "Authorization", + "in": "header", + "required": true + } + ], + "responses": { + "200": { + "description": "object: list; success: true; data: array of { id, object, created, owned_by }", + "schema": { + "$ref": "#/definitions/main.V1ModelsResponse" + } + }, + "401": { + "description": "Authentication failed", + "schema": { + "type": "object" + } + } + } + } + } + }, + "definitions": { + "main.ChatChoice": { + "type": "object", + "properties": { + "content": { + "type": "string" + }, + "finish_reason": { + "type": "string", + "example": "stop" + }, + "index": { + "type": "integer" + } + } + }, + "main.ChatCompletionsRequest": { + "type": "object", + "properties": { + "frequency_penalty": { + "description": "FrequencyPenalty: -2.0 to 2.0. Penalize repeated tokens. 
Default 0.", + "type": "number" + }, + "max_tokens": { + "description": "MaxTokens: maximum number of tokens that can be generated in the completion.", + "type": "integer" + }, + "messages": { + "description": "Messages: list of messages in the conversation. Required.", + "type": "array", + "items": { + "$ref": "#/definitions/main.ChatMessage" + } + }, + "model": { + "description": "Model: ID of the model to use (e.g. gpt-4). Required.", + "type": "string", + "example": "gpt-4" + }, + "n": { + "description": "N: how many chat completion choices to generate. Default 1.", + "type": "integer" + }, + "presence_penalty": { + "description": "PresencePenalty: -2.0 to 2.0. Penalize tokens that appear in the text so far. Default 0.", + "type": "number" + }, + "response_format": { + "description": "ResponseFormat: specify output format: { \"type\": \"text\" } or { \"type\": \"json_object\" } or json_schema.", + "allOf": [ + { + "$ref": "#/definitions/main.ChatResponseFormat" + } + ] + }, + "seed": { + "description": "Seed: random seed for deterministic sampling (if supported by model).", + "type": "integer" + }, + "stop": { + "description": "Stop: up to 4 sequences where the API will stop generating. String or array of strings." + }, + "stream": { + "description": "Stream: if true, partial message deltas will be sent as server-sent events. Default false.", + "type": "boolean" + }, + "temperature": { + "description": "Temperature: sampling temperature between 0 and 2. Higher = more random. Default 1.", + "type": "number" + }, + "tool_choice": { + "description": "ToolChoice: \"none\" | \"auto\" | { \"type\": \"function\", \"function\": { \"name\": \"...\" } }. Controls which tool(s) to call." + }, + "tools": { + "description": "Tools: list of tools the model may call. Each has type \"function\" and function { name, description?, parameters? 
}.", + "type": "array", + "items": { + "$ref": "#/definitions/main.ChatTool" + } + }, + "top_p": { + "description": "TopP: nucleus sampling: consider tokens with top_p probability mass. Default 1.", + "type": "number" + }, + "user": { + "description": "User: optional end-user identifier for abuse monitoring.", + "type": "string" + } + } + }, + "main.ChatCompletionsResponse": { + "type": "object", + "properties": { + "choices": { + "type": "array", + "items": { + "$ref": "#/definitions/main.ChatChoice" + } + }, + "created": { + "type": "integer", + "example": 1677652288 + }, + "id": { + "type": "string", + "example": "chatcmpl-xxx" + }, + "model": { + "type": "string", + "example": "gpt-4" + }, + "object": { + "type": "string", + "example": "chat.completion" + }, + "usage": { + "$ref": "#/definitions/main.ChatUsage" + } + } + }, + "main.ChatMessage": { + "type": "object", + "properties": { + "content": { + "description": "Content: message content. For tool role, the result of the tool call.", + "type": "string", + "example": "Hello" + }, + "name": { + "description": "Name: optional name for the message author (e.g. to disambiguate multiple users).", + "type": "string" + }, + "role": { + "description": "Role: \"system\" | \"user\" | \"assistant\" | \"tool\". System sets behavior; user/assistant are conversation; tool is tool result.", + "type": "string", + "example": "user" + }, + "tool_call_id": { + "description": "ToolCallId: when role is \"tool\", the id of the tool call this result is for. 
Required for tool messages.", + "type": "string" + }, + "tool_calls": { + "description": "ToolCalls: when role is \"assistant\" and the model called tools, array of { id, type, function: { name, arguments } }.", + "type": "array", + "items": { + "$ref": "#/definitions/main.ChatToolCallItem" + } + } + } + }, + "main.ChatResponseFormat": { + "type": "object", + "properties": { + "json_schema": { + "description": "JsonSchema: when type is json_schema, optional schema for the output." + }, + "type": { + "description": "Type: \"text\" or \"json_object\".", + "type": "string" + } + } + }, + "main.ChatTool": { + "type": "object", + "properties": { + "function": { + "description": "Function: function definition (name, description, parameters).", + "allOf": [ + { + "$ref": "#/definitions/main.ChatToolFunction" + } + ] + }, + "type": { + "description": "Type: must be \"function\".", + "type": "string", + "example": "function" + } + } + }, + "main.ChatToolCallFunction": { + "type": "object", + "properties": { + "arguments": { + "description": "Arguments: JSON string of the arguments.", + "type": "string" + }, + "name": { + "description": "Name: name of the function to call.", + "type": "string" + } + } + }, + "main.ChatToolCallItem": { + "type": "object", + "properties": { + "function": { + "description": "Function: name and arguments of the call.", + "allOf": [ + { + "$ref": "#/definitions/main.ChatToolCallFunction" + } + ] + }, + "id": { + "description": "Id: ID of the tool call.", + "type": "string" + }, + "type": { + "description": "Type: \"function\".", + "type": "string", + "example": "function" + } + } + }, + "main.ChatToolFunction": { + "type": "object", + "properties": { + "description": { + "description": "Description: optional description for the model.", + "type": "string" + }, + "name": { + "description": "Name: name of the function.", + "type": "string" + }, + "parameters": { + "description": "Parameters: optional JSON schema for the function arguments." 
+ } + } + }, + "main.ChatUsage": { + "type": "object", + "properties": { + "completion_tokens": { + "description": "CompletionTokens: number of tokens in the completion.", + "type": "integer" + }, + "prompt_tokens": { + "description": "PromptTokens: number of tokens in the prompt.", + "type": "integer" + }, + "total_tokens": { + "description": "TotalTokens: total tokens (prompt + completion).", + "type": "integer" + } + } + }, + "main.V1ModelItem": { + "type": "object", + "properties": { + "created": { + "type": "integer", + "example": 1626777600 + }, + "id": { + "type": "string", + "example": "gpt-4" + }, + "object": { + "type": "string", + "example": "model" + }, + "owned_by": { + "type": "string", + "example": "openai" + } + } + }, + "main.V1ModelsResponse": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "$ref": "#/definitions/main.V1ModelItem" + } + }, + "object": { + "type": "string", + "example": "list" + }, + "success": { + "type": "boolean", + "example": true + } + } + } + } +} \ No newline at end of file From 5409581b7cdfcfa2c5152beda920ab1813a44a55 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Mon, 16 Mar 2026 17:11:57 +0800 Subject: [PATCH 25/78] Add files via upload --- docs/llm-service/api/Bankofai API.md | 171 +++++++++++++++++++++++++++ 1 file changed, 171 insertions(+) create mode 100644 docs/llm-service/api/Bankofai API.md diff --git a/docs/llm-service/api/Bankofai API.md b/docs/llm-service/api/Bankofai API.md new file mode 100644 index 00000000..550b607c --- /dev/null +++ b/docs/llm-service/api/Bankofai API.md @@ -0,0 +1,171 @@ +# AI API (OpenAI Compatible) +OpenAPI spec for /v1/models and /v1/chat/completions (OpenAI format). + +## Version: 1.0 + +**Schemes:** https + +--- +### /v1/chat/completions + +#### POST +##### Summary + +Create chat completion (OpenAI compatible) + +##### Description + +Chat completion. Auth: Bearer token. Non-stream: JSON with choices[].content. 
Stream: SSE chunks with choices[].delta.content. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| Authorization | header | Bearer <token>, e.g. Bearer sk-xxx | Yes | string | +| body | body | Request body (model, messages required; stream, max_tokens, temperature, top_p, stop, n optional) | Yes | [main.ChatCompletionsRequest](#mainchatcompletionsrequest) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Non-stream: choices[].content. Stream (SSE): each chunk is ChatCompletionsStreamChunk with choices[].delta.content. | [main.ChatCompletionsResponse](#mainchatcompletionsresponse) | +| 401 | Authentication failed | object | + +--- +### /v1/models + +#### GET +##### Summary + +List models (OpenAI compatible) + +##### Description + +List available models. Auth: Bearer token. Response: object, success, data. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| Authorization | header | Bearer <token>, e.g. Bearer sk-xxx | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | object: list; success: true; data: array of { id, object, created, owned_by } | [main.V1ModelsResponse](#mainv1modelsresponse) | +| 401 | Authentication failed | object | + +--- +### Models + +#### main.ChatChoice + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | No | +| finish_reason | string | *Example:* `"stop"` | No | +| index | integer | | No | + +#### main.ChatCompletionsRequest + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| frequency_penalty | number | FrequencyPenalty: -2.0 to 2.0. Penalize repeated tokens. Default 0. 
| No | +| max_tokens | integer | MaxTokens: maximum number of tokens that can be generated in the completion. | No | +| messages | [ [main.ChatMessage](#mainchatmessage) ] | Messages: list of messages in the conversation. Required. | No | +| model | string | Model: ID of the model to use (e.g. gpt-4). Required.
*Example:* `"gpt-4"` | No | +| n | integer | N: how many chat completion choices to generate. Default 1. | No | +| presence_penalty | number | PresencePenalty: -2.0 to 2.0. Penalize tokens that appear in the text so far. Default 0. | No | +| response_format | [main.ChatResponseFormat](#mainchatresponseformat) | ResponseFormat: specify output format: { "type": "text" } or { "type": "json_object" } or json_schema. | No | +| seed | integer | Seed: random seed for deterministic sampling (if supported by model). | No | +| stop | | Stop: up to 4 sequences where the API will stop generating. String or array of strings. | No | +| stream | boolean | Stream: if true, partial message deltas will be sent as server-sent events. Default false. | No | +| temperature | number | Temperature: sampling temperature between 0 and 2. Higher = more random. Default 1. | No | +| tool_choice | | ToolChoice: "none" \| "auto" \| { "type": "function", "function": { "name": "..." } }. Controls which tool(s) to call. | No | +| tools | [ [main.ChatTool](#mainchattool) ] | Tools: list of tools the model may call. Each has type "function" and function { name, description?, parameters? }. | No | +| top_p | number | TopP: nucleus sampling: consider tokens with top_p probability mass. Default 1. | No | +| user | string | User: optional end-user identifier for abuse monitoring. | No | + +#### main.ChatCompletionsResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| choices | [ [main.ChatChoice](#mainchatchoice) ] | | No | +| created | integer | *Example:* `1677652288` | No | +| id | string | *Example:* `"chatcmpl-xxx"` | No | +| model | string | *Example:* `"gpt-4"` | No | +| object | string | *Example:* `"chat.completion"` | No | +| usage | [main.ChatUsage](#mainchatusage) | | No | + +#### main.ChatMessage + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | Content: message content. 
For tool role, the result of the tool call.
*Example:* `"Hello"` | No | +| name | string | Name: optional name for the message author (e.g. to disambiguate multiple users). | No | +| role | string | Role: "system" \| "user" \| "assistant" \| "tool". System sets behavior; user/assistant are conversation; tool is tool result.
*Example:* `"user"` | No | +| tool_call_id | string | ToolCallId: when role is "tool", the id of the tool call this result is for. Required for tool messages. | No | +| tool_calls | [ [main.ChatToolCallItem](#mainchattoolcallitem) ] | ToolCalls: when role is "assistant" and the model called tools, array of { id, type, function: { name, arguments } }. | No | + +#### main.ChatResponseFormat + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| json_schema | | JsonSchema: when type is json_schema, optional schema for the output. | No | +| type | string | Type: "text" or "json_object". | No | + +#### main.ChatTool + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| function | [main.ChatToolFunction](#mainchattoolfunction) | Function: function definition (name, description, parameters). | No | +| type | string | Type: must be "function".
*Example:* `"function"` | No | + +#### main.ChatToolCallFunction + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| arguments | string | Arguments: JSON string of the arguments. | No | +| name | string | Name: name of the function to call. | No | + +#### main.ChatToolCallItem + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| function | [main.ChatToolCallFunction](#mainchattoolcallfunction) | Function: name and arguments of the call. | No | +| id | string | Id: ID of the tool call. | No | +| type | string | Type: "function".
*Example:* `"function"` | No | + +#### main.ChatToolFunction + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | Description: optional description for the model. | No | +| name | string | Name: name of the function. | No | +| parameters | | Parameters: optional JSON schema for the function arguments. | No | + +#### main.ChatUsage + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| completion_tokens | integer | CompletionTokens: number of tokens in the completion. | No | +| prompt_tokens | integer | PromptTokens: number of tokens in the prompt. | No | +| total_tokens | integer | TotalTokens: total tokens (prompt + completion). | No | + +#### main.V1ModelItem + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created | integer | *Example:* `1626777600` | No | +| id | string | *Example:* `"gpt-4"` | No | +| object | string | *Example:* `"model"` | No | +| owned_by | string | *Example:* `"openai"` | No | + +#### main.V1ModelsResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [main.V1ModelItem](#mainv1modelitem) ] | | No | +| object | string | *Example:* `"list"` | No | +| success | boolean | *Example:* `true` | No | From f76f57e29f1129aa5855112908230ec78dd56b16 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Mon, 16 Mar 2026 17:15:58 +0800 Subject: [PATCH 26/78] Delete docs/llm-service/api/ai_studio_code.md --- docs/llm-service/api/ai_studio_code.md | 47 -------------------------- 1 file changed, 47 deletions(-) delete mode 100644 docs/llm-service/api/ai_studio_code.md diff --git a/docs/llm-service/api/ai_studio_code.md b/docs/llm-service/api/ai_studio_code.md deleted file mode 100644 index 550aed28..00000000 --- a/docs/llm-service/api/ai_studio_code.md +++ /dev/null @@ -1,47 +0,0 @@ -# 用户管理 API 示例 -这是一个用于在 GitHub 上展示的 OpenAPI 规范示例,包含了基础的用户查询接口。 - -## Version: 1.0.0 - -**Contact 
information:** -你的名字/团队名 -https://github.com/你的用户名 -your-email@example.com - ---- - -### [GET] /users/{id} -**获取用户信息** - -根据用户的唯一 ID 获取该用户的详细信息。 - -#### Parameters - -| Name | Located in | Description | Required | Schema | -| ---- | ---------- | ----------- | -------- | ------ | -| id | path | 用户的唯一 ID | Yes | long | - -#### Responses - -| Code | Description | Schema | -| ---- | ----------- | ------ | -| 200 | 请求成功,返回用户信息 | **application/json**: [UserResponse](#userresponse)
| -| 400 | 请求参数错误 (例如 ID 格式不对) | **application/json**: [ErrorResponse](#errorresponse)
| -| 404 | 用户不存在 | **application/json**: [ErrorResponse](#errorresponse)
| - ---- -### Schemas - -#### UserResponse - -| Name | Type | Description | Required | -| ---- | ---- | ----------- | -------- | -| code | integer | *Example:* `200` | No | -| data | { **"id"**: long, **"name"**: string, **"email"**: string (email), **"createdAt"**: dateTime } | | No | - -#### ErrorResponse - -| Name | Type | Description | Required | -| ---- | ---- | ----------- | -------- | -| code | integer | *Example:* `404` | No | -| message | string | *Example:* `"未找到该用户"` | No | From 787c9df34220d1d2ba713e47d6082c813441370a Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Mon, 16 Mar 2026 17:16:27 +0800 Subject: [PATCH 27/78] Delete docs/llm-service/api/ai_studio_code.yaml --- docs/llm-service/api/ai_studio_code.yaml | 98 ------------------------ 1 file changed, 98 deletions(-) delete mode 100644 docs/llm-service/api/ai_studio_code.yaml diff --git a/docs/llm-service/api/ai_studio_code.yaml b/docs/llm-service/api/ai_studio_code.yaml deleted file mode 100644 index a52e3d1e..00000000 --- a/docs/llm-service/api/ai_studio_code.yaml +++ /dev/null @@ -1,98 +0,0 @@ -openapi: 3.0.3 -info: - title: 用户管理 API 示例 - description: 这是一个用于在 GitHub 上展示的 OpenAPI 规范示例,包含了基础的用户查询接口。 - version: 1.0.0 - contact: - name: 你的名字/团队名 - url: https://github.com/你的用户名 - email: your-email@example.com - -# 定义 API 的基础路径 (可配置多个,如下方包含生产环境和测试环境) -servers: - - url: https://api.yoursite.com/v1 - description: 生产环境服务器 - - url: https://staging-api.yoursite.com/v1 - description: 测试环境服务器 - -# 定义所有的接口路径 -paths: - /users/{id}: - get: - tags: - - Users # 用于在 Swagger UI 中对接口进行分类 - summary: 获取用户信息 - description: 根据用户的唯一 ID 获取该用户的详细信息。 - operationId: getUserById - - # 定义请求参数 - parameters: - - name: id - in: path # 参数位置:可以是在 path(路径), query(问号后面), header, cookie - description: 用户的唯一 ID - required: true - schema: - type: integer - format: int64 - example: 1 - - # 定义所有可能的响应结果 - responses: - '200': - description: 请求成功,返回用户信息 - content: - application/json: - schema: - $ref: '#/components/schemas/UserResponse' # 
引用下方定义的数据模型 - '400': - description: 请求参数错误 (例如 ID 格式不对) - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - '404': - description: 用户不存在 - content: - application/json: - schema: - $ref: '#/components/schemas/ErrorResponse' - -# 定义可复用的组件 (如数据模型、安全认证等) -components: - schemas: - # 成功响应的 User 数据模型 - UserResponse: - type: object - properties: - code: - type: integer - example: 200 - data: - type: object - properties: - id: - type: integer - format: int64 - example: 1 - name: - type: string - example: GitHub User - email: - type: string - format: email - example: user@example.com - createdAt: - type: string - format: date-time - example: "2023-10-01T12:00:00Z" - - # 失败响应的 Error 数据模型 - ErrorResponse: - type: object - properties: - code: - type: integer - example: 404 - message: - type: string - example: "未找到该用户" \ No newline at end of file From 0511de17d960e708dbdb8a7530291b3825c9afd2 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Mon, 16 Mar 2026 17:17:16 +0800 Subject: [PATCH 28/78] Update swagger.json --- docs/llm-service/api/swagger.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/llm-service/api/swagger.json b/docs/llm-service/api/swagger.json index 5b9ea1c6..265876b2 100644 --- a/docs/llm-service/api/swagger.json +++ b/docs/llm-service/api/swagger.json @@ -5,7 +5,7 @@ "swagger": "2.0", "info": { "description": "OpenAPI spec for /v1/models and /v1/chat/completions (OpenAI format).", - "title": "AI API (OpenAI Compatible)", + "title": "BANKOFAI API (OpenAI Compatible)", "contact": {}, "version": "1.0" }, @@ -387,4 +387,4 @@ } } } -} \ No newline at end of file +} From 40de1f6a8f2bd5b507a5b028f0cc1f0fd6aa1cdd Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Mon, 16 Mar 2026 17:21:54 +0800 Subject: [PATCH 29/78] Delete docs/llm-service/api/swagger.json --- docs/llm-service/api/swagger.json | 390 ------------------------------ 1 file changed, 390 deletions(-) delete mode 100644 
docs/llm-service/api/swagger.json diff --git a/docs/llm-service/api/swagger.json b/docs/llm-service/api/swagger.json deleted file mode 100644 index 265876b2..00000000 --- a/docs/llm-service/api/swagger.json +++ /dev/null @@ -1,390 +0,0 @@ -{ - "schemes": [ - "https" - ], - "swagger": "2.0", - "info": { - "description": "OpenAPI spec for /v1/models and /v1/chat/completions (OpenAI format).", - "title": "BANKOFAI API (OpenAI Compatible)", - "contact": {}, - "version": "1.0" - }, - "host": "api.ainft.com", - "basePath": "/", - "paths": { - "/v1/chat/completions": { - "post": { - "description": "Chat completion. Auth: Bearer token. Non-stream: JSON with choices[].content. Stream: SSE chunks with choices[].delta.content.", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "Chat Completions" - ], - "summary": "Create chat completion (OpenAI compatible)", - "parameters": [ - { - "type": "string", - "description": "Bearer \u0026lt;token\u0026gt;, e.g. Bearer sk-xxx", - "name": "Authorization", - "in": "header", - "required": true - }, - { - "description": "Request body (model, messages required; stream, max_tokens, temperature, top_p, stop, n optional)", - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/main.ChatCompletionsRequest" - } - } - ], - "responses": { - "200": { - "description": "Non-stream: choices[].content. Stream (SSE): each chunk is ChatCompletionsStreamChunk with choices[].delta.content.", - "schema": { - "$ref": "#/definitions/main.ChatCompletionsResponse" - } - }, - "401": { - "description": "Authentication failed", - "schema": { - "type": "object" - } - } - } - } - }, - "/v1/models": { - "get": { - "description": "List available models. Auth: Bearer token. 
Response: object, success, data.", - "consumes": [ - "application/json" - ], - "produces": [ - "application/json" - ], - "tags": [ - "Model List" - ], - "summary": "List models (OpenAI compatible)", - "parameters": [ - { - "type": "string", - "description": "Bearer \u0026lt;token\u0026gt;, e.g. Bearer sk-xxx", - "name": "Authorization", - "in": "header", - "required": true - } - ], - "responses": { - "200": { - "description": "object: list; success: true; data: array of { id, object, created, owned_by }", - "schema": { - "$ref": "#/definitions/main.V1ModelsResponse" - } - }, - "401": { - "description": "Authentication failed", - "schema": { - "type": "object" - } - } - } - } - } - }, - "definitions": { - "main.ChatChoice": { - "type": "object", - "properties": { - "content": { - "type": "string" - }, - "finish_reason": { - "type": "string", - "example": "stop" - }, - "index": { - "type": "integer" - } - } - }, - "main.ChatCompletionsRequest": { - "type": "object", - "properties": { - "frequency_penalty": { - "description": "FrequencyPenalty: -2.0 to 2.0. Penalize repeated tokens. Default 0.", - "type": "number" - }, - "max_tokens": { - "description": "MaxTokens: maximum number of tokens that can be generated in the completion.", - "type": "integer" - }, - "messages": { - "description": "Messages: list of messages in the conversation. Required.", - "type": "array", - "items": { - "$ref": "#/definitions/main.ChatMessage" - } - }, - "model": { - "description": "Model: ID of the model to use (e.g. gpt-4). Required.", - "type": "string", - "example": "gpt-4" - }, - "n": { - "description": "N: how many chat completion choices to generate. Default 1.", - "type": "integer" - }, - "presence_penalty": { - "description": "PresencePenalty: -2.0 to 2.0. Penalize tokens that appear in the text so far. 
Default 0.", - "type": "number" - }, - "response_format": { - "description": "ResponseFormat: specify output format: { \"type\": \"text\" } or { \"type\": \"json_object\" } or json_schema.", - "allOf": [ - { - "$ref": "#/definitions/main.ChatResponseFormat" - } - ] - }, - "seed": { - "description": "Seed: random seed for deterministic sampling (if supported by model).", - "type": "integer" - }, - "stop": { - "description": "Stop: up to 4 sequences where the API will stop generating. String or array of strings." - }, - "stream": { - "description": "Stream: if true, partial message deltas will be sent as server-sent events. Default false.", - "type": "boolean" - }, - "temperature": { - "description": "Temperature: sampling temperature between 0 and 2. Higher = more random. Default 1.", - "type": "number" - }, - "tool_choice": { - "description": "ToolChoice: \"none\" | \"auto\" | { \"type\": \"function\", \"function\": { \"name\": \"...\" } }. Controls which tool(s) to call." - }, - "tools": { - "description": "Tools: list of tools the model may call. Each has type \"function\" and function { name, description?, parameters? }.", - "type": "array", - "items": { - "$ref": "#/definitions/main.ChatTool" - } - }, - "top_p": { - "description": "TopP: nucleus sampling: consider tokens with top_p probability mass. 
Default 1.", - "type": "number" - }, - "user": { - "description": "User: optional end-user identifier for abuse monitoring.", - "type": "string" - } - } - }, - "main.ChatCompletionsResponse": { - "type": "object", - "properties": { - "choices": { - "type": "array", - "items": { - "$ref": "#/definitions/main.ChatChoice" - } - }, - "created": { - "type": "integer", - "example": 1677652288 - }, - "id": { - "type": "string", - "example": "chatcmpl-xxx" - }, - "model": { - "type": "string", - "example": "gpt-4" - }, - "object": { - "type": "string", - "example": "chat.completion" - }, - "usage": { - "$ref": "#/definitions/main.ChatUsage" - } - } - }, - "main.ChatMessage": { - "type": "object", - "properties": { - "content": { - "description": "Content: message content. For tool role, the result of the tool call.", - "type": "string", - "example": "Hello" - }, - "name": { - "description": "Name: optional name for the message author (e.g. to disambiguate multiple users).", - "type": "string" - }, - "role": { - "description": "Role: \"system\" | \"user\" | \"assistant\" | \"tool\". System sets behavior; user/assistant are conversation; tool is tool result.", - "type": "string", - "example": "user" - }, - "tool_call_id": { - "description": "ToolCallId: when role is \"tool\", the id of the tool call this result is for. Required for tool messages.", - "type": "string" - }, - "tool_calls": { - "description": "ToolCalls: when role is \"assistant\" and the model called tools, array of { id, type, function: { name, arguments } }.", - "type": "array", - "items": { - "$ref": "#/definitions/main.ChatToolCallItem" - } - } - } - }, - "main.ChatResponseFormat": { - "type": "object", - "properties": { - "json_schema": { - "description": "JsonSchema: when type is json_schema, optional schema for the output." 
- }, - "type": { - "description": "Type: \"text\" or \"json_object\".", - "type": "string" - } - } - }, - "main.ChatTool": { - "type": "object", - "properties": { - "function": { - "description": "Function: function definition (name, description, parameters).", - "allOf": [ - { - "$ref": "#/definitions/main.ChatToolFunction" - } - ] - }, - "type": { - "description": "Type: must be \"function\".", - "type": "string", - "example": "function" - } - } - }, - "main.ChatToolCallFunction": { - "type": "object", - "properties": { - "arguments": { - "description": "Arguments: JSON string of the arguments.", - "type": "string" - }, - "name": { - "description": "Name: name of the function to call.", - "type": "string" - } - } - }, - "main.ChatToolCallItem": { - "type": "object", - "properties": { - "function": { - "description": "Function: name and arguments of the call.", - "allOf": [ - { - "$ref": "#/definitions/main.ChatToolCallFunction" - } - ] - }, - "id": { - "description": "Id: ID of the tool call.", - "type": "string" - }, - "type": { - "description": "Type: \"function\".", - "type": "string", - "example": "function" - } - } - }, - "main.ChatToolFunction": { - "type": "object", - "properties": { - "description": { - "description": "Description: optional description for the model.", - "type": "string" - }, - "name": { - "description": "Name: name of the function.", - "type": "string" - }, - "parameters": { - "description": "Parameters: optional JSON schema for the function arguments." 
- } - } - }, - "main.ChatUsage": { - "type": "object", - "properties": { - "completion_tokens": { - "description": "CompletionTokens: number of tokens in the completion.", - "type": "integer" - }, - "prompt_tokens": { - "description": "PromptTokens: number of tokens in the prompt.", - "type": "integer" - }, - "total_tokens": { - "description": "TotalTokens: total tokens (prompt + completion).", - "type": "integer" - } - } - }, - "main.V1ModelItem": { - "type": "object", - "properties": { - "created": { - "type": "integer", - "example": 1626777600 - }, - "id": { - "type": "string", - "example": "gpt-4" - }, - "object": { - "type": "string", - "example": "model" - }, - "owned_by": { - "type": "string", - "example": "openai" - } - } - }, - "main.V1ModelsResponse": { - "type": "object", - "properties": { - "data": { - "type": "array", - "items": { - "$ref": "#/definitions/main.V1ModelItem" - } - }, - "object": { - "type": "string", - "example": "list" - }, - "success": { - "type": "boolean", - "example": true - } - } - } - } -} From 72680782654eb4bc84a2405f3e380ab4db033a5e Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Mon, 16 Mar 2026 17:22:22 +0800 Subject: [PATCH 30/78] Delete docs/llm-service/api/Bankofai API.md --- docs/llm-service/api/Bankofai API.md | 171 --------------------------- 1 file changed, 171 deletions(-) delete mode 100644 docs/llm-service/api/Bankofai API.md diff --git a/docs/llm-service/api/Bankofai API.md b/docs/llm-service/api/Bankofai API.md deleted file mode 100644 index 550b607c..00000000 --- a/docs/llm-service/api/Bankofai API.md +++ /dev/null @@ -1,171 +0,0 @@ -# AI API (OpenAI Compatible) -OpenAPI spec for /v1/models and /v1/chat/completions (OpenAI format). - -## Version: 1.0 - -**Schemes:** https - ---- -### /v1/chat/completions - -#### POST -##### Summary - -Create chat completion (OpenAI compatible) - -##### Description - -Chat completion. Auth: Bearer token. Non-stream: JSON with choices[].content. 
Stream: SSE chunks with choices[].delta.content. - -##### Parameters - -| Name | Located in | Description | Required | Schema | -| ---- | ---------- | ----------- | -------- | ------ | -| Authorization | header | Bearer <token>, e.g. Bearer sk-xxx | Yes | string | -| body | body | Request body (model, messages required; stream, max_tokens, temperature, top_p, stop, n optional) | Yes | [main.ChatCompletionsRequest](#mainchatcompletionsrequest) | - -##### Responses - -| Code | Description | Schema | -| ---- | ----------- | ------ | -| 200 | Non-stream: choices[].content. Stream (SSE): each chunk is ChatCompletionsStreamChunk with choices[].delta.content. | [main.ChatCompletionsResponse](#mainchatcompletionsresponse) | -| 401 | Authentication failed | object | - ---- -### /v1/models - -#### GET -##### Summary - -List models (OpenAI compatible) - -##### Description - -List available models. Auth: Bearer token. Response: object, success, data. - -##### Parameters - -| Name | Located in | Description | Required | Schema | -| ---- | ---------- | ----------- | -------- | ------ | -| Authorization | header | Bearer <token>, e.g. Bearer sk-xxx | Yes | string | - -##### Responses - -| Code | Description | Schema | -| ---- | ----------- | ------ | -| 200 | object: list; success: true; data: array of { id, object, created, owned_by } | [main.V1ModelsResponse](#mainv1modelsresponse) | -| 401 | Authentication failed | object | - ---- -### Models - -#### main.ChatChoice - -| Name | Type | Description | Required | -| ---- | ---- | ----------- | -------- | -| content | string | | No | -| finish_reason | string | *Example:* `"stop"` | No | -| index | integer | | No | - -#### main.ChatCompletionsRequest - -| Name | Type | Description | Required | -| ---- | ---- | ----------- | -------- | -| frequency_penalty | number | FrequencyPenalty: -2.0 to 2.0. Penalize repeated tokens. Default 0. 
| No | -| max_tokens | integer | MaxTokens: maximum number of tokens that can be generated in the completion. | No | -| messages | [ [main.ChatMessage](#mainchatmessage) ] | Messages: list of messages in the conversation. Required. | No | -| model | string | Model: ID of the model to use (e.g. gpt-4). Required.
*Example:* `"gpt-4"` | No | -| n | integer | N: how many chat completion choices to generate. Default 1. | No | -| presence_penalty | number | PresencePenalty: -2.0 to 2.0. Penalize tokens that appear in the text so far. Default 0. | No | -| response_format | [main.ChatResponseFormat](#mainchatresponseformat) | ResponseFormat: specify output format: { "type": "text" } or { "type": "json_object" } or json_schema. | No | -| seed | integer | Seed: random seed for deterministic sampling (if supported by model). | No | -| stop | | Stop: up to 4 sequences where the API will stop generating. String or array of strings. | No | -| stream | boolean | Stream: if true, partial message deltas will be sent as server-sent events. Default false. | No | -| temperature | number | Temperature: sampling temperature between 0 and 2. Higher = more random. Default 1. | No | -| tool_choice | | ToolChoice: "none" \| "auto" \| { "type": "function", "function": { "name": "..." } }. Controls which tool(s) to call. | No | -| tools | [ [main.ChatTool](#mainchattool) ] | Tools: list of tools the model may call. Each has type "function" and function { name, description?, parameters? }. | No | -| top_p | number | TopP: nucleus sampling: consider tokens with top_p probability mass. Default 1. | No | -| user | string | User: optional end-user identifier for abuse monitoring. | No | - -#### main.ChatCompletionsResponse - -| Name | Type | Description | Required | -| ---- | ---- | ----------- | -------- | -| choices | [ [main.ChatChoice](#mainchatchoice) ] | | No | -| created | integer | *Example:* `1677652288` | No | -| id | string | *Example:* `"chatcmpl-xxx"` | No | -| model | string | *Example:* `"gpt-4"` | No | -| object | string | *Example:* `"chat.completion"` | No | -| usage | [main.ChatUsage](#mainchatusage) | | No | - -#### main.ChatMessage - -| Name | Type | Description | Required | -| ---- | ---- | ----------- | -------- | -| content | string | Content: message content. 
For tool role, the result of the tool call.
*Example:* `"Hello"` | No | -| name | string | Name: optional name for the message author (e.g. to disambiguate multiple users). | No | -| role | string | Role: "system" \| "user" \| "assistant" \| "tool". System sets behavior; user/assistant are conversation; tool is tool result.
*Example:* `"user"` | No | -| tool_call_id | string | ToolCallId: when role is "tool", the id of the tool call this result is for. Required for tool messages. | No | -| tool_calls | [ [main.ChatToolCallItem](#mainchattoolcallitem) ] | ToolCalls: when role is "assistant" and the model called tools, array of { id, type, function: { name, arguments } }. | No | - -#### main.ChatResponseFormat - -| Name | Type | Description | Required | -| ---- | ---- | ----------- | -------- | -| json_schema | | JsonSchema: when type is json_schema, optional schema for the output. | No | -| type | string | Type: "text" or "json_object". | No | - -#### main.ChatTool - -| Name | Type | Description | Required | -| ---- | ---- | ----------- | -------- | -| function | [main.ChatToolFunction](#mainchattoolfunction) | Function: function definition (name, description, parameters). | No | -| type | string | Type: must be "function".
*Example:* `"function"` | No | - -#### main.ChatToolCallFunction - -| Name | Type | Description | Required | -| ---- | ---- | ----------- | -------- | -| arguments | string | Arguments: JSON string of the arguments. | No | -| name | string | Name: name of the function to call. | No | - -#### main.ChatToolCallItem - -| Name | Type | Description | Required | -| ---- | ---- | ----------- | -------- | -| function | [main.ChatToolCallFunction](#mainchattoolcallfunction) | Function: name and arguments of the call. | No | -| id | string | Id: ID of the tool call. | No | -| type | string | Type: "function".
*Example:* `"function"` | No | - -#### main.ChatToolFunction - -| Name | Type | Description | Required | -| ---- | ---- | ----------- | -------- | -| description | string | Description: optional description for the model. | No | -| name | string | Name: name of the function. | No | -| parameters | | Parameters: optional JSON schema for the function arguments. | No | - -#### main.ChatUsage - -| Name | Type | Description | Required | -| ---- | ---- | ----------- | -------- | -| completion_tokens | integer | CompletionTokens: number of tokens in the completion. | No | -| prompt_tokens | integer | PromptTokens: number of tokens in the prompt. | No | -| total_tokens | integer | TotalTokens: total tokens (prompt + completion). | No | - -#### main.V1ModelItem - -| Name | Type | Description | Required | -| ---- | ---- | ----------- | -------- | -| created | integer | *Example:* `1626777600` | No | -| id | string | *Example:* `"gpt-4"` | No | -| object | string | *Example:* `"model"` | No | -| owned_by | string | *Example:* `"openai"` | No | - -#### main.V1ModelsResponse - -| Name | Type | Description | Required | -| ---- | ---- | ----------- | -------- | -| data | [ [main.V1ModelItem](#mainv1modelitem) ] | | No | -| object | string | *Example:* `"list"` | No | -| success | boolean | *Example:* `true` | No | From 07e37992908f101b93f564e889df687c15183466 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Mon, 16 Mar 2026 17:27:20 +0800 Subject: [PATCH 31/78] Add files via upload --- docs/llm-service/api/Bankofai API.md | 171 +++++++++++++++++++++++++++ 1 file changed, 171 insertions(+) create mode 100644 docs/llm-service/api/Bankofai API.md diff --git a/docs/llm-service/api/Bankofai API.md b/docs/llm-service/api/Bankofai API.md new file mode 100644 index 00000000..550b607c --- /dev/null +++ b/docs/llm-service/api/Bankofai API.md @@ -0,0 +1,171 @@ +# AI API (OpenAI Compatible) +OpenAPI spec for /v1/models and /v1/chat/completions (OpenAI format). 
+ +## Version: 1.0 + +**Schemes:** https + +--- +### /v1/chat/completions + +#### POST +##### Summary + +Create chat completion (OpenAI compatible) + +##### Description + +Chat completion. Auth: Bearer token. Non-stream: JSON with choices[].content. Stream: SSE chunks with choices[].delta.content. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| Authorization | header | Bearer <token>, e.g. Bearer sk-xxx | Yes | string | +| body | body | Request body (model, messages required; stream, max_tokens, temperature, top_p, stop, n optional) | Yes | [main.ChatCompletionsRequest](#mainchatcompletionsrequest) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Non-stream: choices[].content. Stream (SSE): each chunk is ChatCompletionsStreamChunk with choices[].delta.content. | [main.ChatCompletionsResponse](#mainchatcompletionsresponse) | +| 401 | Authentication failed | object | + +--- +### /v1/models + +#### GET +##### Summary + +List models (OpenAI compatible) + +##### Description + +List available models. Auth: Bearer token. Response: object, success, data. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| Authorization | header | Bearer <token>, e.g. 
Bearer sk-xxx | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | object: list; success: true; data: array of { id, object, created, owned_by } | [main.V1ModelsResponse](#mainv1modelsresponse) | +| 401 | Authentication failed | object | + +--- +### Models + +#### main.ChatChoice + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | No | +| finish_reason | string | *Example:* `"stop"` | No | +| index | integer | | No | + +#### main.ChatCompletionsRequest + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| frequency_penalty | number | FrequencyPenalty: -2.0 to 2.0. Penalize repeated tokens. Default 0. | No | +| max_tokens | integer | MaxTokens: maximum number of tokens that can be generated in the completion. | No | +| messages | [ [main.ChatMessage](#mainchatmessage) ] | Messages: list of messages in the conversation. Required. | No | +| model | string | Model: ID of the model to use (e.g. gpt-4). Required.
*Example:* `"gpt-4"` | No | +| n | integer | N: how many chat completion choices to generate. Default 1. | No | +| presence_penalty | number | PresencePenalty: -2.0 to 2.0. Penalize tokens that appear in the text so far. Default 0. | No | +| response_format | [main.ChatResponseFormat](#mainchatresponseformat) | ResponseFormat: specify output format: { "type": "text" } or { "type": "json_object" } or json_schema. | No | +| seed | integer | Seed: random seed for deterministic sampling (if supported by model). | No | +| stop | | Stop: up to 4 sequences where the API will stop generating. String or array of strings. | No | +| stream | boolean | Stream: if true, partial message deltas will be sent as server-sent events. Default false. | No | +| temperature | number | Temperature: sampling temperature between 0 and 2. Higher = more random. Default 1. | No | +| tool_choice | | ToolChoice: "none" \| "auto" \| { "type": "function", "function": { "name": "..." } }. Controls which tool(s) to call. | No | +| tools | [ [main.ChatTool](#mainchattool) ] | Tools: list of tools the model may call. Each has type "function" and function { name, description?, parameters? }. | No | +| top_p | number | TopP: nucleus sampling: consider tokens with top_p probability mass. Default 1. | No | +| user | string | User: optional end-user identifier for abuse monitoring. | No | + +#### main.ChatCompletionsResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| choices | [ [main.ChatChoice](#mainchatchoice) ] | | No | +| created | integer | *Example:* `1677652288` | No | +| id | string | *Example:* `"chatcmpl-xxx"` | No | +| model | string | *Example:* `"gpt-4"` | No | +| object | string | *Example:* `"chat.completion"` | No | +| usage | [main.ChatUsage](#mainchatusage) | | No | + +#### main.ChatMessage + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | Content: message content. 
For tool role, the result of the tool call.
*Example:* `"Hello"` | No | +| name | string | Name: optional name for the message author (e.g. to disambiguate multiple users). | No | +| role | string | Role: "system" \| "user" \| "assistant" \| "tool". System sets behavior; user/assistant are conversation; tool is tool result.
*Example:* `"user"` | No | +| tool_call_id | string | ToolCallId: when role is "tool", the id of the tool call this result is for. Required for tool messages. | No | +| tool_calls | [ [main.ChatToolCallItem](#mainchattoolcallitem) ] | ToolCalls: when role is "assistant" and the model called tools, array of { id, type, function: { name, arguments } }. | No | + +#### main.ChatResponseFormat + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| json_schema | | JsonSchema: when type is json_schema, optional schema for the output. | No | +| type | string | Type: "text" or "json_object". | No | + +#### main.ChatTool + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| function | [main.ChatToolFunction](#mainchattoolfunction) | Function: function definition (name, description, parameters). | No | +| type | string | Type: must be "function".
*Example:* `"function"` | No | + +#### main.ChatToolCallFunction + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| arguments | string | Arguments: JSON string of the arguments. | No | +| name | string | Name: name of the function to call. | No | + +#### main.ChatToolCallItem + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| function | [main.ChatToolCallFunction](#mainchattoolcallfunction) | Function: name and arguments of the call. | No | +| id | string | Id: ID of the tool call. | No | +| type | string | Type: "function".
*Example:* `"function"` | No | + +#### main.ChatToolFunction + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | Description: optional description for the model. | No | +| name | string | Name: name of the function. | No | +| parameters | | Parameters: optional JSON schema for the function arguments. | No | + +#### main.ChatUsage + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| completion_tokens | integer | CompletionTokens: number of tokens in the completion. | No | +| prompt_tokens | integer | PromptTokens: number of tokens in the prompt. | No | +| total_tokens | integer | TotalTokens: total tokens (prompt + completion). | No | + +#### main.V1ModelItem + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created | integer | *Example:* `1626777600` | No | +| id | string | *Example:* `"gpt-4"` | No | +| object | string | *Example:* `"model"` | No | +| owned_by | string | *Example:* `"openai"` | No | + +#### main.V1ModelsResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [main.V1ModelItem](#mainv1modelitem) ] | | No | +| object | string | *Example:* `"list"` | No | +| success | boolean | *Example:* `true` | No | From da0d9c286f5b58e70d704a12a4fa6c859ead9acb Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Mon, 16 Mar 2026 18:13:22 +0800 Subject: [PATCH 32/78] Add files via upload --- docs/llm-service/api/API.md | 173 ++++++++++++++++++++++++++++++++++++ 1 file changed, 173 insertions(+) create mode 100644 docs/llm-service/api/API.md diff --git a/docs/llm-service/api/API.md b/docs/llm-service/api/API.md new file mode 100644 index 00000000..18f091af --- /dev/null +++ b/docs/llm-service/api/API.md @@ -0,0 +1,173 @@ +# AI API (OpenAI Compatible) +OpenAPI spec for /v1/models and /v1/chat/completions (OpenAI format). 
+ +## Version: 1.0 + +**Schemes:** https: + +**Host:** api.bankofai.io + +--- +### /v1/chat/completions + +#### POST +##### Summary + +Create chat completion (OpenAI compatible) + +##### Description + +Chat completion. Auth: Bearer token. Non-stream: JSON with choices[].content. Stream: SSE chunks with choices[].delta.content. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| Authorization | header | Bearer <token>, e.g. Bearer sk-xxx | Yes | string | +| body | body | Request body (model, messages required; stream, max_tokens, temperature, top_p, stop, n optional) | Yes | [main.ChatCompletionsRequest](#mainchatcompletionsrequest) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Non-stream: choices[].content. Stream (SSE): each chunk is ChatCompletionsStreamChunk with choices[].delta.content. | [main.ChatCompletionsResponse](#mainchatcompletionsresponse) | +| 401 | Authentication failed | object | + +--- +### /v1/models + +#### GET +##### Summary + +List models (OpenAI compatible) + +##### Description + +List available models. Auth: Bearer token. Response: object, success, data. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| Authorization | header | Bearer <token>, e.g. 
Bearer sk-xxx | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | object: list; success: true; data: array of { id, object, created, owned_by } | [main.V1ModelsResponse](#mainv1modelsresponse) | +| 401 | Authentication failed | object | + +--- +### Models + +#### main.ChatChoice + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | No | +| finish_reason | string | *Example:* `"stop"` | No | +| index | integer | | No | + +#### main.ChatCompletionsRequest + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| frequency_penalty | number | FrequencyPenalty: -2.0 to 2.0. Penalize repeated tokens. Default 0. | No | +| max_tokens | integer | MaxTokens: maximum number of tokens that can be generated in the completion. | No | +| messages | [ [main.ChatMessage](#mainchatmessage) ] | Messages: list of messages in the conversation. Required. | No | +| model | string | Model: ID of the model to use (e.g. gpt-4). Required.
*Example:* `"gpt-4"` | No | +| n | integer | N: how many chat completion choices to generate. Default 1. | No | +| presence_penalty | number | PresencePenalty: -2.0 to 2.0. Penalize tokens that appear in the text so far. Default 0. | No | +| response_format | [main.ChatResponseFormat](#mainchatresponseformat) | ResponseFormat: specify output format: { "type": "text" } or { "type": "json_object" } or json_schema. | No | +| seed | integer | Seed: random seed for deterministic sampling (if supported by model). | No | +| stop | | Stop: up to 4 sequences where the API will stop generating. String or array of strings. | No | +| stream | boolean | Stream: if true, partial message deltas will be sent as server-sent events. Default false. | No | +| temperature | number | Temperature: sampling temperature between 0 and 2. Higher = more random. Default 1. | No | +| tool_choice | | ToolChoice: "none" \| "auto" \| { "type": "function", "function": { "name": "..." } }. Controls which tool(s) to call. | No | +| tools | [ [main.ChatTool](#mainchattool) ] | Tools: list of tools the model may call. Each has type "function" and function { name, description?, parameters? }. | No | +| top_p | number | TopP: nucleus sampling: consider tokens with top_p probability mass. Default 1. | No | +| user | string | User: optional end-user identifier for abuse monitoring. | No | + +#### main.ChatCompletionsResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| choices | [ [main.ChatChoice](#mainchatchoice) ] | | No | +| created | integer | *Example:* `1677652288` | No | +| id | string | *Example:* `"chatcmpl-xxx"` | No | +| model | string | *Example:* `"gpt-4"` | No | +| object | string | *Example:* `"chat.completion"` | No | +| usage | [main.ChatUsage](#mainchatusage) | | No | + +#### main.ChatMessage + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | Content: message content. 
For tool role, the result of the tool call.
*Example:* `"Hello"` | No | +| name | string | Name: optional name for the message author (e.g. to disambiguate multiple users). | No | +| role | string | Role: "system" \| "user" \| "assistant" \| "tool". System sets behavior; user/assistant are conversation; tool is tool result.
*Example:* `"user"` | No | +| tool_call_id | string | ToolCallId: when role is "tool", the id of the tool call this result is for. Required for tool messages. | No | +| tool_calls | [ [main.ChatToolCallItem](#mainchattoolcallitem) ] | ToolCalls: when role is "assistant" and the model called tools, array of { id, type, function: { name, arguments } }. | No | + +#### main.ChatResponseFormat + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| json_schema | | JsonSchema: when type is json_schema, optional schema for the output. | No | +| type | string | Type: "text" or "json_object". | No | + +#### main.ChatTool + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| function | [main.ChatToolFunction](#mainchattoolfunction) | Function: function definition (name, description, parameters). | No | +| type | string | Type: must be "function".
*Example:* `"function"` | No | + +#### main.ChatToolCallFunction + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| arguments | string | Arguments: JSON string of the arguments. | No | +| name | string | Name: name of the function to call. | No | + +#### main.ChatToolCallItem + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| function | [main.ChatToolCallFunction](#mainchattoolcallfunction) | Function: name and arguments of the call. | No | +| id | string | Id: ID of the tool call. | No | +| type | string | Type: "function".
*Example:* `"function"` | No | + +#### main.ChatToolFunction + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | Description: optional description for the model. | No | +| name | string | Name: name of the function. | No | +| parameters | | Parameters: optional JSON schema for the function arguments. | No | + +#### main.ChatUsage + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| completion_tokens | integer | CompletionTokens: number of tokens in the completion. | No | +| prompt_tokens | integer | PromptTokens: number of tokens in the prompt. | No | +| total_tokens | integer | TotalTokens: total tokens (prompt + completion). | No | + +#### main.V1ModelItem + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created | integer | *Example:* `1626777600` | No | +| id | string | *Example:* `"gpt-4"` | No | +| object | string | *Example:* `"model"` | No | +| owned_by | string | *Example:* `"openai"` | No | + +#### main.V1ModelsResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [main.V1ModelItem](#mainv1modelitem) ] | | No | +| object | string | *Example:* `"list"` | No | +| success | boolean | *Example:* `true` | No | From ad59fc779624d4138d0012d5cbddb49db1820079 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Mon, 16 Mar 2026 18:14:58 +0800 Subject: [PATCH 33/78] Update API.md --- docs/llm-service/api/API.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/llm-service/api/API.md b/docs/llm-service/api/API.md index 18f091af..962f2ca7 100644 --- a/docs/llm-service/api/API.md +++ b/docs/llm-service/api/API.md @@ -1,9 +1,9 @@ -# AI API (OpenAI Compatible) +# BANK OF AI LLM API (OpenAI Compatible) OpenAPI spec for /v1/models and /v1/chat/completions (OpenAI format). 
## Version: 1.0 -**Schemes:** https: +**Schemes:** https **Host:** api.bankofai.io From 5ee977aca19d752015256f3479e51446daa004d1 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Mon, 16 Mar 2026 18:15:19 +0800 Subject: [PATCH 34/78] Delete docs/llm-service/api/Bankofai API.md --- docs/llm-service/api/Bankofai API.md | 171 --------------------------- 1 file changed, 171 deletions(-) delete mode 100644 docs/llm-service/api/Bankofai API.md diff --git a/docs/llm-service/api/Bankofai API.md b/docs/llm-service/api/Bankofai API.md deleted file mode 100644 index 550b607c..00000000 --- a/docs/llm-service/api/Bankofai API.md +++ /dev/null @@ -1,171 +0,0 @@ -# AI API (OpenAI Compatible) -OpenAPI spec for /v1/models and /v1/chat/completions (OpenAI format). - -## Version: 1.0 - -**Schemes:** https - ---- -### /v1/chat/completions - -#### POST -##### Summary - -Create chat completion (OpenAI compatible) - -##### Description - -Chat completion. Auth: Bearer token. Non-stream: JSON with choices[].content. Stream: SSE chunks with choices[].delta.content. - -##### Parameters - -| Name | Located in | Description | Required | Schema | -| ---- | ---------- | ----------- | -------- | ------ | -| Authorization | header | Bearer <token>, e.g. Bearer sk-xxx | Yes | string | -| body | body | Request body (model, messages required; stream, max_tokens, temperature, top_p, stop, n optional) | Yes | [main.ChatCompletionsRequest](#mainchatcompletionsrequest) | - -##### Responses - -| Code | Description | Schema | -| ---- | ----------- | ------ | -| 200 | Non-stream: choices[].content. Stream (SSE): each chunk is ChatCompletionsStreamChunk with choices[].delta.content. | [main.ChatCompletionsResponse](#mainchatcompletionsresponse) | -| 401 | Authentication failed | object | - ---- -### /v1/models - -#### GET -##### Summary - -List models (OpenAI compatible) - -##### Description - -List available models. Auth: Bearer token. Response: object, success, data. 
- -##### Parameters - -| Name | Located in | Description | Required | Schema | -| ---- | ---------- | ----------- | -------- | ------ | -| Authorization | header | Bearer <token>, e.g. Bearer sk-xxx | Yes | string | - -##### Responses - -| Code | Description | Schema | -| ---- | ----------- | ------ | -| 200 | object: list; success: true; data: array of { id, object, created, owned_by } | [main.V1ModelsResponse](#mainv1modelsresponse) | -| 401 | Authentication failed | object | - ---- -### Models - -#### main.ChatChoice - -| Name | Type | Description | Required | -| ---- | ---- | ----------- | -------- | -| content | string | | No | -| finish_reason | string | *Example:* `"stop"` | No | -| index | integer | | No | - -#### main.ChatCompletionsRequest - -| Name | Type | Description | Required | -| ---- | ---- | ----------- | -------- | -| frequency_penalty | number | FrequencyPenalty: -2.0 to 2.0. Penalize repeated tokens. Default 0. | No | -| max_tokens | integer | MaxTokens: maximum number of tokens that can be generated in the completion. | No | -| messages | [ [main.ChatMessage](#mainchatmessage) ] | Messages: list of messages in the conversation. Required. | No | -| model | string | Model: ID of the model to use (e.g. gpt-4). Required.
*Example:* `"gpt-4"` | No | -| n | integer | N: how many chat completion choices to generate. Default 1. | No | -| presence_penalty | number | PresencePenalty: -2.0 to 2.0. Penalize tokens that appear in the text so far. Default 0. | No | -| response_format | [main.ChatResponseFormat](#mainchatresponseformat) | ResponseFormat: specify output format: { "type": "text" } or { "type": "json_object" } or json_schema. | No | -| seed | integer | Seed: random seed for deterministic sampling (if supported by model). | No | -| stop | | Stop: up to 4 sequences where the API will stop generating. String or array of strings. | No | -| stream | boolean | Stream: if true, partial message deltas will be sent as server-sent events. Default false. | No | -| temperature | number | Temperature: sampling temperature between 0 and 2. Higher = more random. Default 1. | No | -| tool_choice | | ToolChoice: "none" \| "auto" \| { "type": "function", "function": { "name": "..." } }. Controls which tool(s) to call. | No | -| tools | [ [main.ChatTool](#mainchattool) ] | Tools: list of tools the model may call. Each has type "function" and function { name, description?, parameters? }. | No | -| top_p | number | TopP: nucleus sampling: consider tokens with top_p probability mass. Default 1. | No | -| user | string | User: optional end-user identifier for abuse monitoring. | No | - -#### main.ChatCompletionsResponse - -| Name | Type | Description | Required | -| ---- | ---- | ----------- | -------- | -| choices | [ [main.ChatChoice](#mainchatchoice) ] | | No | -| created | integer | *Example:* `1677652288` | No | -| id | string | *Example:* `"chatcmpl-xxx"` | No | -| model | string | *Example:* `"gpt-4"` | No | -| object | string | *Example:* `"chat.completion"` | No | -| usage | [main.ChatUsage](#mainchatusage) | | No | - -#### main.ChatMessage - -| Name | Type | Description | Required | -| ---- | ---- | ----------- | -------- | -| content | string | Content: message content. 
For tool role, the result of the tool call.
*Example:* `"Hello"` | No | -| name | string | Name: optional name for the message author (e.g. to disambiguate multiple users). | No | -| role | string | Role: "system" \| "user" \| "assistant" \| "tool". System sets behavior; user/assistant are conversation; tool is tool result.
*Example:* `"user"` | No | -| tool_call_id | string | ToolCallId: when role is "tool", the id of the tool call this result is for. Required for tool messages. | No | -| tool_calls | [ [main.ChatToolCallItem](#mainchattoolcallitem) ] | ToolCalls: when role is "assistant" and the model called tools, array of { id, type, function: { name, arguments } }. | No | - -#### main.ChatResponseFormat - -| Name | Type | Description | Required | -| ---- | ---- | ----------- | -------- | -| json_schema | | JsonSchema: when type is json_schema, optional schema for the output. | No | -| type | string | Type: "text" or "json_object". | No | - -#### main.ChatTool - -| Name | Type | Description | Required | -| ---- | ---- | ----------- | -------- | -| function | [main.ChatToolFunction](#mainchattoolfunction) | Function: function definition (name, description, parameters). | No | -| type | string | Type: must be "function".
*Example:* `"function"` | No | - -#### main.ChatToolCallFunction - -| Name | Type | Description | Required | -| ---- | ---- | ----------- | -------- | -| arguments | string | Arguments: JSON string of the arguments. | No | -| name | string | Name: name of the function to call. | No | - -#### main.ChatToolCallItem - -| Name | Type | Description | Required | -| ---- | ---- | ----------- | -------- | -| function | [main.ChatToolCallFunction](#mainchattoolcallfunction) | Function: name and arguments of the call. | No | -| id | string | Id: ID of the tool call. | No | -| type | string | Type: "function".
*Example:* `"function"` | No | - -#### main.ChatToolFunction - -| Name | Type | Description | Required | -| ---- | ---- | ----------- | -------- | -| description | string | Description: optional description for the model. | No | -| name | string | Name: name of the function. | No | -| parameters | | Parameters: optional JSON schema for the function arguments. | No | - -#### main.ChatUsage - -| Name | Type | Description | Required | -| ---- | ---- | ----------- | -------- | -| completion_tokens | integer | CompletionTokens: number of tokens in the completion. | No | -| prompt_tokens | integer | PromptTokens: number of tokens in the prompt. | No | -| total_tokens | integer | TotalTokens: total tokens (prompt + completion). | No | - -#### main.V1ModelItem - -| Name | Type | Description | Required | -| ---- | ---- | ----------- | -------- | -| created | integer | *Example:* `1626777600` | No | -| id | string | *Example:* `"gpt-4"` | No | -| object | string | *Example:* `"model"` | No | -| owned_by | string | *Example:* `"openai"` | No | - -#### main.V1ModelsResponse - -| Name | Type | Description | Required | -| ---- | ---- | ----------- | -------- | -| data | [ [main.V1ModelItem](#mainv1modelitem) ] | | No | -| object | string | *Example:* `"list"` | No | -| success | boolean | *Example:* `true` | No | From b7967212504a5801f4e795fd1e8a184e45f60440 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Mon, 16 Mar 2026 18:15:33 +0800 Subject: [PATCH 35/78] Delete docs/llm-service/api/chat-completion.md --- docs/llm-service/api/chat-completion.md | 1 - 1 file changed, 1 deletion(-) delete mode 100644 docs/llm-service/api/chat-completion.md diff --git a/docs/llm-service/api/chat-completion.md b/docs/llm-service/api/chat-completion.md deleted file mode 100644 index 8b137891..00000000 --- a/docs/llm-service/api/chat-completion.md +++ /dev/null @@ -1 +0,0 @@ - From a0643c514a67949532988042aec8e571d2ed82b5 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Mon, 16 Mar 2026 18:40:24 
+0800 Subject: [PATCH 36/78] Update one-click-script-tutorial.md --- .../openclaw/one-click-script-tutorial.md | 121 ++++++++++++++++++ 1 file changed, 121 insertions(+) diff --git a/docs/llm-service/openclaw/one-click-script-tutorial.md b/docs/llm-service/openclaw/one-click-script-tutorial.md index 8b137891..d4d44151 100644 --- a/docs/llm-service/openclaw/one-click-script-tutorial.md +++ b/docs/llm-service/openclaw/one-click-script-tutorial.md @@ -1 +1,122 @@ +## Quick Start +**Before running this script, please ensure:** + +1. Node.js 22 or higher is installed +2. OpenClaw is installed and initialized (you have run `openclaw onboard`) +3. Network connection is normal, and the AINFT API is accessible + +### Script Commands + +**Linux & macOS:** + +Mac users: search for "Terminal" in Applications, open it, and enter the command below: + +```bash +curl https://chat.bankofai.io/scripts/openclaw-install-bankofai-provider.sh | bash +``` + +**Windows PowerShell:** + +Windows users: search for "PowerShell" in the Start menu, open it, and enter the command below (CMD is not supported) + +```powershell +iwr https://chat.bankofai.io/scripts/openclaw-install-bankofai-provider.ps1 | iex +``` + +*** + +## Detailed Steps + +### 1. Apply for an API Key + +1. Log in to the [BANK OF AI Platform](https://chat.bankofai.io/) +2. Go to the [API Key Management Page](https://chat.bankofai.io/key) +3. Click to apply for a new API Key + + +![](https://files.readme.io/9011e70e9009bd0e6bcf318f8100e23e7b483a783fc7786b03e03107437d05d6-image.png) + +
+
+***
+
+### 2. Run the Installation Script
+
+Depending on your operating system, execute the corresponding command above. The script will automatically:
+
+- Check the environment (Node.js, OpenClaw, etc.)
+- Prompt you to enter your API Key
+
+![](https://files.readme.io/1ab2f1ab9d444b570d435296f06270b541186c9d487bc795b2106b873baf7a23-image.png)
+
+***
+
+### 3. Select a Default Model
+
+After validating the API Key, the script will fetch the list of available models and prompt you to select a default model:
+
+![](https://files.readme.io/f420e5e6e6214f0e02c56c546a3625f5793bb1dc6e5cb8ef4aed6e547a054308-image.png)
+
+**:exclamation: Note:exclamation: **Gemini series models are currently largely unusable in OpenClaw due to client fingerprinting strictness for function calls. Please choose with caution.
+
+***
+
+### 4. Complete Configuration
+
+Once the selection is complete, the script will automatically:
+
+- Back up the original configuration
+- Update the OpenClaw configuration file
+- Restart the Gateway
+
+![](https://files.readme.io/7772254ccf61a1147f9aa87f036ce1045b1077f824dede9a93781a5738c542ba-image.png)
+
+***
+
+### 5. Switch Models
+
+You can switch the currently used model in two ways:
+
+- Command Line
+
+```bash
+openclaw models set bankofai/<model_name>
+```
+
+Or manually edit the `~/.openclaw/openclaw.json` configuration file.
+
+- Web UI - Dashboard
+
+Visit http://localhost:18789 (18789 is the default port for OpenClaw) in your browser to access the OpenClaw Dashboard. Click "Agent" in the left navigation menu, and select the desired model in the Primary model dropdown:
+> **Please note:** Once you change the model via the Dashboard, command-line model switching will no longer work because the Dashboard automatically adds a `list` field to the config file.
+
+![](https://files.readme.io/0e5b17eef08531b83f513a737522772e52a50602c02ec62d0ef147611f95ac24-image.png)
+ +*** + +## Compatibility Testing + +| Operating System | Status | +| :--------------- | :------- | +| Ubuntu 24.04 | ✅ Passed | +| Windows 11 25H2 | ✅ Passed | +| macOS 24.6.0 | ✅ Passed | + +*** + +## FAQ + +**Q: What should I do if the script execution fails?** + +A: Please ensure that: + +1. Node.js 22 or higher is installed +2. OpenClaw is installed and initialized (you have run `openclaw onboard`) +3. Network connection is normal, and the AINFT API is accessible + +**Q: How do I switch models?** + +See point 5 in the **Detailed Steps** above. From 6f38a516b369e1ebd8b094b00431c8b68ad78a89 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Mon, 16 Mar 2026 19:06:53 +0800 Subject: [PATCH 37/78] Update one-click-script-tutorial.md --- .../openclaw/one-click-script-tutorial.md | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/docs/llm-service/openclaw/one-click-script-tutorial.md b/docs/llm-service/openclaw/one-click-script-tutorial.md index d4d44151..6b0cbe2b 100644 --- a/docs/llm-service/openclaw/one-click-script-tutorial.md +++ b/docs/llm-service/openclaw/one-click-script-tutorial.md @@ -4,7 +4,7 @@ 1. Node.js 22 or higher is installed 2. OpenClaw is installed and initialized (you have run `openclaw onboard`) -3. Network connection is normal, and the AINFT API is accessible +3. Network connection is normal, and the BANK OF AI API is accessible ### Script Commands @@ -34,8 +34,7 @@ iwr https://chat.bankofai.io/scripts/openclaw-install-bankofai-provider.ps1 | ie 2. Go to the [API Key Management Page](https://chat.bankofai.io/key) 3. Click to apply for a new API Key - -![](https://files.readme.io/9011e70e9009bd0e6bcf318f8100e23e7b483a783fc7786b03e03107437d05d6-image.png) +![](https://files.readme.io/354a3d414f37e7df28f2cbf92dd055db9b67a20cab8738f6d5ac007226b6931b-image.png)
@@ -48,7 +47,7 @@ Depending on your operating system, execute the corresponding command above. The - Check the environment (Node.js, OpenClaw, etc.) - Prompt you to enter your API Key -![](https://files.readme.io/1ab2f1ab9d444b570d435296f06270b541186c9d487bc795b2106b873baf7a23-image.png) +![](https://files.readme.io/ef091efb8911db673af8a5eade7b281a52f641c93d41fbd57cb898ee91893e76-Image_16-3-2026_at_7.00PM.png) *** @@ -56,7 +55,7 @@ Depending on your operating system, execute the corresponding command above. The After validating the API Key, the script will fetch the list of available models and prompt you to select a default model: -![](https://files.readme.io/f420e5e6e6214f0e02c56c546a3625f5793bb1dc6e5cb8ef4aed6e547a054308-image.png) +![](https://files.readme.io/be3b9162405e38988898261db3effa8adcf92b6d9e37181d36b32c1cf8bcd1e3-Image_16-3-2026_at_7.01PM.png) **:exclamation: Note:exclamation: **Gemini series models are currently largely unusable in OpenClaw due to client fingerprinting strictness for function calls. Please choose with caution. @@ -70,7 +69,9 @@ Once the selection is complete, the script will automatically: - Update the OpenClaw configuration file - Restart the Gateway -![](https://files.readme.io/7772254ccf61a1147f9aa87f036ce1045b1077f824dede9a93781a5738c542ba-image.png) +![](https://files.readme.io/a08c7fcee0cbe906042ef52fefa15348750ad5cd2db8c4f555f92ecabba72761-Image_16-3-2026_at_7.03PM.png) + +
*** @@ -89,9 +90,10 @@ Or manually edit the `~/.openclaw/openclaw.json` configuration file. - Web UI - Dashboard Visit (18789 is the default port for OpenClaw) in your browser to access the OpenClaw Dashboard. Click "Agent" in the left navigation menu, and select the desired model in the Primary model dropdown: + > **Please note:** Once you change the model via the Dashboard, command-line model switching will no longer work because the Dashboard automatically adds a `list` field to the config file. -![](https://files.readme.io/0e5b17eef08531b83f513a737522772e52a50602c02ec62d0ef147611f95ac24-image.png) +![](https://files.readme.io/3668289b53d185d158dd8393f46c0e171c0d301b5bdc624792c8cf8e6f9c4936-16-3-26_6.56.png)
@@ -115,7 +117,7 @@ A: Please ensure that:
 1. Node.js 22 or higher is installed
 2. OpenClaw is installed and initialized (you have run `openclaw onboard`)
-3. Network connection is normal, and the AINFT API is accessible
+3. Network connection is normal, and the BANK OF AI API is accessible
 
 **Q: How do I switch models?**
 
 See point 5 in the **Detailed Steps** above.

From 6287159915b3621b8236eca587db7274046066a8 Mon Sep 17 00:00:00 2001
From: ai-bankofai
Date: Mon, 16 Mar 2026 19:10:23 +0800
Subject: [PATCH 38/78] Update one-click-script-tutorial.md

---
 docs/llm-service/openclaw/one-click-script-tutorial.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/llm-service/openclaw/one-click-script-tutorial.md b/docs/llm-service/openclaw/one-click-script-tutorial.md
index 6b0cbe2b..6d1375e9 100644
--- a/docs/llm-service/openclaw/one-click-script-tutorial.md
+++ b/docs/llm-service/openclaw/one-click-script-tutorial.md
@@ -57,7 +57,7 @@ After validating the API Key, the script will fetch the list of available models
 
 ![](https://files.readme.io/be3b9162405e38988898261db3effa8adcf92b6d9e37181d36b32c1cf8bcd1e3-Image_16-3-2026_at_7.01PM.png)
 
-**:exclamation: Note:exclamation: **Gemini series models are currently largely unusable in OpenClaw due to client fingerprinting strictness for function calls. Please choose with caution.
+>**Note:** Gemini series models are currently largely unusable in OpenClaw due to client fingerprinting strictness for function calls. Please choose with caution.
 
 ***
 
@@ -91,7 +91,7 @@ Or manually edit the `~/.openclaw/openclaw.json` configuration file.
 
 Visit http://localhost:18789 (18789 is the default port for OpenClaw) in your browser to access the OpenClaw Dashboard. Click "Agent" in the left navigation menu, and select the desired model in the Primary model dropdown:
 
-> **Please note:** Once you change the model via the Dashboard, command-line model switching will no longer work because the Dashboard automatically adds a `list` field to the config file.
+> **Note:** Once you change the model via the Dashboard, command-line model switching will no longer work because the Dashboard automatically adds a `list` field to the config file. ![](https://files.readme.io/3668289b53d185d158dd8393f46c0e171c0d301b5bdc624792c8cf8e6f9c4936-16-3-26_6.56.png) From ee522cfed5c37a255c301868c11aada460b492a8 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Mon, 16 Mar 2026 22:40:52 +0800 Subject: [PATCH 39/78] Create glm-5.md --- docs/llm-service/models/glm-5.md | 47 ++++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) create mode 100644 docs/llm-service/models/glm-5.md diff --git a/docs/llm-service/models/glm-5.md b/docs/llm-service/models/glm-5.md new file mode 100644 index 00000000..cd7e1682 --- /dev/null +++ b/docs/llm-service/models/glm-5.md @@ -0,0 +1,47 @@ +# GLM-5 + +## Overview +**GLM-5** is Zhipu AI's new-generation flagship foundation model, specifically designed for **Coding** and **Agent** scenarios. It achieves State-Of-The-Art (SOTA) performance in open-source complex system engineering and long-horizon tasks, with a real-world coding experience approaching **Claude Opus** level. + +Based on a **744B** scale foundation model, combined with asynchronous reinforcement learning and sparse attention mechanisms, GLM-5 marks a paradigm shift from "writing code" to "building systems". + +--- + +## Key Features +* **Parameter Scale and Data Volume:** The base model's parameter scale has expanded to **744B** (with **40B** activated parameters), and pre-training data has increased to **28.5T**, significantly enhancing the model's breadth and depth of knowledge. +* **Ultra-Long Context and Output:** Supports a context window of up to **200K tokens** and a maximum output length of **128K tokens**, enabling excellent performance in handling complex code repositories and multi-step tasks. 
+* **Exceptional Coding & Agent Capabilities:** Systematically strengthened programming capabilities, excelling in code generation with low hallucination rates and efficient token utilization. +* **Multiple Thinking Modes:** Offers various thinking modes to support more flexible and in-depth problem-solving. + +--- + +## Best Use Cases +1. **Complex System Engineering:** Construction and management of complex software systems, assisting in system design and optimization. +2. **Long-Horizon Agent Tasks:** Agent tasks requiring multi-step planning, execution, and feedback (e.g., automated workflows). +3. **High-Precision Code Debugging:** Provides human-level coding assistance to improve development efficiency. +4. **Large-Scale Document Analysis:** Deep information extraction and summarization for massive document sets. + +--- + +## Capabilities and Limitations + +| Capability | Detailed Description | +| :--- | :--- | +| **Reasoning Ability** | **Extremely Strong.** Excels in complex logical reasoning and multi-step planning. | +| **Creative Ability** | **Extremely Strong.** Particularly adept at code generation and system design. | +| **Multimodal Ability** | Primarily focuses on text/code; can be integrated with visual tools on Zhipu platform. | +| **Response Speed** | **30-50 tokens/s.** Balances high-quality output with efficient speed. | +| **Context Window** | 200K Tokens | +| **Max Output** | 128K Tokens | + +--- + +## Credits and Pricing + +| Model | Input (Credits/Token) | Output (Credits/Token) | +| :--- | :--- | :--- | +| **GLM-5** | 0.30 | 2.55 | + +--- + +> **Note:** For optimal performance in coding tasks, it is recommended to provide clear system prompts and utilize the 128K output capacity for building complete modules. 
From 6862b97b3f50a82a3f51603caa585888c93bebb9 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Mon, 16 Mar 2026 22:47:35 +0800 Subject: [PATCH 40/78] Create kimi-k2.5.md --- docs/llm-service/models/kimi-k2.5.md | 48 ++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) create mode 100644 docs/llm-service/models/kimi-k2.5.md diff --git a/docs/llm-service/models/kimi-k2.5.md b/docs/llm-service/models/kimi-k2.5.md new file mode 100644 index 00000000..9cdbe112 --- /dev/null +++ b/docs/llm-service/models/kimi-k2.5.md @@ -0,0 +1,48 @@ +# Kimi-K2.5 + +## Core Overview +**Kimi-K2.5** is Moonshot AI’s most versatile model to date, featuring a **native multimodal architecture** that simultaneously supports visual and text input, thinking and non-thinking modes, and conversational and Agent tasks. + +With its **256K ultra-long context window**, multimodal understanding, and advanced Tool Calling capabilities, it sets a new benchmark in open-source visual programming and Agent clusters, empowering developers to build next-generation AI applications. + +--- + +## Key Features +* **Native Multimodal Architecture:** Supports mixed input of visual and text, excels in image recognition and visual programming. +* **256K Ultra-Long Context Window:** Provides a **256,000 token** window, supporting long-form reasoning and processing of massive datasets. +* **Agent Clusters & Tool Calling:** Supports a preview version of Agent clusters (up to **100 sub-agents** and **1,500 tool calls**), operating 4.5x faster than single-agent configurations. +* **Exceptional Coding Capabilities:** Leading performance in SWE-Bench and LiveCodeBench, offering competitive programming skills at a fraction of the cost of comparable models. +* **Thinking Modes:** Flexible switching between quick response and deep reasoning/planning modes. + +--- + +## Best Use Cases +1. **Visual Programming & Automation:** Pixel-level webpage replication and expert-level office task automation. +2. 
**Ultra-Long Text Analysis:** Legal document review, massive research report analysis, and full codebase understanding. +3. **Multi-Agent Collaboration:** Building complex automated workflows involving multiple specialized sub-agents. +4. **Professional Code Generation:** High-efficiency code generation, optimization, and deep debugging for developers. + +--- + +## Capabilities and Limitations + +| Capability | Detailed Description | +| :--- | :--- | +| **Reasoning Ability** | **Extremely Strong.** Excels in long-context reasoning and Agent task planning. | +| **Creative Ability** | **Extremely Strong.** Adept at visual programming and multimodal content creation. | +| **Multimodal Ability** | **Native Multimodal.** Outstanding performance in visual understanding and input. | +| **Response Speed** | Fast in quick mode; highly efficient parallel processing in Agent cluster mode. | +| **Context Window** | 256,000 Tokens | +| **Max Output** | 256,000 Tokens | + +--- + +## Credits and Pricing + +| Model | Input (Credits/Token) | Output (Credits/Token) | +| :--- | :--- | :--- | +| **Kimi-K2.5** | 0.23 | 3.00 | + +--- + +> **Pro Tip:** When using Kimi-K2.5 for complex system design, try leveraging its **Thinking Mode** for architecture planning before switching to standard mode for rapid code execution. 
From 6a6b9c938379c238452d021653135c685299bc4c Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Mon, 16 Mar 2026 22:51:40 +0800 Subject: [PATCH 41/78] Create minimax-m2.5.md --- docs/llm-service/models/minimax-m2.5.md | 48 +++++++++++++++++++++++++ 1 file changed, 48 insertions(+) create mode 100644 docs/llm-service/models/minimax-m2.5.md diff --git a/docs/llm-service/models/minimax-m2.5.md b/docs/llm-service/models/minimax-m2.5.md new file mode 100644 index 00000000..fde22f68 --- /dev/null +++ b/docs/llm-service/models/minimax-m2.5.md @@ -0,0 +1,48 @@ +# MiniMax-M2.5 + +## Core Overview +**MiniMax-M2.5** is MiniMax's independently developed flagship multimodal general large model, designed for **high-throughput and low-latency** production environments. + +It achieves industry-leading performance in coding and Agent capabilities, with the ability to natively understand, generate, and integrate multiple modalities including **text, audio, images, video, and music**. M2.5 aims to provide top-tier performance at extremely low costs, excelling particularly in complex task processing and professional office scenarios. + +--- + +## Key Features +* **Industry-Leading Coding & Agent Capabilities:** Achieved best-in-industry performance on the **Multi-SWE-Bench** benchmark, demonstrating higher decision-making maturity and more efficient token utilization. +* **Efficient Multimodal Processing:** Natively supports the integration of text, audio, images, video, and music, providing a truly rich multimodal interactive experience. +* **Ultra-Long Context Processing:** Features a substantial context window of **197K tokens**, optimized through reinforcement learning for precise task decomposition. +* **High Throughput, Low Latency:** Optimized for production with **100 TPS** and **50 TPS** versions. Pricing is significantly lower (1/10 to 1/20) than comparable models. 
+* **Enhanced Office Scenarios:** Significant capability improvements in handling professional software tasks such as Word, PPT, and Excel financial modeling. + +--- + +## Best Use Cases +1. **Enterprise-Level Automated Workflows:** Ideal for automation requiring fast multimodal processing and complex Agent decision-making. +2. **Software Development & Code Assistance:** Industry-leading generation and debugging, especially in large, complex codebases. +3. **Multimodal Content Creation:** Innovative cross-modal generation (e.g., text-to-video or image-to-music integration). +4. **Advanced Office Document Processing:** High-efficiency information extraction and modeling for Word, Excel, and PPT. + +--- + +## Capabilities and Limitations + +| Capability | Detailed Description | +| :--- | :--- | +| **Reasoning Ability** | **Extremely Strong.** High decision-making maturity for multi-step Agent tasks. | +| **Creative Ability** | **Extremely Strong.** Proficient in multimodal creation and office document automation. | +| **Multimodal Ability** | **Native Multimodal.** Supports text, audio, images, video, and music. | +| **Response Speed** | **Extremely Fast.** Offers 100 TPS and 50 TPS versions for high-throughput needs. | +| **Context Window** | 197,000 Tokens | +| **Max Output** | 131,000 Tokens | + +--- + +## Credits and Pricing + +| Model | Input (Credits/Token) | Output (Credits/Token) | +| :--- | :--- | :--- | +| **MiniMax-M2.5** | 0.30 | 1.20 | + +--- + +> **Pro Tip:** MiniMax-M2.5 is exceptionally cost-effective for **high-volume production API calls**. If your workflow requires processing thousands of documents per hour, M2.5 provides the best balance of speed and budget. 
From 4b1d7b327abd0f2b6c8fdd5bc75bd1321ae14801 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Tue, 17 Mar 2026 14:08:13 +0800 Subject: [PATCH 42/78] Update integration-guide.md --- .../llm-service/openclaw/integration-guide.md | 332 +----------------- 1 file changed, 6 insertions(+), 326 deletions(-) diff --git a/docs/llm-service/openclaw/integration-guide.md b/docs/llm-service/openclaw/integration-guide.md index 0c25886e..d68008b6 100644 --- a/docs/llm-service/openclaw/integration-guide.md +++ b/docs/llm-service/openclaw/integration-guide.md @@ -1,331 +1,11 @@ -# Integrating OpenClaw with an LLM Gateway +# Integrating OpenClaw with Bank of AI -## Table of Contents +> **From Zero to Private Agent: Deploying OpenClaw with Bank of AI in 15 Minutes** -- [Overview](#overview) -- [Step 1: Obtain an API Key](#step-1-obtain-an-api-key) -- [Step 2: Prepare Your System](#step-2-prepare-your-system) -- [Step 3: Install OpenClaw](#step-3-install-openclaw) -- [Step 4: Run the Initialization Wizard](#step-4-run-the-initialization-wizard) -- [Step 5: Configure the LLM Gateway](#step-5-configure-the-llm-gateway) -- [Step 6: Gateway Commands](#step-6-gateway-commands) -- [Step 7: Launch OpenClaw](#step-7-launch-openclaw) -- [Step 8: Useful CLI Commands](#step-8-useful-cli-commands) -- [Next Steps](#next-steps) +OpenClaw (formerly ClawdBot or Moltbot) is an open-source personal AI assistant. Unlike cloud-based SaaS tools, OpenClaw runs locally on your own machine, giving you full control over your data and workflows. You can interact with it through familiar messaging platforms like WhatsApp, Telegram, Lark, and DingTalk to handle emails, manage calendars, write code, or even automate your smart home. ---- +OpenClaw is more than just a chatbot; it's a truly functional "agent" designed for real-world execution. It features persistent memory, access to your local file system and the internet, and the ability to grow more powerful by expanding its "skills". 
-# Overview +Because it's open-source and self-hosted, OpenClaw has attracted a vibrant community of developers and tech enthusiasts. The community has pioneered creative use cases, from automating business operations to managing personal life—showcasing the immense potential of a truly personal AI. -OpenClaw (formerly ClawdBot or Moltbot) is an open-source personal AI assistant that runs locally on your machine. - -Unlike cloud-only AI assistants, OpenClaw gives you control over: - -- Local memory -- Filesystem access -- External tools -- Automation workflows - -It can connect to messaging platforms such as: - -- Telegram -- WhatsApp -- Lark -- DingTalk - -This guide explains how to install OpenClaw and connect it to an **LLM Gateway** that exposes an **OpenAI-compatible API**. - -Once connected, OpenClaw can use external large language models through the gateway. - ---- - -# Step 1: Obtain an API Key - -Before integrating OpenClaw with the **LLM Gateway**, you need an API key. - -Visit the API key management page: - -https://chat.bankofai.io/key - -Steps: - -1. Sign in to your account. -2. Generate or copy your **API Key**. -3. Store it securely. - -You will use this key when configuring the gateway provider inside OpenClaw. - ---- - -# Step 2: Prepare Your System - -Make sure your environment meets the following requirements. - -| Requirement | Details | -|---|---| -| Node.js | Version **20 LTS or higher** | -| Operating System | macOS / Linux / Windows (WSL2 recommended) | -| Package Manager | npm | - -Check Node.js version: - -```bash -node -v -``` - -![Check Node.js version](https://files.readme.io/ac27744855c7066d117a856e7005166662e707462312b1925c4e368f5c9c7427-1.png) - -If your version is below 20, install or upgrade Node.js. 
- ---- - -# Step 3: Install OpenClaw - -Install OpenClaw globally: - -```bash -npm install -g openclaw -``` - -Verify installation: - -```bash -openclaw --version -``` - -![Verify OpenClaw installation](https://files.readme.io/6f39989ed49f307a5168bd934c4ffaf2a79db8a2268c6959c3bb731f43138598-2.png) - ---- - -## Troubleshooting Installation - -### Sharp module error - -Run: - -```bash -npm install -g openclaw --force -``` - -### openclaw command not found - -Find npm global path: - -```bash -npm config get prefix -``` - -Ensure the path is included in your shell `PATH`. - ---- - -# Step 4: Run the Initialization Wizard - -Start the onboarding wizard: - -```bash -openclaw onboard -``` - -The wizard will guide you through several setup steps. - ---- - -## 4.1 Model Provider - -When asked to choose a model provider, select: - -**Skip for now** - -We skip this step because the **LLM Gateway will be configured manually in the configuration file**. - -![Skip model provider](https://files.readme.io/458a23b58f79ec97f4fee7ed6668a6a799cb37f95be57ddfe96830ad77bb2bb5-4.png) - ---- - -## 4.2 Communication Channels - -Choose messaging channels if desired. - -You can skip this step and add them later. - -![Skip channels](https://files.readme.io/178a9aded9bb4934dcaf7445c0c4edf4569836183ac6019b8c8d08e2917b4549-5.png) - ---- - -## 4.3 Skills - -For beginners, select: - -**No** - -You can enable skills later. - -![Skip skills](https://files.readme.io/d4155d69eb1dd69c1586fa5ab844c602914b0b5a1934265bd97e1f8444b40b5a-6.png) - ---- - -# Step 5: Configure the LLM Gateway - -Open the configuration file: - -```bash -~/.openclaw/openclaw.json -``` - -Add the gateway provider configuration. 
- -Example: - -```json -{ - "models": { - "mode": "merge", - "providers": { - "llm-gateway": { - "base_url": "https://api.example.com/v1", - "api_key": "{YOUR_API_KEY}", - "api": "openai-completions", - "models": [ - { "id": "gpt-5.2", "name": "gpt-5.2" }, - { "id": "gpt-5-mini", "name": "gpt-5-mini" }, - { "id": "gpt-5-nano", "name": "gpt-5-nano" }, - { "id": "claude-opus-4.6", "name": "claude-opus-4.6" }, - { "id": "claude-sonnet-4.6", "name": "claude-sonnet-4.6" }, - { "id": "claude-haiku-4.5", "name": "claude-haiku-4.5" } - ] - } - } - } -} -``` - -Replace: - -- `base_url` -- `api_key` - -with your real gateway endpoint and API key. - ---- - -## Set Default Model - -In the same configuration file: - -```json -{ - "agents": { - "default": { - "model": "llm-gateway/gpt-5-nano" - } - } -} -``` - ---- - -## Restart Gateway - -```bash -openclaw gateway restart -``` - ---- - -## Test the Connection - -```bash -openclaw agent --agent main --message "How are you doing today?" -``` - -If the gateway is configured correctly, the agent will respond. - -![Successful response](https://files.readme.io/312c6feb51ea9f4071b75efd3182ef0507a6981baebd3bf7e8d57dec33978efd-7.png) - ---- - -# Step 6: Gateway Commands - -| Action | Command | -|---|---| -| Install Gateway | `openclaw gateway install` | -| Start Gateway | `openclaw gateway start` | -| Stop Gateway | `openclaw gateway stop` | -| Restart Gateway | `openclaw gateway restart` | -| Check Status | `openclaw gateway status` | - ---- - -# Step 7: Launch OpenClaw - -## Web Dashboard - -Start the web interface: - -```bash -openclaw ui -``` - -OpenClaw will display the **local access URL** in the terminal. 
- ---- - -## Terminal UI - -Launch the terminal interface: - -```bash -openclaw tui -``` - -![OpenClaw TUI](https://files.readme.io/9ca9be0db1c21be12488934030c8ef3076c17fbb4fc26cfdb0e71425e46d121d-8.png) - -Useful commands: - -| Command | Description | -|---|---| -| `/status` | View system status | -| `/session` | Switch session | -| `/model` | Change model | -| `/help` | Show command list | - ---- - -# Step 8: Useful CLI Commands - -Check model status: - -```bash -openclaw models status -``` - -List channels: - -```bash -openclaw channels list -``` - -Search memory: - -```bash -openclaw memory search "keyword" -``` - -Open documentation: - -```bash -openclaw docs -``` - ---- - -# Next Steps - -You can now extend your OpenClaw setup by: - -- Adding messaging channels (e.g. Telegram) -- Enabling skills -- Connecting external APIs -- Customizing model routing -- Building automated AI workflows +This guide will walk you through everything from scratch: downloading, installing, and setting up OpenClaw, as well as connecting it to the Bank of AI API. By the end, you'll have built your very own AI assistant. From 7282f61f2c87fc756a2c6707a120e39a8987a06e Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Tue, 17 Mar 2026 14:29:16 +0800 Subject: [PATCH 43/78] Update integration-guide.md --- .../llm-service/openclaw/integration-guide.md | 169 +++++++++++++++++- 1 file changed, 164 insertions(+), 5 deletions(-) diff --git a/docs/llm-service/openclaw/integration-guide.md b/docs/llm-service/openclaw/integration-guide.md index d68008b6..0a703517 100644 --- a/docs/llm-service/openclaw/integration-guide.md +++ b/docs/llm-service/openclaw/integration-guide.md @@ -1,11 +1,170 @@ # Integrating OpenClaw with Bank of AI -> **From Zero to Private Agent: Deploying OpenClaw with Bank of AI in 15 Minutes** +> **From Zero to Private Agent: Deploy OpenClaw with Bank of AI in 15 Minutes** -OpenClaw (formerly ClawdBot or Moltbot) is an open-source personal AI assistant. 
Unlike cloud-based SaaS tools, OpenClaw runs locally on your own machine, giving you full control over your data and workflows. You can interact with it through familiar messaging platforms like WhatsApp, Telegram, Lark, and DingTalk to handle emails, manage calendars, write code, or even automate your smart home. +OpenClaw (formerly ClawdBot or Moltbot) is an open-source personal AI assistant that runs locally on your machine. Unlike cloud-based SaaS tools, it gives you full control over your data and workflows. -OpenClaw is more than just a chatbot; it's a truly functional "agent" designed for real-world execution. It features persistent memory, access to your local file system and the internet, and the ability to grow more powerful by expanding its "skills". +You can interact with OpenClaw through familiar platforms such as WhatsApp, Telegram, Lark, and DingTalk to handle emails, manage calendars, write code, or automate tasks. -Because it's open-source and self-hosted, OpenClaw has attracted a vibrant community of developers and tech enthusiasts. The community has pioneered creative use cases, from automating business operations to managing personal life—showcasing the immense potential of a truly personal AI. +OpenClaw is more than a chatbot — it is a fully functional AI agent with: -This guide will walk you through everything from scratch: downloading, installing, and setting up OpenClaw, as well as connecting it to the Bank of AI API. By the end, you'll have built your very own AI assistant. +- Persistent memory +- Access to your local files and the internet +- Extensible capabilities through "skills" + +Thanks to its open-source and self-hosted nature, OpenClaw has built a strong developer community with use cases ranging from business automation to personal productivity. + +This guide walks you through installing OpenClaw, configuring it, and connecting it to the **Bank of AI API**. By the end, you will have your own private AI agent running locally. 
+ +--- + +## Step 1: Get Your Bank of AI API Key + +1. Log in to [Bank of AI Chat](https://chat.bankofai.io/chat) +2. Go to the **API Key Management** page +3. Generate your **API Key** `api_key` +4. Save it securely — you will need it later + +--- + +## Step 2: Prepare Your System + +Before installing OpenClaw, make sure your system meets the following requirements. + +OpenClaw is designed for Unix-like systems, but also works on Windows via **WSL2 (Windows Subsystem for Linux 2)**. + +### Requirements + +| Requirement | Details | +|------------|--------| +| Node.js | Version 22 or higher | +| Operating System | macOS, Linux, or Windows (WSL2) | +| Package Manager | npm (recommended) or pnpm | + +### Check Your Environment + +Run: + +```bash +node -v +``` + +If the version is below **v22.0.0**, or the command is not found, install or upgrade Node.js. + +--- + +## Step 3: Install OpenClaw + +The easiest way to install OpenClaw is via npm: + +```bash +npm install -g openclaw +``` + +This will: + +- Install OpenClaw globally +- Set up required dependencies +- Enable the `openclaw` command + +--- + +### Troubleshooting + +#### Sharp Module Error + +On some systems (especially macOS with Homebrew), you may encounter issues with the `sharp` module. + +Fix it by forcing prebuilt binaries: + +```bash +npm install -g openclaw --force +``` + +--- + +#### `openclaw: command not found` + +This means your system cannot find global npm binaries. + +1. Check npm global path: + +```bash +npm config get prefix +``` + +2. Example output: + +```text +/usr/local +``` + +3. Add it to PATH: + +```bash +export PATH="/usr/local/bin:$PATH" +``` + +4. Apply changes: + +```bash +source ~/.zshrc +``` + +(or `~/.bashrc` depending on your shell) + +--- + +## Step 4: Complete the Initialization Wizard + +After installation, OpenClaw should start an onboarding wizard automatically. + +If not, run: + +```bash +openclaw onboard +``` + +--- + +### Wizard Steps + +#### 1. 
AI Model Configuration + +You will be asked to provide an API key for model providers. + +👉 Select: + +```text +Skip for now +``` + +We will configure **Bank of AI** in the next step. + +--- + +#### 2. Communication Channels + +Select the platforms you want to use: + +- Telegram +- WhatsApp +- Lark + +--- + +#### 3. Skills + +Recommended: + +```text +No +``` + +Use **Space** to select and **Enter** to confirm. + +--- + +### After Setup + +Once the wizard completes and the UI launches, you are ready to connect OpenClaw to the **Bank of AI API**. From ec0cb427458a9e233335235138a5cd3b80889d59 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Tue, 17 Mar 2026 14:57:47 +0800 Subject: [PATCH 44/78] Update integration-guide.md --- .../llm-service/openclaw/integration-guide.md | 421 ++++++++++++++---- 1 file changed, 342 insertions(+), 79 deletions(-) diff --git a/docs/llm-service/openclaw/integration-guide.md b/docs/llm-service/openclaw/integration-guide.md index 0a703517..54fdc07b 100644 --- a/docs/llm-service/openclaw/integration-guide.md +++ b/docs/llm-service/openclaw/integration-guide.md @@ -1,170 +1,433 @@ -# Integrating OpenClaw with Bank of AI +# Integrating OpenClaw with BankOfAI +## From Zero to Private Agent: Deploying OpenClaw with BankOfAI in 15 Minutes -> **From Zero to Private Agent: Deploy OpenClaw with Bank of AI in 15 Minutes** +OpenClaw (formerly ClawdBot or Moltbot) is an open-source personal AI assistant. Unlike cloud-based SaaS tools, OpenClaw runs locally on your own machine, giving you full control over your data and workflows. You can interact with it through familiar messaging platforms like WhatsApp, Telegram, Lark, and DingTalk to handle emails, manage calendars, write code, or even automate your smart home. -OpenClaw (formerly ClawdBot or Moltbot) is an open-source personal AI assistant that runs locally on your machine. Unlike cloud-based SaaS tools, it gives you full control over your data and workflows. 
+OpenClaw is more than just a chatbot; it's a truly functional "agent" designed for real-world execution. It features persistent memory, access to your local file system and the internet, and the ability to grow more powerful by expanding its "skills". -You can interact with OpenClaw through familiar platforms such as WhatsApp, Telegram, Lark, and DingTalk to handle emails, manage calendars, write code, or automate tasks. +Because it's open-source and self-hosted, OpenClaw has attracted a vibrant community of developers and tech enthusiasts. The community has pioneered creative use cases, from automating business operations to managing personal life—showcasing the immense potential of a truly personal AI. -OpenClaw is more than a chatbot — it is a fully functional AI agent with: - -- Persistent memory -- Access to your local files and the internet -- Extensible capabilities through "skills" - -Thanks to its open-source and self-hosted nature, OpenClaw has built a strong developer community with use cases ranging from business automation to personal productivity. - -This guide walks you through installing OpenClaw, configuring it, and connecting it to the **Bank of AI API**. By the end, you will have your own private AI agent running locally. +This guide will walk you through everything from scratch: downloading, installing, and setting up OpenClaw, as well as connecting it to the BankOfAI API. By the end, you'll have built your very own AI assistant. --- -## Step 1: Get Your Bank of AI API Key +# Step 1: Get Your BankOfAI API Key -1. Log in to [Bank of AI Chat](https://chat.bankofai.io/chat) -2. Go to the **API Key Management** page -3. Generate your **API Key** `api_key` -4. Save it securely — you will need it later - ---- +Log in to https://chat.bankofai.io/chat. -## Step 2: Prepare Your System +Navigate to the API key management page and apply for your `api_key`. -Before installing OpenClaw, make sure your system meets the following requirements. 
+--- -OpenClaw is designed for Unix-like systems, but also works on Windows via **WSL2 (Windows Subsystem for Linux 2)**. +# Step 2: Prepare Your System -### Requirements +Before installing, make sure your system meets these basic requirements. OpenClaw is designed for Unix-like environments but runs perfectly on Windows via WSL2 (Windows Subsystem for Linux 2). | Requirement | Details | -|------------|--------| -| Node.js | Version 22 or higher | -| Operating System | macOS, Linux, or Windows (WSL2) | -| Package Manager | npm (recommended) or pnpm | - -### Check Your Environment +|---|---| +| Node.js | Version 22 or higher. This is the runtime environment for OpenClaw. | +| Operating System | macOS, Linux, or Windows (via WSL2). | +| Package Manager | pnpm is required to compile from source. For a standard install, npm (which comes with Node.js) is recommended. | -Run: +To check your environment, open a terminal and run: -```bash +``` node -v ``` -If the version is below **v22.0.0**, or the command is not found, install or upgrade Node.js. +If the version is lower than **v22.0.0**, or you see a **"command not found"** error, please install or upgrade Node.js from the official website. --- -## Step 3: Install OpenClaw +# Step 3: Install OpenClaw + +OpenClaw supports several installation methods. For beginners, the official one-line installation script is the best choice, as it automatically handles most of the setup. + +This method is the quickest and easiest as it will detect your OS, install dependencies, and make the `openclaw` command available globally. 
-The easiest way to install OpenClaw is via npm: +For macOS or Linux terminals, execute the following: -```bash +``` npm install -g openclaw ``` -This will: +For Windows users using PowerShell, run the same command: -- Install OpenClaw globally -- Set up required dependencies -- Enable the `openclaw` command +``` +npm install -g openclaw +``` --- -### Troubleshooting +## Troubleshooting Common Errors -#### Sharp Module Error +### Problem 1: Sharp Module Error -On some systems (especially macOS with Homebrew), you may encounter issues with the `sharp` module. +On some systems—especially macOS where **libvips** was installed via Homebrew—you might encounter an error with the **sharp module** (an image processing library). -Fix it by forcing prebuilt binaries: +To fix this, try forcing the installation of pre-built binaries, which bypasses local compilation: -```bash +``` npm install -g openclaw --force ``` --- -#### `openclaw: command not found` +### Problem 2: "Command Not Found" -This means your system cannot find global npm binaries. +After installation, you might see: -1. Check npm global path: +``` +openclaw: command not found +``` + +This usually means your system can't find where globally installed npm packages are located. -```bash +Find npm's global installation path by running: + +``` npm config get prefix ``` -2. Example output: +If the output is, for example: -```text +``` /usr/local ``` -3. Add it to PATH: +Then your binaries live in: + +``` +/usr/local/bin +``` + +You will need to add this path to your shell profile (`~/.zshrc` or `~/.bashrc`): -```bash +``` export PATH="/usr/local/bin:$PATH" ``` -4. Apply changes: +After saving the file, restart your terminal or run: -```bash +``` source ~/.zshrc ``` -(or `~/.bashrc` depending on your shell) +(or your specific config file) + +The `openclaw` command should now work. 
--- -## Step 4: Complete the Initialization Wizard +# Step 4: Complete the Initialization Wizard -After installation, OpenClaw should start an onboarding wizard automatically. +After installation, the onboarding wizard should trigger automatically. -If not, run: +If you accidentally closed the window, you can restart the wizard (and install the background daemon) by running: -```bash +``` openclaw onboard ``` +The wizard will walk you through three primary sections: + +### AI Model Configuration + +The wizard will request an API key for the large language model service (Anthropic Claude, OpenAI GPT, etc.). + +→ For now, select **"Skip for now"**. We'll set this up manually in the next step. + +### Communication Channels + +Choose which messaging apps you want to use to talk to OpenClaw. + +Examples: + +- Telegram +- WhatsApp + +### Skills + +We recommend selecting **No**. + +(Use the **Spacebar** to toggle selections and **Enter** to confirm) + +You can add skills later. + +Once the wizard finishes and the OpenClaw UI launches, you will need to manually edit the configuration file to connect it to **BankOfAI**. + --- -### Wizard Steps +# Step 5: Configure the BankOfAI Model + +After completing the onboarding wizard, you'll need to manually add your **BankOfAI configuration** to OpenClaw and set it as the default model. -#### 1. AI Model Configuration +There are two ways to complete the configuration: -You will be asked to provide an API key for model providers. +- One-Click Script +- Manual Configuration (below) -👉 Select: +--- + +## 5.1 Edit the Configuration File + +Open the configuration file located at: -```text -Skip for now ``` +~/.openclaw/openclaw.json +``` + +OpenClaw reads this file at startup to load all its LLM configurations. -We will configure **Bank of AI** in the next step. +Locate the `"models"` section and merge the following JSON snippet. 
+ +Be sure to replace `{BANKOFAI_API_KEY}` with the unique key generated in your API key management dashboard. + +``` +{ + "models": { + "mode": "merge", + "providers": { + "bankofai": { + "baseUrl": "https://api.bankofai.io/v1/", + "apiKey": "{BANKOFAI_API_KEY}", + "api": "openai-completions", + "models": [ + { + "id": "gpt-5.2", + "name": "gpt-5.2" + }, + { + "id": "gpt-5-mini", + "name": "gpt-5-mini" + }, + { + "id": "gpt-5-nano", + "name": "gpt-5-nano" + }, + { + "id": "claude-opus-4.6", + "name": "claude-opus-4.6" + }, + { + "id": "claude-sonnet-4.6", + "name": "claude-sonnet-4.6" + }, + { + "id": "claude-haiku-4.5", + "name": "claude-haiku-4.5" + } + ] + } + } + } +} +``` --- -#### 2. Communication Channels +## 5.2 Set the Default Model -Select the platforms you want to use: +In the same `openclaw.json` file, locate the `agents` section and set the default model. -- Telegram -- WhatsApp -- Lark +Example: + +``` +{ + "agents": { + "default": { + "model": "bankofai/gpt-5-nano" + } + } +} +``` + +--- + +## 5.3 Restart the Gateway + +For the configuration changes to take effect, restart the OpenClaw gateway: + +``` +openclaw gateway restart +``` --- -#### 3. Skills +## 5.4 Test the Connection + +Send a test message from your terminal: + +``` +openclaw chat "How are you doing today?" +``` -Recommended: +Or: -```text -No ``` +openclaw agent --agent main --message "How are you doing today?" +``` + +If you receive a coherent response, congratulations — you have successfully connected **OpenClaw to BankOfAI**. + +--- + +# Step 6: Understand Gateway and Diagnostic Commands + +If you encounter issues during configuration or while running the program, it helps to understand what the **Gateway** is and how to use the built-in diagnostic tools. + +--- + +## What is the Gateway? + +During setup, you'll frequently encounter the term **Gateway**. -Use **Space** to select and **Enter** to confirm. 
+| Action | Command | +|---|---| +| Install the Gateway | `openclaw gateway install` | +| Start the Gateway | `openclaw gateway start` | +| Stop the Gateway | `openclaw gateway stop` | +| Restart the Gateway | `openclaw gateway restart` | +| Uninstall the Gateway | `openclaw gateway uninstall` | +| Check Gateway Status | `openclaw gateway status` | --- -### After Setup +## Diagnostic Commands + +After onboarding and updating your configuration file, run the following diagnostic commands. + +| Command | Description | +|---|---| +| openclaw doctor | Performs a comprehensive health check of your system environment and configuration | +| openclaw gateway status | Shows real-time information about the Gateway | + +Example: + +``` +openclaw doctor +``` + +Follow the prompts in the output to resolve any errors. + +You can also monitor the system status using: + +``` +openclaw gateway status +``` + +If the Gateway is functioning correctly, the output will display a **Healthy** status. + +--- + +# Step 7: Launch OpenClaw + +With the configuration complete, you can interact with your AI assistant through either a **web dashboard** or a **terminal interface**. + +--- + +## Option 1: Web Dashboard + +OpenClaw's built-in web interface is called the **Dashboard**. + +First ensure the Gateway is running, then execute: + +``` +openclaw ui +``` + +This command will generate a URL containing a temporary login token and open it in your default web browser. 
+ +Default address: + +``` +http://127.0.0.1:18789 +``` + +From the Dashboard you can: + +- Chat with your AI assistant +- View chat history +- Configure models, channels, and skills +- Monitor system status + +--- + +## Option 2: Terminal UI (TUI) + +For users who prefer working in the terminal: + +``` +openclaw tui +``` + +This launches a full-screen interface with: + +- Chat history +- Real-time system status +- Interactive input box + +--- + +### TUI Commands + +| Command | Description | +|---|---| +| /status | View current system status | +| /session | Switch chat session | +| /model | Change model | +| /help | List all commands | + +--- + +# Step 8: Master Essential Commands + +The OpenClaw CLI is powerful for automation and configuration. + +--- + +## 1. Check Model Status + +``` +openclaw models status +``` + +Use this to verify: + +- API key validity +- model connectivity +- expiration status + +--- + +## 2. Manage Communication Channels + +``` +openclaw channels list +``` + +Displays all connected messaging platforms and their status. + +--- + +## 3. Query Long-Term Memory + +``` +openclaw memory search "keyword" +``` + +Search semantic memory stored by your AI assistant. + +--- + +## 4. View Documentation + +``` +openclaw docs +``` + +Opens the official documentation portal. + +--- -Once the wizard completes and the UI launches, you are ready to connect OpenClaw to the **Bank of AI API**. +You now have a fully functioning **OpenClaw AI agent powered by BankOfAI**. 
From 8cc3f8898dee92c6d86ad96288b2a9e94fc90962 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Tue, 17 Mar 2026 15:01:03 +0800 Subject: [PATCH 45/78] Update integration-guide.md --- .../llm-service/openclaw/integration-guide.md | 379 ++++++------------ 1 file changed, 118 insertions(+), 261 deletions(-) diff --git a/docs/llm-service/openclaw/integration-guide.md b/docs/llm-service/openclaw/integration-guide.md index 54fdc07b..c99d168e 100644 --- a/docs/llm-service/openclaw/integration-guide.md +++ b/docs/llm-service/openclaw/integration-guide.md @@ -1,189 +1,125 @@ # Integrating OpenClaw with BankOfAI ## From Zero to Private Agent: Deploying OpenClaw with BankOfAI in 15 Minutes -OpenClaw (formerly ClawdBot or Moltbot) is an open-source personal AI assistant. Unlike cloud-based SaaS tools, OpenClaw runs locally on your own machine, giving you full control over your data and workflows. You can interact with it through familiar messaging platforms like WhatsApp, Telegram, Lark, and DingTalk to handle emails, manage calendars, write code, or even automate your smart home. +OpenClaw (formerly ClawdBot or Moltbot) is an open-source personal AI assistant that runs locally on your machine. Unlike cloud-based SaaS tools, it gives you full control over your data, workflows, and integrations. -OpenClaw is more than just a chatbot; it's a truly functional "agent" designed for real-world execution. It features persistent memory, access to your local file system and the internet, and the ability to grow more powerful by expanding its "skills". +With OpenClaw, you can interact through Telegram, WhatsApp, Lark, or directly via a web dashboard to: -Because it's open-source and self-hosted, OpenClaw has attracted a vibrant community of developers and tech enthusiasts. The community has pioneered creative use cases, from automating business operations to managing personal life—showcasing the immense potential of a truly personal AI. 
+- Automate tasks +- Manage files and memory +- Execute real-world actions +- Build powerful AI agents with persistent memory -This guide will walk you through everything from scratch: downloading, installing, and setting up OpenClaw, as well as connecting it to the BankOfAI API. By the end, you'll have built your very own AI assistant. +This guide walks you from **zero to a fully working private AI agent powered by BankOfAI**. --- # Step 1: Get Your BankOfAI API Key -Log in to https://chat.bankofai.io/chat. - -Navigate to the API key management page and apply for your `api_key`. +1. Visit: https://chat.bankofai.io/chat +2. Log in to your account +3. Navigate to the API Key page +4. Generate and copy your API key --- # Step 2: Prepare Your System -Before installing, make sure your system meets these basic requirements. OpenClaw is designed for Unix-like environments but runs perfectly on Windows via WSL2 (Windows Subsystem for Linux 2). +Make sure your system meets the following requirements: | Requirement | Details | |---|---| -| Node.js | Version 22 or higher. This is the runtime environment for OpenClaw. | -| Operating System | macOS, Linux, or Windows (via WSL2). | -| Package Manager | pnpm is required to compile from source. For a standard install, npm (which comes with Node.js) is recommended. | +| Node.js | **Recommended: v24** | +| | Supported: v22 LTS (>=22.16) | +| OS | macOS / Linux / Windows (WSL2) | +| Package Manager | npm (default) | -To check your environment, open a terminal and run: +Check your Node version: -``` +```bash node -v ``` -If the version is lower than **v22.0.0**, or you see a **"command not found"** error, please install or upgrade Node.js from the official website. - --- # Step 3: Install OpenClaw -OpenClaw supports several installation methods. For beginners, the official one-line installation script is the best choice, as it automatically handles most of the setup. 
- -This method is the quickest and easiest as it will detect your OS, install dependencies, and make the `openclaw` command available globally. +### Recommended (Official Script) -For macOS or Linux terminals, execute the following: - -``` -npm install -g openclaw +```bash +curl -fsSL https://openclaw.ai/install.sh | bash ``` -For Windows users using PowerShell, run the same command: +### Alternative (npm global install) -``` +```bash npm install -g openclaw ``` --- -## Troubleshooting Common Errors - -### Problem 1: Sharp Module Error - -On some systems—especially macOS where **libvips** was installed via Homebrew—you might encounter an error with the **sharp module** (an image processing library). - -To fix this, try forcing the installation of pre-built binaries, which bypasses local compilation: - -``` -npm install -g openclaw --force -``` - ---- - -### Problem 2: "Command Not Found" - -After installation, you might see: - -``` -openclaw: command not found -``` - -This usually means your system can't find where globally installed npm packages are located. +## Fix: Command Not Found -Find npm's global installation path by running: +If `openclaw` is not recognized: -``` +```bash npm config get prefix ``` -If the output is, for example: - -``` -/usr/local -``` - -Then your binaries live in: - -``` -/usr/local/bin -``` - -You will need to add this path to your shell profile (`~/.zshrc` or `~/.bashrc`): +Then add to PATH: -``` -export PATH="/usr/local/bin:$PATH" +```bash +export PATH="$(npm prefix -g)/bin:$PATH" ``` -After saving the file, restart your terminal or run: +Apply changes: -``` +```bash source ~/.zshrc ``` -(or your specific config file) - -The `openclaw` command should now work. - --- -# Step 4: Complete the Initialization Wizard - -After installation, the onboarding wizard should trigger automatically. 
- -If you accidentally closed the window, you can restart the wizard (and install the background daemon) by running: +## Fix: Sharp Module Error (macOS) +```bash +npm install -g openclaw --force ``` -openclaw onboard -``` - -The wizard will walk you through three primary sections: - -### AI Model Configuration -The wizard will request an API key for the large language model service (Anthropic Claude, OpenAI GPT, etc.). +--- -→ For now, select **"Skip for now"**. We'll set this up manually in the next step. +# Step 4: Run Onboarding -### Communication Channels +```bash +openclaw onboard --install-daemon +``` -Choose which messaging apps you want to use to talk to OpenClaw. +During setup: -Examples: +### AI Model +→ Select **Skip for now** -- Telegram -- WhatsApp +### Channels +→ Optional (you can skip) ### Skills - -We recommend selecting **No**. - -(Use the **Spacebar** to toggle selections and **Enter** to confirm) - -You can add skills later. - -Once the wizard finishes and the OpenClaw UI launches, you will need to manually edit the configuration file to connect it to **BankOfAI**. +→ Select **No** --- -# Step 5: Configure the BankOfAI Model - -After completing the onboarding wizard, you'll need to manually add your **BankOfAI configuration** to OpenClaw and set it as the default model. - -There are two ways to complete the configuration: - -- One-Click Script -- Manual Configuration (below) - ---- +# Step 5: Configure BankOfAI -## 5.1 Edit the Configuration File +Open config file: -Open the configuration file located at: - -``` +```bash ~/.openclaw/openclaw.json ``` -OpenClaw reads this file at startup to load all its LLM configurations. - -Locate the `"models"` section and merge the following JSON snippet. +--- -Be sure to replace `{BANKOFAI_API_KEY}` with the unique key generated in your API key management dashboard. 
+## 5.1 Add BankOfAI Provider -``` +```json { "models": { "mode": "merge", @@ -193,30 +129,12 @@ Be sure to replace `{BANKOFAI_API_KEY}` with the unique key generated in your AP "apiKey": "{BANKOFAI_API_KEY}", "api": "openai-completions", "models": [ - { - "id": "gpt-5.2", - "name": "gpt-5.2" - }, - { - "id": "gpt-5-mini", - "name": "gpt-5-mini" - }, - { - "id": "gpt-5-nano", - "name": "gpt-5-nano" - }, - { - "id": "claude-opus-4.6", - "name": "claude-opus-4.6" - }, - { - "id": "claude-sonnet-4.6", - "name": "claude-sonnet-4.6" - }, - { - "id": "claude-haiku-4.5", - "name": "claude-haiku-4.5" - } + { "id": "gpt-5.2", "name": "gpt-5.2" }, + { "id": "gpt-5-mini", "name": "gpt-5-mini" }, + { "id": "gpt-5-nano", "name": "gpt-5-nano" }, + { "id": "claude-opus-4.6", "name": "claude-opus-4.6" }, + { "id": "claude-sonnet-4.6", "name": "claude-sonnet-4.6" }, + { "id": "claude-haiku-4.5", "name": "claude-haiku-4.5" } ] } } @@ -226,13 +144,9 @@ Be sure to replace `{BANKOFAI_API_KEY}` with the unique key generated in your AP --- -## 5.2 Set the Default Model - -In the same `openclaw.json` file, locate the `agents` section and set the default model. - -Example: +## 5.2 Set Default Model -``` +```json { "agents": { "default": { @@ -244,190 +158,133 @@ Example: --- -## 5.3 Restart the Gateway +## 5.3 Apply Changes -For the configuration changes to take effect, restart the OpenClaw gateway: +OpenClaw usually hot-reloads config automatically. -``` +If not: + +```bash openclaw gateway restart ``` --- -## 5.4 Test the Connection - -Send a test message from your terminal: - -``` -openclaw chat "How are you doing today?" -``` - -Or: +## 5.4 Test Your Setup +```bash +openclaw agent --message "Hello, are you working?" ``` -openclaw agent --agent main --message "How are you doing today?" -``` - -If you receive a coherent response, congratulations — you have successfully connected **OpenClaw to BankOfAI**. 
- ---- - -# Step 6: Understand Gateway and Diagnostic Commands - -If you encounter issues during configuration or while running the program, it helps to understand what the **Gateway** is and how to use the built-in diagnostic tools. - ---- - -## What is the Gateway? - -During setup, you'll frequently encounter the term **Gateway**. - -| Action | Command | -|---|---| -| Install the Gateway | `openclaw gateway install` | -| Start the Gateway | `openclaw gateway start` | -| Stop the Gateway | `openclaw gateway stop` | -| Restart the Gateway | `openclaw gateway restart` | -| Uninstall the Gateway | `openclaw gateway uninstall` | -| Check Gateway Status | `openclaw gateway status` | --- -## Diagnostic Commands +# Step 6: Diagnostics -After onboarding and updating your configuration file, run the following diagnostic commands. +## Health Check -| Command | Description | -|---|---| -| openclaw doctor | Performs a comprehensive health check of your system environment and configuration | -| openclaw gateway status | Shows real-time information about the Gateway | - -Example: - -``` +```bash openclaw doctor ``` -Follow the prompts in the output to resolve any errors. - -You can also monitor the system status using: +## Gateway Status -``` +```bash openclaw gateway status ``` -If the Gateway is functioning correctly, the output will display a **Healthy** status. - --- -# Step 7: Launch OpenClaw +## Gateway Commands -With the configuration complete, you can interact with your AI assistant through either a **web dashboard** or a **terminal interface**. +| Action | Command | +|---|---| +| Install | `openclaw gateway install` | +| Start | `openclaw gateway start` | +| Stop | `openclaw gateway stop` | +| Restart | `openclaw gateway restart` | +| Status | `openclaw gateway status` | --- -## Option 1: Web Dashboard - -OpenClaw's built-in web interface is called the **Dashboard**. 
+# Step 7: Start Using OpenClaw -First ensure the Gateway is running, then execute: +## Option 1: Dashboard (Recommended) +```bash +openclaw dashboard ``` -openclaw ui -``` - -This command will generate a URL containing a temporary login token and open it in your default web browser. -Default address: +Open in browser: ``` http://127.0.0.1:18789 ``` -From the Dashboard you can: +You can: -- Chat with your AI assistant -- View chat history -- Configure models, channels, and skills -- Monitor system status +- Chat with your AI +- View memory +- Configure models +- Monitor system status --- -## Option 2: Terminal UI (TUI) +## Option 2: Terminal UI -For users who prefer working in the terminal: - -``` +```bash openclaw tui ``` -This launches a full-screen interface with: - -- Chat history -- Real-time system status -- Interactive input box - ---- - -### TUI Commands +### Commands | Command | Description | |---|---| -| /status | View current system status | -| /session | Switch chat session | -| /model | Change model | -| /help | List all commands | +| /status | Check system status | +| /session | Switch session | +| /model | Change model | +| /help | Help | --- -# Step 8: Master Essential Commands +# Step 8: Useful Commands -The OpenClaw CLI is powerful for automation and configuration. - ---- +## Model Status -## 1. Check Model Status - -``` +```bash openclaw models status ``` -Use this to verify: - -- API key validity -- model connectivity -- expiration status +## Channels ---- - -## 2. Manage Communication Channels - -``` +```bash openclaw channels list ``` -Displays all connected messaging platforms and their status. +## Memory Search ---- - -## 3. Query Long-Term Memory - -``` +```bash openclaw memory search "keyword" ``` -Search semantic memory stored by your AI assistant. +## Docs + +```bash +openclaw docs +``` --- -## 4. View Documentation +# ✅ Done -``` -openclaw docs -``` +You now have a fully working **OpenClaw + BankOfAI private AI agent**. 
+ +You can now: -Opens the official documentation portal. +- Build automation workflows +- Connect Telegram bots +- Execute on-chain operations +- Create your own AI agent product --- -You now have a fully functioning **OpenClaw AI agent powered by BankOfAI**. +🚀 Welcome to your personal AI infrastructure. From da9e087475d6d3913fd3e12f3e87e8357ed00079 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Tue, 17 Mar 2026 15:14:30 +0800 Subject: [PATCH 46/78] Update integration-guide.md --- .../llm-service/openclaw/integration-guide.md | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/docs/llm-service/openclaw/integration-guide.md b/docs/llm-service/openclaw/integration-guide.md index c99d168e..be60c29b 100644 --- a/docs/llm-service/openclaw/integration-guide.md +++ b/docs/llm-service/openclaw/integration-guide.md @@ -1,5 +1,5 @@ -# Integrating OpenClaw with BankOfAI -## From Zero to Private Agent: Deploying OpenClaw with BankOfAI in 15 Minutes +# Integrating OpenClaw with Bank of AI +## From Zero to Private Agent: Deploying OpenClaw with Bank of AI in 15 Minutes OpenClaw (formerly ClawdBot or Moltbot) is an open-source personal AI assistant that runs locally on your machine. Unlike cloud-based SaaS tools, it gives you full control over your data, workflows, and integrations. @@ -10,11 +10,11 @@ With OpenClaw, you can interact through Telegram, WhatsApp, Lark, or directly vi - Execute real-world actions - Build powerful AI agents with persistent memory -This guide walks you from **zero to a fully working private AI agent powered by BankOfAI**. +This guide walks you from **zero to a fully working private AI agent powered by Bank of AI**. --- -# Step 1: Get Your BankOfAI API Key +# Step 1: Get Your Bank of AI API Key 1. Visit: https://chat.bankofai.io/chat 2. 
Log in to your account @@ -29,10 +29,10 @@ Make sure your system meets the following requirements: | Requirement | Details | |---|---| -| Node.js | **Recommended: v24** | +| Node.js | **Recommended: v24+** | | | Supported: v22 LTS (>=22.16) | | OS | macOS / Linux / Windows (WSL2) | -| Package Manager | npm (default) | +| Package Manager | npm (default) or pnpm (for building from source) | Check your Node version: @@ -107,7 +107,7 @@ During setup: --- -# Step 5: Configure BankOfAI +# Step 5: Configure Bank of AI Open config file: @@ -117,7 +117,7 @@ Open config file: --- -## 5.1 Add BankOfAI Provider +## 5.1 Add Bank of AI Provider ```json { @@ -276,7 +276,7 @@ openclaw docs # ✅ Done -You now have a fully working **OpenClaw + BankOfAI private AI agent**. +You now have a fully working **OpenClaw + Bank of AI private AI agent**. You can now: @@ -287,4 +287,4 @@ You can now: --- -🚀 Welcome to your personal AI infrastructure. +🚀 Welcome to your personal AI infrastructure powered by Bank of AI. From b720194e5d06436904681df9d519ec374c26cd95 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Tue, 17 Mar 2026 16:28:51 +0800 Subject: [PATCH 47/78] Update integration-guide.md --- .../llm-service/openclaw/integration-guide.md | 421 +++++++++++------- 1 file changed, 265 insertions(+), 156 deletions(-) diff --git a/docs/llm-service/openclaw/integration-guide.md b/docs/llm-service/openclaw/integration-guide.md index be60c29b..2de1d59a 100644 --- a/docs/llm-service/openclaw/integration-guide.md +++ b/docs/llm-service/openclaw/integration-guide.md @@ -1,125 +1,177 @@ # Integrating OpenClaw with Bank of AI ## From Zero to Private Agent: Deploying OpenClaw with Bank of AI in 15 Minutes -OpenClaw (formerly ClawdBot or Moltbot) is an open-source personal AI assistant that runs locally on your machine. Unlike cloud-based SaaS tools, it gives you full control over your data, workflows, and integrations. +OpenClaw (formerly ClawdBot or Moltbot) is an open-source personal AI assistant. 
Unlike cloud-based SaaS tools, OpenClaw runs locally on your own machine, giving you full control over your data and workflows. You can interact with it through familiar messaging platforms like WhatsApp, Telegram, Lark, and DingTalk to handle emails, manage calendars, write code, or even automate your smart home. -With OpenClaw, you can interact through Telegram, WhatsApp, Lark, or directly via a web dashboard to: +OpenClaw is more than just a chatbot; it's a truly functional "agent" designed for real-world execution. It features persistent memory, access to your local file system and the internet, and the ability to grow more powerful by expanding its "skills". -- Automate tasks -- Manage files and memory -- Execute real-world actions -- Build powerful AI agents with persistent memory +Because it's open-source and self-hosted, OpenClaw has attracted a vibrant community of developers and tech enthusiasts. The community has pioneered creative use cases, from automating business operations to managing personal life—showcasing the immense potential of a truly personal AI. -This guide walks you from **zero to a fully working private AI agent powered by Bank of AI**. +This guide will walk you through everything from scratch: downloading, installing, and setting up OpenClaw, as well as connecting it to the Bank of AI API. By the end, you'll have built your very own AI assistant. ---- +*** -# Step 1: Get Your Bank of AI API Key +## **Step 1: Get Your Bank of AI API Key** -1. Visit: https://chat.bankofai.io/chat -2. Log in to your account -3. Navigate to the API Key page -4. Generate and copy your API key +*** ---- +1. Log in to . +2. Navigate to the API key management page and apply for your api_key. 
-# Step 2: Prepare Your System +*** -Make sure your system meets the following requirements: +## **Step 2: Prepare Your System** -| Requirement | Details | -|---|---| -| Node.js | **Recommended: v24+** | -| | Supported: v22 LTS (>=22.16) | -| OS | macOS / Linux / Windows (WSL2) | -| Package Manager | npm (default) or pnpm (for building from source) | +Before installing, make sure your system meets these basic requirements. OpenClaw is designed for Unix-like environments but runs perfectly on Windows via WSL2 (Windows Subsystem for Linux 2). -Check your Node version: +| Requirement | Details | +| ---------------- | --------------------------------------------------------------------------------------------------------------- | +| Node.js | Version 22 or higher. This is the runtime environment for OpenClaw. | +| Operating System | macOS, Linux, or Windows (via WSL2). | +| Package Manager | pnpm is required to compile from source. For a standard install, npm (which comes with Node.js) is recommended. | -```bash -node -v -``` +To check your environment, open a terminal and run: + +[block:image] +{ + "images": [ + { + "image": [ + "https://files.readme.io/ac27744855c7066d117a856e7005166662e707462312b1925c4e368f5c9c7427-1.png", + null, + "" + ], + "align": "center" + } + ] +} +[/block] ---- +If the version is lower than v22.0.0, or you see a "command not found" error, please install or upgrade Node.js from the [official website](https://nodejs.org/). -# Step 3: Install OpenClaw +*** -### Recommended (Official Script) +## **Step 3: Install OpenClaw** -```bash -curl -fsSL https://openclaw.ai/install.sh | bash +OpenClaw supports several installation methods. For beginners, the official one-line installation script is the best choice, as it automatically handles most of the setup. + +This method is the quickest and easiest as it will detect your OS, install dependencies, and make the openclaw command available globally. 
+ +For macOS or Linux terminals, execute the following: + +``` +npm install -g openclaw ``` -### Alternative (npm global install) +For Windows users using PowerShell, run the same command: -```bash +``` npm install -g openclaw ``` ---- +### **Troubleshooting Common Errors** -## Fix: Command Not Found +#### **Problem 1: Sharp Module Error** -If `openclaw` is not recognized: +On some systems—especially macOS where libvips was installed via Homebrew—you might encounter an error with the sharp module (an image processing library). To fix this, try forcing the installation of pre-built binaries, which bypasses local compilation: -```bash -npm config get prefix ``` +npm install -g openclaw --force +``` + +#### **Problem 2: "Command Not Found"** -Then add to PATH: +After installation, you might see "openclaw: command not found". This usually means your system can't find where globally installed npm packages are located. Find npm's global installation path by running: -```bash -export PATH="$(npm prefix -g)/bin:$PATH" +``` +npm config get prefix ``` -Apply changes: +If the output is, for example, /usr/local, then your binaries live in /usr/local/bin. You will need to add this path to your shell profile (\~/.zshrc or \~/.bashrc): -```bash -source ~/.zshrc +``` +export PATH="/usr/local/bin:$PATH" ``` ---- +After saving the file, restart your terminal or run source \~/.zshrc (or your specific config file) to apply the changes. The openclaw command should now work. -## Fix: Sharp Module Error (macOS) +
-```bash -npm install -g openclaw --force -``` +[block:image] +{ + "images": [ + { + "image": [ + "https://files.readme.io/6f39989ed49f307a5168bd934c4ffaf2a79db8a2268c6959c3bb731f43138598-2.png", + null, + "" + ], + "align": "center" + } + ] +} +[/block] + +*** + +## **Step 4: Complete the Initialization Wizard** ---- +After installation, the onboarding wizard should trigger automatically: -# Step 4: Run Onboarding +[block:image] +{ + "images": [ + { + "image": [ + "https://files.readme.io/aad882d79978e215c2c03278d008893d004a4d14abe097d4d4a046f3c9241a1f-3.png", + null, + "" + ], + "align": "center" + } + ] +} +[/block] -```bash -openclaw onboard --install-daemon +If you accidentally closed the window, you can restart the wizard (and install the background daemon) by running: + +``` +openclaw onboard ``` -During setup: +The wizard will walk you through three primary sections: -### AI Model -→ Select **Skip for now** +- AI Model Configuration: The wizard will request an API key for the large language model service (Anthropic Claude, OpenAI GPT, etc.). + → For now, select "Skip for now". We'll set this up manually in the next step. -### Channels -→ Optional (you can skip) + [block:image]{"images":[{"image":["https://files.readme.io/458a23b58f79ec97f4fee7ed6668a6a799cb37f95be57ddfe96830ad77bb2bb5-4.png",null,""],"align":"center"}]}[/block] -### Skills -→ Select **No** +
+- Communication Channels: Choose which messaging apps you want to use to talk to OpenClaw (e.g., Telegram, WhatsApp). ---- + [block:image]{"images":[{"image":["https://files.readme.io/178a9aded9bb4934dcaf7445c0c4edf4569836183ac6019b8c8d08e2917b4549-5.png",null,""],"align":"center"}]}[/block] +- Skills: We recommend selecting No (Use the Spacebar to toggle selections and Enter to confirm). You may also skip and add them later. -# Step 5: Configure Bank of AI + [block:image]{"images":[{"image":["https://files.readme.io/d4155d69eb1dd69c1586fa5ab844c602914b0b5a1934265bd97e1f8444b40b5a-6.png",null,""],"align":"center"}]}[/block] -Open config file: +Once the wizard finishes and the OpenClaw UI launches, you will need to manually edit the configuration file to connect it to Bank of AI. -```bash -~/.openclaw/openclaw.json -``` +*** + +## **Step 5: Configure the Bank of AI Model** + +After completing the onboarding wizard, you'll need to manually add your Bank of AI configuration to OpenClaw and set it as the default model. There are two ways to complete the configuration: + +- **[One-Click Script](https://docs.ainft.com/reference/openclaw-ainft-integration-one-click-script-tutorial)**: Please refer to this article. +- **Manual Configuration**: Please follow the instructions below. + +### **5.1 Edit the Configuration File** ---- +Open the configuration file located at \~/.openclaw/openclaw.json. OpenClaw reads this file at startup to load all its LLM configurations. -## 5.1 Add Bank of AI Provider +Locate the "models" section and merge the following JSON snippet. Be sure to replace {BANKOFAI_API_KEY} with the unique key generated in your [API key management dashboard](https://chat.bankofai.io/key). 
-```json +``` { "models": { "mode": "merge", @@ -129,12 +181,30 @@ Open config file: "apiKey": "{BANKOFAI_API_KEY}", "api": "openai-completions", "models": [ - { "id": "gpt-5.2", "name": "gpt-5.2" }, - { "id": "gpt-5-mini", "name": "gpt-5-mini" }, - { "id": "gpt-5-nano", "name": "gpt-5-nano" }, - { "id": "claude-opus-4.6", "name": "claude-opus-4.6" }, - { "id": "claude-sonnet-4.6", "name": "claude-sonnet-4.6" }, - { "id": "claude-haiku-4.5", "name": "claude-haiku-4.5" } + { + "id": "gpt-5.2", + "name": "gpt-5.2" + }, + { + "id": "gpt-5-mini", + "name": "gpt-5-mini" + }, + { + "id": "gpt-5-nano", + "name": "gpt-5-nano" + }, + { + "id": "claude-opus-4.6", + "name": "claude-opus-4.6" + }, + { + "id": "claude-sonnet-4.6", + "name": "claude-sonnet-4.6" + }, + { + "id": "claude-haiku-4.5", + "name": "claude-haiku-4.5" + } ] } } @@ -142,11 +212,11 @@ Open config file: } ``` ---- +### **5.2 Set the Default Model** -## 5.2 Set Default Model +In the same openclaw.json file, locate the agents section, and set the default model to your preferred choice. This example uses bankofai/gpt-5-nano: -```json +``` { "agents": { "default": { @@ -156,135 +226,174 @@ Open config file: } ``` ---- - -## 5.3 Apply Changes +### **5.3 Restart the Gateway** -OpenClaw usually hot-reloads config automatically. +For the configuration changes to take effect, you must restart the OpenClaw gateway: -If not: - -```bash +``` openclaw gateway restart ``` ---- +### **5.4 Test the Connection** -## 5.4 Test Your Setup +Send a test message from your terminal to verify everything is working: -```bash -openclaw agent --message "Hello, are you working?" +``` +openclaw agent --agent main --message "How are you doing today?" 
``` ---- +[block:image] +{ + "images": [ + { + "image": [ + "https://files.readme.io/312c6feb51ea9f4071b75efd3182ef0507a6981baebd3bf7e8d57dec33978efd-7.png", + null, + "" + ], + "align": "center" + } + ] +} +[/block] -# Step 6: Diagnostics +If you receive a coherent response, congrats—you have successfully connected OpenClaw to Bank of AI! -## Health Check +*** -```bash -openclaw doctor -``` +## **Step 6: Understand Gateway and Diagnostic Commands** -## Gateway Status +If you encounter issues during configuration or while running the program, it helps to understand what the Gateway is and how to use the built-in diagnostic tools. -```bash -openclaw gateway status -``` +### **What is the Gateway?** ---- +During setup, you'll frequently encounter the term "Gateway". -## Gateway Commands +| Action | Command | +| --------------------- | -------------------------- | +| Install the Gateway | openclaw gateway install | +| Start the Gateway | openclaw gateway start | +| Stop the Gateway | openclaw gateway stop | +| Restart the Gateway | openclaw gateway restart | +| Uninstall the Gateway | openclaw gateway uninstall | +| Check Gateway Status | openclaw gateway status | -| Action | Command | -|---|---| -| Install | `openclaw gateway install` | -| Start | `openclaw gateway start` | -| Stop | `openclaw gateway stop` | -| Restart | `openclaw gateway restart` | -| Status | `openclaw gateway status` | +### **Diagnostic Command** ---- +Once completing onboarding and updating your configuration file, it's wise to run the following diagnostic commands to ensure everything is set up correctly. -# Step 7: Start Using OpenClaw +| Command | Description | +| ----------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| openclaw doctor | Performs a comprehensive health check of your system environment and configuration. 
Run this first if you encounter any issues; it provides specific recommended actions for each error found. | +| openclaw gateway status | Shows real-time information about the Gateway. | -## Option 1: Dashboard (Recommended) +Example: -```bash -openclaw dashboard ``` +openclaw doctor +``` + +Simply follow the prompts and suggestions in the output to resolve any errors. You can use openclaw gateway status (or simply openclaw status) to monitor the real-time status of the Gateway and all active sessions. If the Gateway is functioning correctly, the output will display a "Healthy" status with no exceptions. -Open in browser: +*** + +## **Step 7: Launch OpenClaw** + +With the configuration complete, you can interact with your AI assistant through either a web-based dashboard or a Terminal User Interface (TUI). + +### **Option 1: Web Dashboard** + +OpenClaw's built-in web interface, called the Dashboard (or Control UI), is the most intuitive way to manage and use your assistant. + +To launch the Dashboard: First, ensure the Gateway is running. In your terminal, execute: ``` -http://127.0.0.1:18789 +openclaw ui ``` -You can: +This command will generate a URL containing a temporary login token and open it in your default web browser. By default, the address is http://127.0.0.1:18789, though the port may vary based on your configuration. -- Chat with your AI -- View memory -- Configure models -- Monitor system status +From the Dashboard, you can: ---- +- Chat with your AI assistant in real-time. +- View and manage your chat history. +- Configure models, channels, and skills. +- Track the real-time status of the system. -## Option 2: Terminal UI +### **Option 2: The Terminal UI (TUI)** -```bash +For users who prefer to work within the terminal, OpenClaw offers a full-screen text-based interface called the TUI. 
+ +Ensure your Gateway is running, then start it with: + +``` openclaw tui ``` -### Commands +The command launches a full-screen application featuring dedicated sections for chat history, real-time system status, and an interactive input box. -| Command | Description | -|---|---| -| /status | Check system status | -| /session | Switch session | -| /model | Change model | -| /help | Help | +[block:image] +{ + "images": [ + { + "image": [ + "https://files.readme.io/9ca9be0db1c21be12488934030c8ef3076c17fbb4fc26cfdb0e71425e46d121d-8.png", + null, + "" + ], + "align": "center" + } + ] +} +[/block] ---- +You can send messages just like in a chat app. The TUI also supports the following slash commands for quick actions: -# Step 8: Useful Commands +| Command | Description | +| ---------------- | --------------------------------------- | +| /status | View the current system status. | +| /session \ | Switch to a specific chat session. | +| /model \ | Switch the LLM for the current session. | +| /help | View a list of all available commands. | -## Model Status +The TUI is optimized for a focused, keyboard-centric experience, making it the ideal choice for developers who prefer staying within the terminal environment. -```bash -openclaw models status -``` +## **Step 8: Master Essential Commands** -## Channels +Beyond the graphical interfaces, the OpenClaw Command Line Interface (CLI) is also a powerful tool designed for advanced configuration and automation. -```bash -openclaw channels list -``` +### **Core Management Commands** -## Memory Search +Here are some of the most useful commands for daily use: + +#### **1\. Check Model Status** -```bash -openclaw memory search "keyword" +``` +openclaw models status ``` -## Docs +Use this to check the status of your configured AI models and their API keys—for example, to see if a key is still valid or is about to expire. Ensuring your models are healthy is the first step to a functioning assistant. -```bash -openclaw docs +#### **2\. 
Manage Communication Channels** + +``` +openclaw channels list ``` ---- +This displays all the messaging platforms you have configured and their current connection status. -# ✅ Done +#### **3\. Query Long-Term Memory** -You now have a fully working **OpenClaw + Bank of AI private AI agent**. +``` +openclaw memory search "keyword" +``` -You can now: +This executes a semantic search of your AI assistant's long-term memory, allowing you to retrieve any information you've shared with it in the past. -- Build automation workflows -- Connect Telegram bots -- Execute on-chain operations -- Create your own AI agent product +#### **4\. View Documentation** ---- +``` +openclaw docs +``` -🚀 Welcome to your personal AI infrastructure powered by Bank of AI. +If you want to dive deeper into a specific command or feature, this command provides a direct portal to the official OpenClaw documentation. From 61fd4fbbd3aa6d8a8007a52c79044c34cdd85aa3 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Tue, 17 Mar 2026 16:30:55 +0800 Subject: [PATCH 48/78] Update integration-guide.md --- .../llm-service/openclaw/integration-guide.md | 294 ++++++++---------- 1 file changed, 124 insertions(+), 170 deletions(-) diff --git a/docs/llm-service/openclaw/integration-guide.md b/docs/llm-service/openclaw/integration-guide.md index 2de1d59a..581794b2 100644 --- a/docs/llm-service/openclaw/integration-guide.md +++ b/docs/llm-service/openclaw/integration-guide.md @@ -9,16 +9,14 @@ Because it's open-source and self-hosted, OpenClaw has attracted a vibrant commu This guide will walk you through everything from scratch: downloading, installing, and setting up OpenClaw, as well as connecting it to the Bank of AI API. By the end, you'll have built your very own AI assistant. -*** +--- ## **Step 1: Get Your Bank of AI API Key** -*** +1. Log in to https://chat.bankofai.io/chat +2. Navigate to the API key management page and apply for your api_key -1. Log in to . -2. 
Navigate to the API key management page and apply for your api_key. - -*** +--- ## **Step 2: Prepare Your System** @@ -32,24 +30,13 @@ Before installing, make sure your system meets these basic requirements. OpenCla To check your environment, open a terminal and run: -[block:image] -{ - "images": [ - { - "image": [ - "https://files.readme.io/ac27744855c7066d117a856e7005166662e707462312b1925c4e368f5c9c7427-1.png", - null, - "" - ], - "align": "center" - } - ] -} -[/block] +```bash +node -v +``` -If the version is lower than v22.0.0, or you see a "command not found" error, please install or upgrade Node.js from the [official website](https://nodejs.org/). +If the version is lower than v22.0.0, or you see a "command not found" error, please install or upgrade Node.js from the official website: https://nodejs.org/ -*** +--- ## **Step 3: Install OpenClaw** @@ -59,13 +46,13 @@ This method is the quickest and easiest as it will detect your OS, install depen For macOS or Linux terminals, execute the following: -``` +```bash npm install -g openclaw ``` For Windows users using PowerShell, run the same command: -``` +```bash npm install -g openclaw ``` @@ -75,103 +62,90 @@ npm install -g openclaw On some systems—especially macOS where libvips was installed via Homebrew—you might encounter an error with the sharp module (an image processing library). To fix this, try forcing the installation of pre-built binaries, which bypasses local compilation: -``` +```bash npm install -g openclaw --force ``` #### **Problem 2: "Command Not Found"** -After installation, you might see "openclaw: command not found". This usually means your system can't find where globally installed npm packages are located. Find npm's global installation path by running: +After installation, you might see: +```bash +openclaw: command not found ``` + +This usually means your system can't find where globally installed npm packages are located. 
Find npm's global installation path by running: + +```bash npm config get prefix ``` -If the output is, for example, /usr/local, then your binaries live in /usr/local/bin. You will need to add this path to your shell profile (\~/.zshrc or \~/.bashrc): +If the output is, for example, `/usr/local`, then your binaries live in `/usr/local/bin`. You will need to add this path to your shell profile (`~/.zshrc` or `~/.bashrc`): -``` +```bash export PATH="/usr/local/bin:$PATH" ``` -After saving the file, restart your terminal or run source \~/.zshrc (or your specific config file) to apply the changes. The openclaw command should now work. +After saving the file, restart your terminal or run: -
+```bash +source ~/.zshrc +``` -[block:image] -{ - "images": [ - { - "image": [ - "https://files.readme.io/6f39989ed49f307a5168bd934c4ffaf2a79db8a2268c6959c3bb731f43138598-2.png", - null, - "" - ], - "align": "center" - } - ] -} -[/block] +The openclaw command should now work. -*** +--- ## **Step 4: Complete the Initialization Wizard** -After installation, the onboarding wizard should trigger automatically: - -[block:image] -{ - "images": [ - { - "image": [ - "https://files.readme.io/aad882d79978e215c2c03278d008893d004a4d14abe097d4d4a046f3c9241a1f-3.png", - null, - "" - ], - "align": "center" - } - ] -} -[/block] +After installation, the onboarding wizard should trigger automatically. If you accidentally closed the window, you can restart the wizard (and install the background daemon) by running: -``` +```bash openclaw onboard ``` The wizard will walk you through three primary sections: -- AI Model Configuration: The wizard will request an API key for the large language model service (Anthropic Claude, OpenAI GPT, etc.). - → For now, select "Skip for now". We'll set this up manually in the next step. - - [block:image]{"images":[{"image":["https://files.readme.io/458a23b58f79ec97f4fee7ed6668a6a799cb37f95be57ddfe96830ad77bb2bb5-4.png",null,""],"align":"center"}]}[/block] +- **AI Model Configuration** + The wizard will request an API key for the large language model service (Anthropic Claude, OpenAI GPT, etc.). + → For now, select **"Skip for now"**. We'll set this up manually in the next step. -
-- Communication Channels: Choose which messaging apps you want to use to talk to OpenClaw (e.g., Telegram, WhatsApp). +- **Communication Channels** + Choose which messaging apps you want to use to talk to OpenClaw (e.g., Telegram, WhatsApp). - [block:image]{"images":[{"image":["https://files.readme.io/178a9aded9bb4934dcaf7445c0c4edf4569836183ac6019b8c8d08e2917b4549-5.png",null,""],"align":"center"}]}[/block] -- Skills: We recommend selecting No (Use the Spacebar to toggle selections and Enter to confirm). You may also skip and add them later. - - [block:image]{"images":[{"image":["https://files.readme.io/d4155d69eb1dd69c1586fa5ab844c602914b0b5a1934265bd97e1f8444b40b5a-6.png",null,""],"align":"center"}]}[/block] +- **Skills** + We recommend selecting **No** (use the Spacebar to toggle selections and Enter to confirm). You can add them later. Once the wizard finishes and the OpenClaw UI launches, you will need to manually edit the configuration file to connect it to Bank of AI. -*** +--- ## **Step 5: Configure the Bank of AI Model** -After completing the onboarding wizard, you'll need to manually add your Bank of AI configuration to OpenClaw and set it as the default model. There are two ways to complete the configuration: +After completing the onboarding wizard, you'll need to manually add your Bank of AI configuration to OpenClaw and set it as the default model. -- **[One-Click Script](https://docs.ainft.com/reference/openclaw-ainft-integration-one-click-script-tutorial)**: Please refer to this article. -- **Manual Configuration**: Please follow the instructions below. +There are two ways to complete the configuration: -### **5.1 Edit the Configuration File** +- **One-Click Script**: https://docs.ainft.com/reference/openclaw-ainft-integration-one-click-script-tutorial +- **Manual Configuration**: Follow the instructions below -Open the configuration file located at \~/.openclaw/openclaw.json. OpenClaw reads this file at startup to load all its LLM configurations. 
+--- + +### **5.1 Edit the Configuration File** -Locate the "models" section and merge the following JSON snippet. Be sure to replace {BANKOFAI_API_KEY} with the unique key generated in your [API key management dashboard](https://chat.bankofai.io/key). +Open the configuration file located at: +```bash +~/.openclaw/openclaw.json ``` + +OpenClaw reads this file at startup to load all its LLM configurations. + +Locate the `"models"` section and merge the following JSON snippet. Be sure to replace `{BANKOFAI_API_KEY}` with your actual API key from https://chat.bankofai.io/key. + +```json { "models": { "mode": "merge", @@ -212,11 +186,13 @@ Locate the "models" section and merge the following JSON snippet. Be sure to rep } ``` +--- + ### **5.2 Set the Default Model** -In the same openclaw.json file, locate the agents section, and set the default model to your preferred choice. This example uses bankofai/gpt-5-nano: +In the same `openclaw.json` file, locate the `agents` section and set the default model: -``` +```json { "agents": { "default": { @@ -226,40 +202,29 @@ In the same openclaw.json file, locate the agents section, and set the default m } ``` +--- + ### **5.3 Restart the Gateway** -For the configuration changes to take effect, you must restart the OpenClaw gateway: +For the configuration changes to take effect, restart the OpenClaw gateway: -``` +```bash openclaw gateway restart ``` +--- + ### **5.4 Test the Connection** -Send a test message from your terminal to verify everything is working: +Send a test message from your terminal: -``` +```bash openclaw agent --agent main --message "How are you doing today?" ``` -[block:image] -{ - "images": [ - { - "image": [ - "https://files.readme.io/312c6feb51ea9f4071b75efd3182ef0507a6981baebd3bf7e8d57dec33978efd-7.png", - null, - "" - ], - "align": "center" - } - ] -} -[/block] - -If you receive a coherent response, congrats—you have successfully connected OpenClaw to Bank of AI! 
+If you receive a coherent response, congratulations—you have successfully connected OpenClaw to Bank of AI! -*** +--- ## **Step 6: Understand Gateway and Diagnostic Commands** @@ -278,122 +243,111 @@ During setup, you'll frequently encounter the term "Gateway". | Uninstall the Gateway | openclaw gateway uninstall | | Check Gateway Status | openclaw gateway status | -### **Diagnostic Command** +--- -Once completing onboarding and updating your configuration file, it's wise to run the following diagnostic commands to ensure everything is set up correctly. +### **Diagnostic Commands** -| Command | Description | -| ----------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| openclaw doctor | Performs a comprehensive health check of your system environment and configuration. Run this first if you encounter any issues; it provides specific recommended actions for each error found. | -| openclaw gateway status | Shows real-time information about the Gateway. | +Once completing onboarding and updating your configuration file, run: -Example: - -``` +```bash openclaw doctor ``` -Simply follow the prompts and suggestions in the output to resolve any errors. You can use openclaw gateway status (or simply openclaw status) to monitor the real-time status of the Gateway and all active sessions. If the Gateway is functioning correctly, the output will display a "Healthy" status with no exceptions. +You can also check status: -*** +```bash +openclaw gateway status +``` -## **Step 7: Launch OpenClaw** +If the Gateway is functioning correctly, it will show a **Healthy** status. -With the configuration complete, you can interact with your AI assistant through either a web-based dashboard or a Terminal User Interface (TUI). 
+--- -### **Option 1: Web Dashboard** +## **Step 7: Launch OpenClaw** -OpenClaw's built-in web interface, called the Dashboard (or Control UI), is the most intuitive way to manage and use your assistant. +With the configuration complete, you can interact with your AI assistant through either a web dashboard or a terminal interface. -To launch the Dashboard: First, ensure the Gateway is running. In your terminal, execute: +### **Option 1: Web Dashboard** -``` +```bash openclaw ui ``` -This command will generate a URL containing a temporary login token and open it in your default web browser. By default, the address is http://127.0.0.1:18789, though the port may vary based on your configuration. +Then open: -From the Dashboard, you can: +``` +http://127.0.0.1:18789 +``` -- Chat with your AI assistant in real-time. -- View and manage your chat history. -- Configure models, channels, and skills. -- Track the real-time status of the system. +You can: -### **Option 2: The Terminal UI (TUI)** +- Chat with your AI +- View history +- Configure models +- Monitor system status -For users who prefer to work within the terminal, OpenClaw offers a full-screen text-based interface called the TUI. +--- -Ensure your Gateway is running, then start it with: +### **Option 2: Terminal UI (TUI)** -``` +```bash openclaw tui ``` -The command launches a full-screen application featuring dedicated sections for chat history, real-time system status, and an interactive input box. - -[block:image] -{ - "images": [ - { - "image": [ - "https://files.readme.io/9ca9be0db1c21be12488934030c8ef3076c17fbb4fc26cfdb0e71425e46d121d-8.png", - null, - "" - ], - "align": "center" - } - ] -} -[/block] - -You can send messages just like in a chat app. The TUI also supports the following slash commands for quick actions: +### Commands | Command | Description | | ---------------- | --------------------------------------- | -| /status | View the current system status. 
| -| /session \ | Switch to a specific chat session. | -| /model \ | Switch the LLM for the current session. | -| /help | View a list of all available commands. | +| /status | View the current system status | +| /session | Switch to a specific chat session | +| /model | Switch the LLM | +| /help | View available commands | -The TUI is optimized for a focused, keyboard-centric experience, making it the ideal choice for developers who prefer staying within the terminal environment. +--- ## **Step 8: Master Essential Commands** -Beyond the graphical interfaces, the OpenClaw Command Line Interface (CLI) is also a powerful tool designed for advanced configuration and automation. +### 1. Check Model Status -### **Core Management Commands** - -Here are some of the most useful commands for daily use: - -#### **1\. Check Model Status** - -``` +```bash openclaw models status ``` -Use this to check the status of your configured AI models and their API keys—for example, to see if a key is still valid or is about to expire. Ensuring your models are healthy is the first step to a functioning assistant. +--- -#### **2\. Manage Communication Channels** +### 2. Manage Channels -``` +```bash openclaw channels list ``` -This displays all the messaging platforms you have configured and their current connection status. +--- -#### **3\. Query Long-Term Memory** +### 3. Search Memory -``` +```bash openclaw memory search "keyword" ``` -This executes a semantic search of your AI assistant's long-term memory, allowing you to retrieve any information you've shared with it in the past. +--- -#### **4\. View Documentation** +### 4. View Docs -``` +```bash openclaw docs ``` -If you want to dive deeper into a specific command or feature, this command provides a direct portal to the official OpenClaw documentation. +--- + +# ✅ Done + +You now have a fully working **OpenClaw + Bank of AI private AI agent**. 
+ +You can now: + +- Build automation workflows +- Connect Telegram bots +- Execute on-chain operations +- Create your own AI agent product + +🚀 Welcome to your personal AI infrastructure powered by Bank of AI. From 42981f225c541a728ee4a3d0329ee05d731c2c6f Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Tue, 17 Mar 2026 16:44:04 +0800 Subject: [PATCH 49/78] Update integration-guide.md --- docs/llm-service/openclaw/integration-guide.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/llm-service/openclaw/integration-guide.md b/docs/llm-service/openclaw/integration-guide.md index 581794b2..9209bd46 100644 --- a/docs/llm-service/openclaw/integration-guide.md +++ b/docs/llm-service/openclaw/integration-guide.md @@ -138,7 +138,7 @@ There are two ways to complete the configuration: Open the configuration file located at: ```bash -~/.openclaw/openclaw.json +vim ~/.openclaw/openclaw.json ``` OpenClaw reads this file at startup to load all its LLM configurations. From c9a56476d15053db1242b2fb154e32f5689805a4 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Tue, 17 Mar 2026 17:45:09 +0800 Subject: [PATCH 50/78] Create llm-service --- i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service | 1 + 1 file changed, 1 insertion(+) create mode 100644 i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service diff --git a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service @@ -0,0 +1 @@ + From 6db77e934b4aabb1771c5aa4f2060df0d08a0847 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Tue, 17 Mar 2026 18:06:47 +0800 Subject: [PATCH 51/78] Create introduction.md --- .../llm-service/introduction.md | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100644 i18n/zh-Hans/docusaurus-plugin-content-docs/llm-service/introduction.md 
diff --git a/i18n/zh-Hans/docusaurus-plugin-content-docs/llm-service/introduction.md b/i18n/zh-Hans/docusaurus-plugin-content-docs/llm-service/introduction.md new file mode 100644 index 00000000..995f9af8 --- /dev/null +++ b/i18n/zh-Hans/docusaurus-plugin-content-docs/llm-service/introduction.md @@ -0,0 +1,19 @@ +# 欢迎使用 LLM Service + +## 关于 LLM Service + +LLM Service 是 Bank of AI 生态中的专业 AI 服务模块,基于顶级区块链基础设施构建,致力于为用户提供高效、易用且富有创造力的 AI 交互体验。作为 Bank of AI 的核心 AI 基础设施,该服务结合区块链的去中心化、安全性与高效率,打造全新的 AI 服务模式。 + +核心功能包括: + +- **多模型 AI 对话:** 集成多种行业领先的大语言模型(LLMs),用户可根据需求自由选择最合适的模型。 +- **强大的 AI 服务能力:** 提供完整的 AI API 服务,支持在 Bank of AI 框架内快速接入与集成。 +- **原生 Web3 体验:** 无缝集成主流 Web3 钱包,实现从登录到支付的一体化原生体验。 + +## 为什么选择 LLM Service? + +选择 LLM Service,即可同时享受区块链生态优势与高质量 AI 服务体验: + +- **多链生态优势:** 支持多链主流代币支付,具备快速确认与低手续费优势。 +- **低成本高效率:** 通过资源优化与高效链上交互,实现更具性价比的 AI 服务。 +- **安全与隐私保护:** 基于去中心化登录机制,用户仅需通过 Web3 钱包签名即可完成认证,保障交互过程的安全与隐私。 From 2f894934f4cd3cb8dfd4833bd60c560e59cb6bb3 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Tue, 17 Mar 2026 18:15:32 +0800 Subject: [PATCH 52/78] Delete i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service --- i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service | 1 - 1 file changed, 1 deletion(-) delete mode 100644 i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service diff --git a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service deleted file mode 100644 index 8b137891..00000000 --- a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service +++ /dev/null @@ -1 +0,0 @@ - From 4311b5a5648656a7349870c47d1bc12919508687 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Tue, 17 Mar 2026 18:19:47 +0800 Subject: [PATCH 53/78] Create introduction.md --- .../current/llm-service/introduction.md | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100644 i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/introduction.md diff --git 
a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/introduction.md b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/introduction.md new file mode 100644 index 00000000..995f9af8 --- /dev/null +++ b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/introduction.md @@ -0,0 +1,19 @@ +# 欢迎使用 LLM Service + +## 关于 LLM Service + +LLM Service 是 Bank of AI 生态中的专业 AI 服务模块,基于顶级区块链基础设施构建,致力于为用户提供高效、易用且富有创造力的 AI 交互体验。作为 Bank of AI 的核心 AI 基础设施,该服务结合区块链的去中心化、安全性与高效率,打造全新的 AI 服务模式。 + +核心功能包括: + +- **多模型 AI 对话:** 集成多种行业领先的大语言模型(LLMs),用户可根据需求自由选择最合适的模型。 +- **强大的 AI 服务能力:** 提供完整的 AI API 服务,支持在 Bank of AI 框架内快速接入与集成。 +- **原生 Web3 体验:** 无缝集成主流 Web3 钱包,实现从登录到支付的一体化原生体验。 + +## 为什么选择 LLM Service? + +选择 LLM Service,即可同时享受区块链生态优势与高质量 AI 服务体验: + +- **多链生态优势:** 支持多链主流代币支付,具备快速确认与低手续费优势。 +- **低成本高效率:** 通过资源优化与高效链上交互,实现更具性价比的 AI 服务。 +- **安全与隐私保护:** 基于去中心化登录机制,用户仅需通过 Web3 钱包签名即可完成认证,保障交互过程的安全与隐私。 From 273c72fb4fa23bac20e1570722a09845a70d5e6f Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Tue, 17 Mar 2026 18:20:07 +0800 Subject: [PATCH 54/78] Delete i18n/zh-Hans/docusaurus-plugin-content-docs/llm-service directory --- .../llm-service/introduction.md | 19 ------------------- 1 file changed, 19 deletions(-) delete mode 100644 i18n/zh-Hans/docusaurus-plugin-content-docs/llm-service/introduction.md diff --git a/i18n/zh-Hans/docusaurus-plugin-content-docs/llm-service/introduction.md b/i18n/zh-Hans/docusaurus-plugin-content-docs/llm-service/introduction.md deleted file mode 100644 index 995f9af8..00000000 --- a/i18n/zh-Hans/docusaurus-plugin-content-docs/llm-service/introduction.md +++ /dev/null @@ -1,19 +0,0 @@ -# 欢迎使用 LLM Service - -## 关于 LLM Service - -LLM Service 是 Bank of AI 生态中的专业 AI 服务模块,基于顶级区块链基础设施构建,致力于为用户提供高效、易用且富有创造力的 AI 交互体验。作为 Bank of AI 的核心 AI 基础设施,该服务结合区块链的去中心化、安全性与高效率,打造全新的 AI 服务模式。 - -核心功能包括: - -- **多模型 AI 对话:** 集成多种行业领先的大语言模型(LLMs),用户可根据需求自由选择最合适的模型。 -- **强大的 AI 服务能力:** 提供完整的 AI API 服务,支持在 Bank of AI 框架内快速接入与集成。 -- **原生 Web3 体验:** 无缝集成主流 
Web3 钱包,实现从登录到支付的一体化原生体验。 - -## 为什么选择 LLM Service? - -选择 LLM Service,即可同时享受区块链生态优势与高质量 AI 服务体验: - -- **多链生态优势:** 支持多链主流代币支付,具备快速确认与低手续费优势。 -- **低成本高效率:** 通过资源优化与高效链上交互,实现更具性价比的 AI 服务。 -- **安全与隐私保护:** 基于去中心化登录机制,用户仅需通过 Web3 钱包签名即可完成认证,保障交互过程的安全与隐私。 From 82c5e05120f412b376949015c804600d2b4d6ae4 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Tue, 17 Mar 2026 18:21:37 +0800 Subject: [PATCH 55/78] Create quick-start.md --- .../current/llm-service/quick-start.md | 40 +++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/quick-start.md diff --git a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/quick-start.md b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/quick-start.md new file mode 100644 index 00000000..e5dfe487 --- /dev/null +++ b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/quick-start.md @@ -0,0 +1,40 @@ +# 快速开始 + +本章节将引导你在 Bank of AI 平台上完成 LLM Service 的初始配置,快速开启 AI 使用体验。 + +## 1. 连接钱包 + +LLM Service 采用去中心化登录方式,支持通过 Web3 钱包进行授权登录,无需用户名和密码,安全便捷。 + +### 登录步骤: + +1. 访问 [Bank of AI Chat 平台](https://chat.bankofai.io/chat) +2. 点击右上角 **Log in** +3. 选择你的钱包并授权连接 +4. 在钱包中确认签名完成登录 + +登录成功后,右上角会显示你的钱包地址。 + +> 注意:请确保已安装兼容的 Web3 钱包,并妥善保管助记词和私钥。 + +--- + +## 2. 开始首次对话 + +进入平台后即可直接使用 LLM Service 进行 AI 交互。 + +- **选择模型:** 点击当前模型名称,选择需要使用的模型(如 GPT-4o、Claude、Gemini 等) +- **发送消息:** 在输入框输入内容,点击发送或按 Enter +- **多轮对话:** 支持上下文连续对话,AI 会基于历史内容进行响应 + +--- + +## 3. 
余额与使用 + +LLM Service 采用积分(Credits)计费,通过链上支付获取。 + +- **充值入口:** 在 Dashboard 中进入 **Top up** +- **购买积分:** 使用支持的链上代币支付,确认钱包交易 +- **自动到账:** 交易确认后,系统自动为账户充值 + +完成以上步骤后,即可使用多种主流 AI 模型。 From 28ea31b19e9e9bd03fcf8f501d2fc9d4e35372b3 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Tue, 17 Mar 2026 18:23:02 +0800 Subject: [PATCH 56/78] Update quick-start.md --- .../current/llm-service/quick-start.md | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/quick-start.md b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/quick-start.md index e5dfe487..72a3d567 100644 --- a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/quick-start.md +++ b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/quick-start.md @@ -8,14 +8,14 @@ LLM Service 采用去中心化登录方式,支持通过 Web3 钱包进行授 ### 登录步骤: -1. 访问 [Bank of AI Chat 平台](https://chat.bankofai.io/chat) -2. 点击右上角 **Log in** +1. 访问 [Bank of AI 对话平台](https://chat.bankofai.io/chat) +2. 点击右上角 **登录** 3. 选择你的钱包并授权连接 4. 
在钱包中确认签名完成登录 登录成功后,右上角会显示你的钱包地址。 -> 注意:请确保已安装兼容的 Web3 钱包,并妥善保管助记词和私钥。 +> 注意:请确保浏览器或移动设备中已安装兼容的 Web3 钱包,并妥善保管助记词和私钥。 --- @@ -23,9 +23,9 @@ LLM Service 采用去中心化登录方式,支持通过 Web3 钱包进行授 进入平台后即可直接使用 LLM Service 进行 AI 交互。 -- **选择模型:** 点击当前模型名称,选择需要使用的模型(如 GPT-4o、Claude、Gemini 等) -- **发送消息:** 在输入框输入内容,点击发送或按 Enter -- **多轮对话:** 支持上下文连续对话,AI 会基于历史内容进行响应 +- **选择模型:** 点击当前模型名称,从列表中选择需要使用的模型(如 GPT、Claude、Gemini 等) +- **发送消息:** 在输入框输入内容,点击发送按钮或按 Enter +- **多轮对话:** 支持连续上下文对话,AI 会根据历史内容进行响应 --- @@ -33,8 +33,8 @@ LLM Service 采用去中心化登录方式,支持通过 Web3 钱包进行授 LLM Service 采用积分(Credits)计费,通过链上支付获取。 -- **充值入口:** 在 Dashboard 中进入 **Top up** -- **购买积分:** 使用支持的链上代币支付,确认钱包交易 -- **自动到账:** 交易确认后,系统自动为账户充值 +- **充值入口:** 在控制台页面进入 **充值** +- **购买积分:** 使用支持的链上代币支付,在钱包中确认交易 +- **自动到账:** 区块链交易确认后,系统会自动为账户增加对应积分 -完成以上步骤后,即可使用多种主流 AI 模型。 +完成以上步骤后,你即可使用多种主流 AI 模型。 From c31773a7ae01c866bca10a20d1992fdd9a0804ee Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Tue, 17 Mar 2026 18:25:59 +0800 Subject: [PATCH 57/78] Create pricing-and-usage.md --- .../current/llm-service/pricing-and-usage.md | 56 +++++++++++++++++++ 1 file changed, 56 insertions(+) create mode 100644 i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/pricing-and-usage.md diff --git a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/pricing-and-usage.md b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/pricing-and-usage.md new file mode 100644 index 00000000..ee8d59e7 --- /dev/null +++ b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/pricing-and-usage.md @@ -0,0 +1,56 @@ +# 计费与使用 + +## 积分与定价 + +Bank of AI 平台采用统一的积分(Credits)体系,对所有 AI 服务进行计量与结算。 + +- **计费规则:** 每次与 AI 交互所消耗的 Token,会根据不同模型的定价标准换算为对应积分,并从账户余额中扣除 +- **Token 消耗明细:** 在 AI 回复详情中可查看 Token 使用拆分,帮助理解消耗来源并优化使用策略 +- **模型定价差异:** 不同模型因能力与计算成本不同,价格有所差异。一般来说,能力越强的模型消耗越高 +- **联网搜索费用:** Web 搜索为额外收费功能,按次计费。部分模型不支持该功能(标记为 "-") + +### 模型定价 + +| 模型 | 输入(积分/Token) | 输出(积分/Token) | 联网搜索(积分/次) | +| :--- | :--- | :--- | :--- | +| ChatGPT-5.2 | 1.75 | 
14.00 | 10,000 | +| ChatGPT-5-mini | 0.25 | 2.00 | 10,000 | +| ChatGPT-5-nano | 0.05 | 0.40 | - | +| Claude Opus 4.6 | 5.00 | 25.00 | 10,000 | +| Claude Opus 4.5 | 5.00 | 25.00 | 10,000 | +| Claude Sonnet 4.6 | 3.00 | 15.00 | 10,000 | +| Claude Sonnet 4.5 | 3.00 | 15.00 | 10,000 | +| Claude Haiku 4.5 | 1.00 | 5.00 | 10,000 | +| Gemini 3.1 Pro | 2.00 | 12.00 | 14,000 | +| Gemini 3 Flash | 0.50 | 3.00 | 14,000 | + +> **计算示例:** 若使用输入价格 1.25、输出价格 10.00 的模型,提问消耗 10 个输入 Token,回复消耗 50 个输出 Token,则总消耗为 **512.5 积分**(10 × 1.25 + 50 × 10)。可在聊天界面右下角悬停模型名称查看具体消耗。 + +--- + +## 使用数据 + +可在左侧导航栏进入 **使用情况** 页面查看详细数据: + +- **使用概览:** 当前积分余额与本月总消耗 +- **月度使用图:** 展示过去一年的使用趋势 +- **使用明细:** 每条记录对应一次 AI 交互,包括时间、模型、Token 用量、积分消耗和响应时间 + +--- + +## 充值 + +Bank of AI 采用预付费模式,基于区块链实现安全便捷的充值体验。 + +- **充值流程:** 在 **充值** 页面,通过已连接的钱包完成支付确认 +- **支持代币:** 支持多链主流代币(包括 TRON、BNB Chain 等) +- **到账时间:** 区块链确认后自动到账,通常为几分钟内 + +--- + +## 账单与记录 + +可在 **充值** 页面中的 **历史记录** 查看完整充值信息: + +- **充值记录:** 包括时间、类型、交易哈希、代币信息 +- **透明可查:** 可点击交易哈希,在区块链浏览器(如 TRONSCAN、BscScan)中查看详情 From f2c52dc5f2f6be126f112a687b4949490a6f93e9 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Tue, 17 Mar 2026 18:28:35 +0800 Subject: [PATCH 58/78] Create chatgpt-5-2.md --- .../llm-service/models /chatgpt-5-2.md | 41 +++++++++++++++++++ 1 file changed, 41 insertions(+) create mode 100644 i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /chatgpt-5-2.md diff --git a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /chatgpt-5-2.md b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /chatgpt-5-2.md new file mode 100644 index 00000000..afdfbb6d --- /dev/null +++ b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /chatgpt-5-2.md @@ -0,0 +1,41 @@ +# ChatGPT-5.2 + +## 概述 + +ChatGPT-5.2 是 OpenAI 最新一代旗舰大语言模型。在 5.1 的基础上,进一步优化了多模态处理速度与复杂任务执行效率,适合追求高性能与高效率的专业用户。 + +--- + +## 核心特性 + +- **高效多模态处理:** 相比 5.1,大幅提升图像与视频内容的解析与生成速度,交互更流畅 +- **任务执行效率提升:** 优化推理引擎,在处理多步骤复杂任务时更快、更准确 +- 
**更强抗干扰能力:** 在噪声较多或指令模糊的情况下,依然保持较高准确性 + +--- + +## 适用场景 + +- **实时数据分析与可视化:** 快速处理数据流并生成复杂图表与分析报告 +- **复杂项目管理与规划:** 支持任务拆解、资源分配与风险评估 +- **高频高精度专业咨询:** 适用于金融分析、法律检索等需要快速准确响应的场景 + +--- + +## 能力与限制 + +| 能力 | 说明 | +| :--- | :--- | +| **推理能力** | 极强。在复杂逻辑推理与科学计算方面保持领先,并提升效率 | +| **创造能力** | 极强。可生成高质量、结构化的专业内容 | +| **多模态能力** | 全面且高效。支持图像、视频、音频输入与理解,并可生成高质量图像 | +| **响应速度** | 中等偏慢。相比 5.1 有提升,但仍属于深度推理模型 | +| **上下文窗口** | 超大。支持百万级 Token 上下文 | + +--- + +## 积分与定价 + +| 模型 | 输入(积分/Token) | 输出(积分/Token) | +| :--- | :--- | :--- | +| **ChatGPT-5.2** | 1.75 | 14.00 | From e30d12148e09048609067bf1e0c75eb14f7fa0ba Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Tue, 17 Mar 2026 18:29:54 +0800 Subject: [PATCH 59/78] Create chatgpt-5-mini.md --- .../llm-service/models /chatgpt-5-mini.md | 41 +++++++++++++++++++ 1 file changed, 41 insertions(+) create mode 100644 i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /chatgpt-5-mini.md diff --git a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /chatgpt-5-mini.md b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /chatgpt-5-mini.md new file mode 100644 index 00000000..dd550add --- /dev/null +++ b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /chatgpt-5-mini.md @@ -0,0 +1,41 @@ +# ChatGPT-5-mini + +## 概述 + +ChatGPT-5-mini 是一款高效、经济的轻量级语言模型,针对日常对话与通用任务进行了优化,是 Bank of AI 生态中性价比极高的选择。 + +--- + +## 核心特性 + +- **极快响应:** 针对低延迟优化,提供接近实时的对话体验 +- **高性价比:** 在保证输出质量的同时,大幅降低计算成本 +- **稳定通用能力:** 覆盖日常问答、文本处理等多种场景,表现稳定 + +--- + +## 适用场景 + +- **日常对话与快速问答:** 适合作为智能助手,快速回答问题与聊天 +- **文本处理:** 支持摘要、润色、格式整理、关键词提取 +- **内容初稿生成:** 快速生成社交媒体文案、产品描述、博客草稿 + +--- + +## 能力与限制 + +| 能力 | 说明 | +| :--- | :--- | +| **推理能力** | 中等。可处理基础逻辑,但复杂多步骤问题表现有限 | +| **创造能力** | 中等。文本流畅,但深度与专业性有限 | +| **多模态能力** | 不支持。仅支持文本处理 | +| **响应速度** | 快。平台中响应速度较快的模型之一 | +| **上下文窗口** | 标准。支持数万级 Token,满足大多数日常场景 | + +--- + +## 积分与定价 + +| 模型 | 输入(积分/Token) | 输出(积分/Token) | +| :--- | :--- | :--- | +| **ChatGPT-5-mini** | 
0.25 | 2.00 | From 8b9c1feb457c9ea240c4525d5b76481ccf9cdb59 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Tue, 17 Mar 2026 18:30:56 +0800 Subject: [PATCH 60/78] Create chatgpt-5-nano.md --- .../llm-service/models /chatgpt-5-nano.md | 30 +++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /chatgpt-5-nano.md diff --git a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /chatgpt-5-nano.md b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /chatgpt-5-nano.md new file mode 100644 index 00000000..38dd9559 --- /dev/null +++ b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /chatgpt-5-nano.md @@ -0,0 +1,30 @@ +# ChatGPT-5-nano + +## Overview +ChatGPT-5-nano is an advanced language model that strikes an excellent balance between performance, speed, and cost. It is designed to provide near-professional AI capabilities at a moderate cost within the Bank of AI ecosystem. + +## Key Features +* **Enhanced Reasoning Ability:** Nano shows significant improvements in logical reasoning, code generation, and multilingual processing compared to lighter models. +* **Efficient Performance:** Carefully tuned to maintain high output quality while sustaining a fast response speed. +* **Multifunctional Integration:** Capable of handling a diverse range of tasks, making it a powerful assistant for developers and content creators. + +## Best Use Cases +* **Code Assistance and Debugging:** Understanding and generating code across multiple programming languages, assisting with debugging and documentation. +* **Multilingual Translation and Writing:** Providing high-quality cross-language translation and creating content in authentic language styles. +* **Structured Content Generation:** Generating well-formatted reports, technical documents, tutorials, and other structured content. 
+ +## Capabilities and Limitations + +| Capability | Detailed Description | +| :--- | :--- | +| **Reasoning Ability** | **Strong.** Can handle complex logical problems and programming tasks, performing well in specific domains. | +| **Creative Ability** | **Strong.** Generates creative and in-depth text content, meeting high writing requirements. | +| **Multimodal Ability** | **Limited Support.** Can understand and describe simple image content, but does not support deep multimodal analysis. | +| **Response Speed** | **Medium.** Faster than flagship models, though slightly slower than the mini model. | +| **Context Window** | **Large.** Supports a context window of hundreds of thousands of tokens for long document processing. | + +## Credits and Pricing + +| Model | Input (Credits/Token) | Output (Credits/Token) | +| :--- | :--- | :--- | +| **ChatGPT-5-nano** | 0.05 | 0.40 | From b8bbe37c98398c0ca0bfcdd7648f864f8d2f6954 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Tue, 17 Mar 2026 18:32:36 +0800 Subject: [PATCH 61/78] Update chatgpt-5-nano.md --- .../llm-service/models /chatgpt-5-nano.md | 49 ++++++++++++------- 1 file changed, 30 insertions(+), 19 deletions(-) diff --git a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /chatgpt-5-nano.md b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /chatgpt-5-nano.md index 38dd9559..25620e8d 100644 --- a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /chatgpt-5-nano.md +++ b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /chatgpt-5-nano.md @@ -1,30 +1,41 @@ # ChatGPT-5-nano -## Overview -ChatGPT-5-nano is an advanced language model that strikes an excellent balance between performance, speed, and cost. It is designed to provide near-professional AI capabilities at a moderate cost within the Bank of AI ecosystem. 
+## 概述 -## Key Features -* **Enhanced Reasoning Ability:** Nano shows significant improvements in logical reasoning, code generation, and multilingual processing compared to lighter models. -* **Efficient Performance:** Carefully tuned to maintain high output quality while sustaining a fast response speed. -* **Multifunctional Integration:** Capable of handling a diverse range of tasks, making it a powerful assistant for developers and content creators. +ChatGPT-5-nano 是一款在性能、速度和成本之间取得良好平衡的语言模型。在 Bank of AI 生态中,它以适中的成本提供接近专业级的 AI 能力。 -## Best Use Cases -* **Code Assistance and Debugging:** Understanding and generating code across multiple programming languages, assisting with debugging and documentation. -* **Multilingual Translation and Writing:** Providing high-quality cross-language translation and creating content in authentic language styles. -* **Structured Content Generation:** Generating well-formatted reports, technical documents, tutorials, and other structured content. +--- -## Capabilities and Limitations +## 核心特性 -| Capability | Detailed Description | +- **推理能力增强:** 相比更轻量的模型,在逻辑推理、代码生成和多语言处理方面有明显提升 +- **高效性能:** 在保持较高输出质量的同时,维持较快响应速度 +- **多功能能力:** 可处理多种任务类型,是开发者与内容创作者的实用助手 + +--- + +## 适用场景 + +- **代码辅助与调试:** 支持多语言代码理解与生成,帮助调试和编写技术文档 +- **多语言翻译与写作:** 提供高质量跨语言翻译,并生成自然流畅的内容 +- **结构化内容生成:** 生成格式规范的报告、技术文档、教程等 + +--- + +## 能力与限制 + +| 能力 | 说明 | | :--- | :--- | -| **Reasoning Ability** | **Strong.** Can handle complex logical problems and programming tasks, performing well in specific domains. | -| **Creative Ability** | **Strong.** Generates creative and in-depth text content, meeting high writing requirements. | -| **Multimodal Ability** | **Limited Support.** Can understand and describe simple image content, but does not support deep multimodal analysis. | -| **Response Speed** | **Medium.** Faster than flagship models, though slightly slower than the mini model. 
| -| **Context Window** | **Large.** Supports a context window of hundreds of thousands of tokens for long document processing. | +| **推理能力** | 强。能够处理复杂逻辑问题与编程任务,在特定领域表现优秀 | +| **创造能力** | 强。可生成富有创意且内容深入的文本 | +| **多模态能力** | 有限支持。可理解和描述简单图像,但不支持深度多模态分析 | +| **响应速度** | 中等。快于旗舰模型,但略慢于 mini 模型 | +| **上下文窗口** | 大。支持数十万级 Token 上下文,适合长文档处理 | + +--- -## Credits and Pricing +## 积分与定价 -| Model | Input (Credits/Token) | Output (Credits/Token) | +| 模型 | 输入(积分/Token) | 输出(积分/Token) | | :--- | :--- | :--- | | **ChatGPT-5-nano** | 0.05 | 0.40 | From d8ef698b64175dc7fa3b32e1b21bc8dd5efde295 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Tue, 17 Mar 2026 18:33:56 +0800 Subject: [PATCH 62/78] Create claude-haiku-4-5.md --- .../llm-service/models /claude-haiku-4-5.md | 41 +++++++++++++++++++ 1 file changed, 41 insertions(+) create mode 100644 i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /claude-haiku-4-5.md diff --git a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /claude-haiku-4-5.md b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /claude-haiku-4-5.md new file mode 100644 index 00000000..3bf9282f --- /dev/null +++ b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /claude-haiku-4-5.md @@ -0,0 +1,41 @@ +# Claude Haiku 4.5 + +## 概述 + +Claude Haiku 4.5 是 Anthropic 推出的轻量高速模型,已集成至 Bank of AI 平台。主打极低延迟与高并发,适用于实时交互场景。 + +--- + +## 核心特性 + +- **极致响应速度:** 超低延迟,适合即时交互应用 +- **高性价比:** 成本优势明显,适合大规模部署 +- **企业级稳定性:** 通过严格安全测试,满足企业级可靠性要求 + +--- + +## 适用场景 + +- **实时聊天与内容审核:** 提供流畅对话体验,快速处理用户内容 +- **移动端 AI 应用:** 针对低延迟与资源受限场景优化 +- **工作流自动化:** 邮件分类、会议总结、表单信息提取等 + +--- + +## 能力与限制 + +| 能力 | 说明 | +| :--- | :--- | +| **推理能力** | 中等。可处理常规任务,但复杂多步骤问题能力有限 | +| **创造能力** | 中等。文本简洁流畅,更适合信息表达 | +| **多模态能力** | 支持。具备基础图像理解与描述能力 | +| **响应速度** | 极快。平台中响应最快的模型之一 | +| **上下文窗口** | 超大。支持长上下文与大规模文本处理 | + +--- + +## 积分与定价 + +| 模型 | 输入(积分/Token) | 输出(积分/Token) | +| :--- | :--- | :--- | +| **Claude Haiku 4.5** | 1.00 | 5.00 | 
From d931aa5f66774d8ebdcafc060328fd9bbea7bad6 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Tue, 17 Mar 2026 18:35:17 +0800 Subject: [PATCH 63/78] Create claude-opus-4-5.md --- .../llm-service/models /claude-opus-4-5.md | 41 +++++++++++++++++++ 1 file changed, 41 insertions(+) create mode 100644 i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /claude-opus-4-5.md diff --git a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /claude-opus-4-5.md b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /claude-opus-4-5.md new file mode 100644 index 00000000..1c678bad --- /dev/null +++ b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /claude-opus-4-5.md @@ -0,0 +1,41 @@ +# Claude Opus 4.5 + +## 概述 + +Claude Opus 4.5 是 Anthropic 推出的旗舰级 AI 模型,已集成至 Bank of AI 平台。以顶级智能、卓越性能和强伦理能力著称,适用于高端专业场景。 + +--- + +## 核心特性 + +- **顶级智能水平:** 在复杂推理、数学与编程等任务中表现领先,可解决高度复杂问题 +- **强大视觉理解:** 能准确解析复杂图表、科研图像及多图文档 +- **超长上下文处理:** 支持超大上下文窗口,可处理长文档及大型代码库 + +--- + +## 适用场景 + +- **复杂系统分析:** 理解流程图、API 文档,并自动生成跨系统代码 +- **高端科研分析:** 解读论文、分析实验数据、提出研究假设 +- **企业战略决策:** 分析市场、财务与法律信息,提供决策支持 + +--- + +## 能力与限制 + +| 能力 | 说明 | +| :--- | :--- | +| **推理能力** | 顶级。在复杂逻辑与深度思考任务中表现领先 | +| **创造能力** | 极强。可生成逻辑严密、富有洞察力的高质量文本 | +| **多模态能力** | 强。擅长复杂视觉信息理解与分析 | +| **响应速度** | 较慢。优先保证深度与质量 | +| **上下文窗口** | 超大。支持 200K+ Token,上限可扩展 | + +--- + +## 积分与定价 + +| 模型 | 输入(积分/Token) | 输出(积分/Token) | +| :--- | :--- | :--- | +| **Claude Opus 4.5** | 5.00 | 25.00 | From 3b538c9f064625ead4a47821edcf316ec0d43a18 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Tue, 17 Mar 2026 18:36:48 +0800 Subject: [PATCH 64/78] Create claude-opus-4-6.md --- .../llm-service/models /claude-opus-4-6.md | 43 +++++++++++++++++++ 1 file changed, 43 insertions(+) create mode 100644 i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /claude-opus-4-6.md diff --git a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /claude-opus-4-6.md 
b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /claude-opus-4-6.md new file mode 100644 index 00000000..9637dd92 --- /dev/null +++ b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /claude-opus-4-6.md @@ -0,0 +1,43 @@ +# Claude Opus 4.6 + +## 概述 + +Claude Opus 4.6 是 Anthropic 旗舰模型系列的最新版本。在 4.5 强大智能与伦理能力的基础上,进一步强化复杂推理、多模态理解以及专业领域表现,为 Bank of AI 生态提供更精准、更强大的 AI 能力。 + +--- + +## 核心特性 + +- **复杂推理能力提升:** 针对多步骤、跨领域与抽象问题进行深度优化,适用于科学、工程与法律等复杂任务 +- **多模态理解增强:** 对图像、图表与视频的信息捕捉与上下文关联能力更强 +- **专业知识深化:** 在金融分析、药物研发、复杂系统设计等领域能力进一步增强 +- **相较 4.5 的提升:** 4.6 更注重 **深度与精度**,在高准确度任务中提供更可靠结果 + +--- + +## 适用场景 + +- **前沿科研分析:** 复杂数据分析、理论验证与新发现探索 +- **高级战略咨询:** 基于大规模信息进行深度分析与决策支持 +- **法律与合规审查:** 处理复杂法律文本并进行风险评估 +- **高端内容生成:** 撰写研究报告、技术白皮书与深度行业分析 + +--- + +## 能力与限制 + +| 能力 | 说明 | +| :--- | :--- | +| **推理能力** | 顶级。相比 4.5 进一步提升,擅长深度推理与抽象逻辑 | +| **创造能力** | 极强。可生成逻辑严谨、见解深入的专业文本 | +| **多模态能力** | 增强。更擅长分析复杂视觉信息与多模态数据 | +| **响应速度** | 较慢。优先保证输出质量与分析深度 | +| **上下文窗口** | 超大。支持超长上下文,用于处理大型文档与代码库 | + +--- + +## 积分与定价 + +| 模型 | 输入(积分/Token) | 输出(积分/Token) | +| :--- | :--- | :--- | +| **Claude Opus 4.6** | 5.00 | 25.00 | From 77f827d5c65aa3f850d22c7ef452af5664b51cc0 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Tue, 17 Mar 2026 18:38:25 +0800 Subject: [PATCH 65/78] Create claude-sonnet-4-5.md --- .../llm-service/models /claude-sonnet-4-5.md | 41 +++++++++++++++++++ 1 file changed, 41 insertions(+) create mode 100644 i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /claude-sonnet-4-5.md diff --git a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /claude-sonnet-4-5.md b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /claude-sonnet-4-5.md new file mode 100644 index 00000000..ba39935e --- /dev/null +++ b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /claude-sonnet-4-5.md @@ -0,0 +1,41 @@ +# Claude Sonnet 4.5 + +## 概述 + +Claude Sonnet 4.5 是 Anthropic 推出的平衡型 AI 
模型,已集成至 Bank of AI 平台。在接近旗舰级智能的同时提供更低成本,是企业规模化部署 AI 的理想选择。 + +--- + +## 核心特性 + +- **智能与速度兼顾:** 智能水平显著高于同类模型,响应速度约为 Opus 系列的两倍 +- **企业场景优化:** 针对知识检索、销售自动化和复杂数据分析等核心业务场景进行优化 +- **超长上下文与高召回:** 支持超大上下文窗口,在长文档中仍保持高信息召回率 + +--- + +## 适用场景 + +- **企业知识管理:** 从海量文档中快速检索信息,为客户或员工提供准确答案 +- **销售与营销自动化:** 分析市场数据、生成个性化营销文案、处理销售线索 +- **代码质量管理:** 高效生成与审查代码,帮助开发团队提升效率与质量 + +--- + +## 能力与限制 + +| 能力 | 说明 | +| :--- | :--- | +| **推理能力** | 强。可满足大多数商业与专业应用场景 | +| **创造能力** | 强。生成符合商业与专业标准的高质量文本 | +| **多模态能力** | 支持。具备较强图像理解能力,可处理包含图表的文档 | +| **响应速度** | 中等。在智能与速度之间取得良好平衡 | +| **上下文窗口** | 超大。与 Opus 系列相同,支持长上下文处理 | + +--- + +## 积分与定价 + +| 模型 | 输入(积分/Token) | 输出(积分/Token) | +| :--- | :--- | :--- | +| **Claude Sonnet 4.5** | 3.00 | 15.00 | From ceca1df86f4da0168571c3797c16195882947566 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Tue, 17 Mar 2026 18:39:37 +0800 Subject: [PATCH 66/78] Create claude-sonnet-4-6.md --- .../llm-service/models /claude-sonnet-4-6.md | 43 +++++++++++++++++++ 1 file changed, 43 insertions(+) create mode 100644 i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /claude-sonnet-4-6.md diff --git a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /claude-sonnet-4-6.md b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /claude-sonnet-4-6.md new file mode 100644 index 00000000..3fe6c3eb --- /dev/null +++ b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /claude-sonnet-4-6.md @@ -0,0 +1,43 @@ +# Claude Sonnet 4.6 + +## 概述 + +Claude Sonnet 4.6 是 Anthropic Sonnet 系列的重要升级版本,在更低成本下提供接近 Opus 级别的性能。该版本在编程能力、计算机操作、长上下文推理和知识工作方面全面提升,并首次引入 **100 万 Token 上下文窗口(Beta)**,非常适合企业级自动化与复杂任务处理。 + +--- + +## 核心特性 + +- **接近 Opus 级性能:** 在多项基准测试中表现接近 Opus 4.6,尤其在企业文档处理(图表、PDF、表格)方面表现突出 +- **100 万 Token 上下文:** Sonnet 系列首次支持百万级上下文,可处理完整代码库或大型研究文档 +- **优秀编程与计算机操作能力:** 在复杂代码修复和多步骤网页工作流中表现接近甚至达到人类水平 +- **相较 4.5 的提升:** 指令遵循更强、幻觉更少,多步骤任务执行更加可靠 + +--- + +## 适用场景 + +- **企业文档分析:** 高效处理包含图表和表格的复杂 PDF 文档 +- **大型代码库重构:** 
利用百万级上下文对整个代码库进行分析与修改 +- **自动化 Agent 工作流:** 适用于需要多步骤规划的自动化任务 +- **知识工作与设计:** 生成高质量内容和前端页面,减少反复迭代 + +--- + +## 能力与限制 + +| 能力 | 说明 | +| :--- | :--- | +| **推理能力** | 强。在长上下文推理与复杂问题处理方面接近 Opus | +| **创造能力** | 强。尤其擅长生成高质量代码与具有设计感的前端页面 | +| **多模态能力** | 支持。可处理图像、PDF 等多模态输入 | +| **响应速度** | 中等。在性能与速度之间保持良好平衡 | +| **上下文窗口** | 100 万 Token(Beta),最大输出 128,000 Token | + +--- + +## 积分与定价 + +| 模型 | 输入(积分/Token) | 输出(积分/Token) | +| :--- | :--- | :--- | +| **Claude Sonnet 4.6** | 3.00 | 15.00 | From 4235cb20e2acd19aa2420c4805d6af84ae14abf2 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Tue, 17 Mar 2026 18:40:44 +0800 Subject: [PATCH 67/78] Create gemini-3-1-pro.md --- .../llm-service/models /gemini-3-1-pro.md | 42 +++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /gemini-3-1-pro.md diff --git a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /gemini-3-1-pro.md b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /gemini-3-1-pro.md new file mode 100644 index 00000000..98d295d1 --- /dev/null +++ b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /gemini-3-1-pro.md @@ -0,0 +1,42 @@ +# Gemini 3.1 Pro + +## 概述 + +Gemini 3.1 Pro 是 Google 旗舰多模态模型的重要升级版本,在推理能力、输出可靠性和效率方面全面提升。在保留原生多模态能力的同时,显著增强复杂任务处理的准确性,并解决了旧版本输出截断的问题。 + +--- + +## 核心特性 + +- **推理能力大幅提升:** 在 ARC-AGI-2 基准测试中取得突破性成绩,复杂模式识别能力较前代提升超过一倍 +- **超长上下文与输出:** 支持最高 **100 万 Token 上下文**,单次输出可达 **64,000 Token**,避免长内容被截断 +- **原生多模态能力:** 支持文本、图像、音频、视频的统一理解与处理 +- **更高运行效率:** 优化 Token 使用,在复杂任务中提供更稳定、更高质量输出 + +--- + +## 适用场景 + +- **复杂代码生成:** 适用于需要深度推理和完整代码输出的软件开发任务 +- **大规模数据分析:** 一次性处理并分析大量文档或数据集 +- **长文本内容创作:** 撰写报告、剧本或技术白皮书,保持长篇逻辑一致性 + +--- + +## 能力与限制 + +| 能力 | 说明 | +| :--- | :--- | +| **推理能力** | 极强。在抽象推理与新问题解决方面显著提升 | +| **创造能力** | 极强。可生成高质量、长篇且逻辑连贯的内容 | +| **多模态能力** | 原生支持。可无缝处理文本、图像、音频、视频混合输入 | +| **响应速度** | 中等偏慢。优先保证输出质量与完整性 | +| **上下文窗口** | 100 万 Token(最大输出 64,000 Token) | + +--- 
+ +## 积分与定价 + +| 模型 | 输入(积分/Token) | 输出(积分/Token) | +| :--- | :--- | :--- | +| **Gemini 3.1 Pro** | 2.00 | 12.00 | From c666fceadc5f34de1013e083f0f19a227600a4d8 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Tue, 17 Mar 2026 18:42:07 +0800 Subject: [PATCH 68/78] Create gemini-3-flash.md --- .../llm-service/models /gemini-3-flash.md | 41 +++++++++++++++++++ 1 file changed, 41 insertions(+) create mode 100644 i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /gemini-3-flash.md diff --git a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /gemini-3-flash.md b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /gemini-3-flash.md new file mode 100644 index 00000000..e850ddd4 --- /dev/null +++ b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /gemini-3-flash.md @@ -0,0 +1,41 @@ +# Gemini 3 Flash + +## 概述 + +Gemini 3 Flash 是 Google Gemini 3 系列中速度最快、效率最高的模型。针对低延迟与高并发场景优化,在保留原生多模态能力的同时,大幅提升响应速度并降低成本。 + +--- + +## 核心特性 + +- **极速响应与高吞吐:** 适用于低延迟、高并发场景,是实时 AI 应用的首选 +- **高效多模态:** 支持图像、音频等多模态处理,相比 Pro 版本计算成本更低 +- **高性价比:** 在保证速度与多模态能力的同时显著降低使用成本 + +--- + +## 适用场景 + +- **实时聊天机器人:** 提供流畅、即时的多模态交互体验 +- **内容审核:** 快速识别与过滤文本和图像中的违规内容 +- **移动与边缘应用:** 适用于对延迟敏感、资源受限的场景 + +--- + +## 能力与限制 + +| 能力 | 说明 | +| :--- | :--- | +| **推理能力** | 强。可处理大多数通用与复杂任务,但深度略低于 Pro | +| **创造能力** | 强。可快速生成高质量文本与多模态描述 | +| **多模态能力** | 原生支持且高效。侧重速度而非深度分析 | +| **响应速度** | 极快。平台中响应速度最快的模型之一 | +| **上下文窗口** | 超大。与 Pro 版本一致,支持长上下文 | + +--- + +## 积分与定价 + +| 模型 | 输入(积分/Token) | 输出(积分/Token) | +| :--- | :--- | :--- | +| **Gemini 3 Flash** | 0.50 | 3.00 | From 70e6277251c969e1ec33823624ff6604ec9bd3ce Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Tue, 17 Mar 2026 18:43:25 +0800 Subject: [PATCH 69/78] Create glm-5.md --- .../current/llm-service/models /glm-5.md | 50 +++++++++++++++++++ 1 file changed, 50 insertions(+) create mode 100644 i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /glm-5.md diff --git 
a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /glm-5.md b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /glm-5.md new file mode 100644 index 00000000..6b14389b --- /dev/null +++ b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /glm-5.md @@ -0,0 +1,50 @@ +# GLM-5 + +## 概述 + +**GLM-5** 是智谱 AI 推出的新一代旗舰基础模型,专为 **编程(Coding)** 与 **Agent** 场景设计。在开源复杂系统工程与长周期任务中达到 SOTA 表现,实际编码能力接近 **Claude Opus** 水平。 + +基于 **744B** 规模模型,结合异步强化学习与稀疏注意力机制,实现从“写代码”到“构建系统”的能力跃迁。 + +--- + +## 核心特性 + +- **大规模参数与数据:** 模型规模达 **744B(激活 40B)**,预训练数据 **28.5T**,显著提升知识广度与深度 +- **超长上下文与输出:** 支持 **200K Token 上下文**,最大输出 **128K Token**,适合复杂代码与多步骤任务 +- **强大编程与 Agent 能力:** 编程能力系统性增强,低幻觉率,高 Token 利用效率 +- **多种思维模式:** 支持不同推理模式,提升问题解决的灵活性与深度 + +--- + +## 适用场景 + +1. **复杂系统工程:** 软件系统构建、架构设计与优化 +2. **长周期 Agent 任务:** 多步骤规划与执行的自动化流程 +3. **高精度代码调试:** 提供接近人类水平的编程辅助 +4. **大规模文档分析:** 海量文档的信息提取与总结 + +--- + +## 能力与限制 + +| 能力 | 说明 | +| :--- | :--- | +| **推理能力** | 极强。擅长复杂逻辑推理与多步骤规划 | +| **创造能力** | 极强。尤其在代码生成与系统设计方面表现突出 | +| **多模态能力** | 以文本与代码为主,可结合智谱平台视觉能力 | +| **响应速度** | 30–50 tokens/s,在质量与速度之间取得平衡 | +| **上下文窗口** | 200K Token | +| **最大输出** | 128K Token | + +--- + +## 积分与定价 + +| 模型 | 输入(积分/Token) | 输出(积分/Token) | +| :--- | :--- | :--- | +| **GLM-5** | 0.30 | 2.55 | + +--- + +> **说明:** 在编程场景中建议提供清晰的系统提示词,并充分利用 128K 输出能力构建完整模块。 From ef5dd5f3d9c1139086ef2fc7101c47b96fb4e62b Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Tue, 17 Mar 2026 18:44:42 +0800 Subject: [PATCH 70/78] Create kimi-k2.5.md --- .../current/llm-service/models /kimi-k2.5.md | 51 +++++++++++++++++++ 1 file changed, 51 insertions(+) create mode 100644 i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /kimi-k2.5.md diff --git a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /kimi-k2.5.md b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /kimi-k2.5.md new file mode 100644 index 00000000..329eae0b --- /dev/null +++ 
b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /kimi-k2.5.md @@ -0,0 +1,51 @@ +# Kimi-K2.5 + +## 概述 + +**Kimi-K2.5** 是 Moonshot AI 推出的多功能旗舰模型,采用 **原生多模态架构**,同时支持图像与文本输入、思考模式与快速模式,以及对话与 Agent 任务。 + +凭借 **256K 超长上下文窗口**、多模态理解能力以及强大的 Tool Calling 能力,Kimi-K2.5 为视觉编程与多 Agent 系统提供强大支持,帮助开发者构建下一代 AI 应用。 + +--- + +## 核心特性 + +- **原生多模态架构:** 支持图像与文本混合输入,在视觉理解与视觉编程方面表现突出 +- **256K 超长上下文:** 支持 **256,000 Token** 上下文,适合长文本推理与大规模数据处理 +- **Agent 集群与工具调用:** 支持 Agent 集群预览功能(最多 **100 个子 Agent** 与 **1,500 次工具调用**),速度比单 Agent 快约 4.5 倍 +- **强大编程能力:** 在 SWE-Bench 与 LiveCodeBench 等基准测试中表现领先 +- **思考模式:** 可在快速响应模式与深度推理模式之间灵活切换 + +--- + +## 适用场景 + +1. **视觉编程与自动化:** 网页像素级复刻与办公自动化任务 +2. **超长文本分析:** 法律文档审查、大规模研究报告分析、完整代码库理解 +3. **多 Agent 协作:** 构建复杂自动化流程与多 Agent 协同系统 +4. **专业代码生成:** 高效生成、优化与调试代码 + +--- + +## 能力与限制 + +| 能力 | 说明 | +| :--- | :--- | +| **推理能力** | 极强。擅长长上下文推理与 Agent 任务规划 | +| **创造能力** | 极强。尤其在视觉编程与多模态内容生成方面表现突出 | +| **多模态能力** | 原生多模态。视觉理解能力优秀 | +| **响应速度** | 快速模式响应迅速;Agent 集群模式支持高效并行 | +| **上下文窗口** | 256,000 Token | +| **最大输出** | 256,000 Token | + +--- + +## 积分与定价 + +| 模型 | 输入(积分/Token) | 输出(积分/Token) | +| :--- | :--- | :--- | +| **Kimi-K2.5** | 0.23 | 3.00 | + +--- + +> **提示:** 在复杂系统设计任务中,建议先使用 **思考模式**进行架构规划,再切换到标准模式执行代码生成。 From c3035af26dbdc0277a97d5bf671207f1ac6c3f1c Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Tue, 17 Mar 2026 18:46:07 +0800 Subject: [PATCH 71/78] Create minimax-m2.5.md --- .../llm-service/models /minimax-m2.5.md | 51 +++++++++++++++++++ 1 file changed, 51 insertions(+) create mode 100644 i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /minimax-m2.5.md diff --git a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /minimax-m2.5.md b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /minimax-m2.5.md new file mode 100644 index 00000000..f1ab94be --- /dev/null +++ b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /minimax-m2.5.md @@ -0,0 +1,51 @@ +# 
MiniMax-M2.5 + +## 概述 + +**MiniMax-M2.5** 是 MiniMax 自主研发的旗舰多模态通用大模型,专为 **高吞吐、低延迟** 的生产环境设计。 + +该模型在编程与 Agent 能力方面表现领先,可原生理解和生成多种模态内容,包括 **文本、音频、图像、视频和音乐**。M2.5 在保持顶级性能的同时大幅降低成本,特别适用于复杂任务处理与专业办公场景。 + +--- + +## 核心特性 + +- **领先的编程与 Agent 能力:** 在 **Multi-SWE-Bench** 基准测试中表现领先,决策能力更成熟,Token 使用效率更高 +- **高效多模态处理:** 原生支持文本、音频、图像、视频与音乐的融合处理 +- **超长上下文能力:** 支持 **197K Token 上下文窗口**,通过强化学习优化复杂任务拆解 +- **高吞吐低延迟:** 提供 **100 TPS** 与 **50 TPS** 两种版本,适合高并发生产环境 +- **办公场景增强:** 在 Word、PPT、Excel 财务建模等专业办公任务中能力显著提升 + +--- + +## 适用场景 + +1. **企业自动化工作流:** 适合需要多模态处理与 Agent 决策的自动化系统 +2. **软件开发与代码辅助:** 在大型复杂代码库中提供高效生成与调试能力 +3. **多模态内容创作:** 支持跨模态内容生成,例如文本生成视频或图像生成音乐 +4. **办公文档处理:** 高效处理 Word、Excel、PPT 等文档的信息提取与分析 + +--- + +## 能力与限制 + +| 能力 | 说明 | +| :--- | :--- | +| **推理能力** | 极强。适合多步骤 Agent 任务与复杂决策 | +| **创造能力** | 极强。擅长多模态创作与办公自动化 | +| **多模态能力** | 原生多模态。支持文本、音频、图像、视频和音乐 | +| **响应速度** | 极快。提供 100 TPS 与 50 TPS 高吞吐版本 | +| **上下文窗口** | 197,000 Token | +| **最大输出** | 131,000 Token | + +--- + +## 积分与定价 + +| 模型 | 输入(积分/Token) | 输出(积分/Token) | +| :--- | :--- | :--- | +| **MiniMax-M2.5** | 0.30 | 1.20 | + +--- + +> **提示:** 对于需要处理大量请求的生产环境(例如每小时处理成千上万份文档),MiniMax-M2.5 在速度与成本之间提供了极佳平衡。 From 76342f26ac928ec0581e82d7876d2877f2994949 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Tue, 17 Mar 2026 18:50:08 +0800 Subject: [PATCH 72/78] Create integration-guide.md --- .../openclaw /integration-guide.md | 228 ++++++++++++++++++ 1 file changed, 228 insertions(+) create mode 100644 i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/openclaw /integration-guide.md diff --git a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/openclaw /integration-guide.md b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/openclaw /integration-guide.md new file mode 100644 index 00000000..936bd890 --- /dev/null +++ b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/openclaw /integration-guide.md @@ -0,0 +1,228 @@ +# 集成 OpenClaw 与 Bank of AI +## 从零到私人 AI Agent:15 分钟部署 
OpenClaw + Bank of AI + +OpenClaw(原名 ClawdBot / Moltbot)是一个开源的个人 AI 助手。与云端 SaaS 工具不同,OpenClaw 运行在本地设备上,让你完全掌控数据与工作流程。 + +你可以通过 WhatsApp、Telegram、Lark、钉钉等消息平台与其交互,用于处理邮件、管理日历、编写代码,甚至自动化智能家居。 + +OpenClaw 不只是聊天工具,而是具备实际执行能力的 **AI Agent**,支持: + +- 持久化记忆 +- 本地文件访问 +- 互联网访问 +- 技能扩展 + +本指南将带你完成安装、配置,并连接 Bank of AI API,快速搭建属于你的 AI Agent。 + +--- + +# Step 1 获取 Bank of AI API Key + +1. 打开:https://chat.bankofai.io/chat +2. 登录账户 +3. 进入 API Key 管理页面 +4. 创建并复制 API Key + +--- + +# Step 2 准备系统环境 + +| 要求 | 说明 | +|---|---| +| Node.js | ≥ v22 | +| 系统 | macOS / Linux / Windows(WSL2) | +| 包管理 | npm | + +检查版本: + +```bash +node -v +``` + +若版本过低,请前往:https://nodejs.org/ + +--- + +# Step 3 安装 OpenClaw + +```bash +npm install -g openclaw +``` + +--- + +## 常见问题 + +### Sharp 报错 + +```bash +npm install -g openclaw --force +``` + +### 命令找不到 + +```bash +npm config get prefix +``` + +例如输出: + +``` +/usr/local +``` + +加入环境变量: + +```bash +export PATH="/usr/local/bin:$PATH" +source ~/.zshrc +``` + +--- + +# Step 4 初始化 + +```bash +openclaw onboard +``` + +操作建议: + +- 模型配置:选择 **Skip for now** +- 渠道:可跳过 +- Skills:选择 No + +--- + +# Step 5 配置 Bank of AI + +## 5.1 编辑配置文件 + +```bash +vim ~/.openclaw/openclaw.json +``` + +加入: + +```json +{ + "models": { + "mode": "merge", + "providers": { + "bankofai": { + "baseUrl": "https://api.bankofai.io/v1/", + "apiKey": "{BANKOFAI_API_KEY}", + "api": "openai-completions", + "models": [ + { "id": "gpt-5.2", "name": "gpt-5.2" }, + { "id": "gpt-5-mini", "name": "gpt-5-mini" }, + { "id": "gpt-5-nano", "name": "gpt-5-nano" }, + { "id": "claude-opus-4.6", "name": "claude-opus-4.6" }, + { "id": "claude-sonnet-4.6", "name": "claude-sonnet-4.6" }, + { "id": "claude-haiku-4.5", "name": "claude-haiku-4.5" } + ] + } + } + } +} +``` + +--- + +## 5.2 设置默认模型 + +```json +{ + "agents": { + "default": { + "model": "bankofai/gpt-5-nano" + } + } +} +``` + +--- + +## 5.3 重启服务 + +```bash +openclaw gateway restart +``` + +--- + +## 5.4 测试 + +```bash +openclaw agent --agent main --message 
"Hello" +``` + +--- + +# Step 6 Gateway 与诊断 + +常用命令: + +| 操作 | 命令 | +|---|---| +| 启动 | openclaw gateway start | +| 停止 | openclaw gateway stop | +| 重启 | openclaw gateway restart | +| 状态 | openclaw gateway status | + +诊断: + +```bash +openclaw doctor +``` + +--- + +# Step 7 启动 + +## Web UI + +```bash +openclaw ui +``` + +打开: + +``` +http://127.0.0.1:18789 +``` + +--- + +## 终端 UI + +```bash +openclaw tui +``` + +--- + +# Step 8 常用命令 + +```bash +openclaw models status +openclaw channels list +openclaw memory search "keyword" +openclaw docs +``` + +--- + +# 完成 + +你已经成功部署: + +**OpenClaw + Bank of AI AI Agent** + +你可以: + +- 自动化流程 +- 接入 Telegram +- 构建 AI 产品 + +🚀 From 7a97ce61cd993b49772de31dbab2f7cc2e7a643f Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Tue, 17 Mar 2026 18:55:25 +0800 Subject: [PATCH 73/78] Create one-click-script-tutorial.md --- .../openclaw /one-click-script-tutorial.md | 125 ++++++++++++++++++ 1 file changed, 125 insertions(+) create mode 100644 i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/openclaw /one-click-script-tutorial.md diff --git a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/openclaw /one-click-script-tutorial.md b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/openclaw /one-click-script-tutorial.md new file mode 100644 index 00000000..214c440b --- /dev/null +++ b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/openclaw /one-click-script-tutorial.md @@ -0,0 +1,125 @@ +## 快速开始 + +**在运行脚本前,请确保:** + +1. 已安装 Node.js 22 或以上版本 +2. 已安装并初始化 OpenClaw(已执行 `openclaw onboard`) +3. 网络正常,可访问 Bank of AI API + +--- + +## 脚本命令 + +### Linux & macOS + +Mac 用户:打开「终端」,输入: + +```bash +curl https://chat.bankofai.io/scripts/openclaw-install-bankofai-provider.sh | bash +``` + +--- + +### Windows PowerShell + +Windows 用户:打开「PowerShell」(不支持 CMD),输入: + +```powershell +iwr https://chat.bankofai.io/scripts/openclaw-install-bankofai-provider.ps1 | iex +``` + +--- + +## 详细步骤 + +### 1 获取 API Key + +1. 
登录 Bank of AI 平台 + https://chat.bankofai.io/ + +2. 进入 API Key 页面 + https://chat.bankofai.io/key + +3. 创建新的 API Key + +--- + +### 2 运行安装脚本 + +根据系统执行对应命令,脚本会自动: + +- 检查环境(Node.js、OpenClaw 等) +- 提示输入 API Key + +--- + +### 3 选择默认模型 + +验证 API Key 后,脚本会获取模型列表并提示选择默认模型。 + +> 注意:Gemini 系列模型在 OpenClaw 中存在兼容性问题(函数调用限制),建议谨慎选择。 + +--- + +### 4 完成配置 + +脚本会自动执行: + +- 备份原配置 +- 更新 OpenClaw 配置文件 +- 重启 Gateway + +--- + +### 5 切换模型 + +#### 方法一:命令行 + +```bash +openclaw models set bankofai/ +``` + +--- + +#### 方法二:Web 界面 + +打开: + +``` +http://127.0.0.1:18789/ +``` + +操作: + +- 左侧点击 Agent +- 在 Primary model 中选择模型 + +> 注意:如果通过 Dashboard 修改模型,配置文件会新增 `list` 字段,此时命令行切换将失效。 + +--- + +## 兼容性测试 + +| 系统 | 状态 | +|---|---| +| Ubuntu 24.04 | ✅ 通过 | +| Windows 11 | ✅ 通过 | +| macOS | ✅ 通过 | + +--- + +## 常见问题 + +### Q:脚本执行失败怎么办? + +请确认: + +1. Node.js ≥ 22 +2. 已执行 `openclaw onboard` +3. 网络正常,可访问 Bank of AI + +--- + +### Q:如何切换模型? + +参考上方「步骤 5 切换模型」。 From a8439ced2d49ef2c7a7da2c5997f808aa04454c3 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Tue, 17 Mar 2026 18:59:11 +0800 Subject: [PATCH 74/78] Update one-click-script-tutorial.md --- .../llm-service/openclaw /one-click-script-tutorial.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/openclaw /one-click-script-tutorial.md b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/openclaw /one-click-script-tutorial.md index 214c440b..ee4e9327 100644 --- a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/openclaw /one-click-script-tutorial.md +++ b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/openclaw /one-click-script-tutorial.md @@ -42,6 +42,8 @@ iwr https://chat.bankofai.io/scripts/openclaw-install-bankofai-provider.ps1 | ie 3. 
创建新的 API Key +![](https://files.readme.io/354a3d414f37e7df28f2cbf92dd055db9b67a20cab8738f6d5ac007226b6931b-image.png) + --- ### 2 运行安装脚本 @@ -51,12 +53,16 @@ iwr https://chat.bankofai.io/scripts/openclaw-install-bankofai-provider.ps1 | ie - 检查环境(Node.js、OpenClaw 等) - 提示输入 API Key +![](https://files.readme.io/ef091efb8911db673af8a5eade7b281a52f641c93d41fbd57cb898ee91893e76-Image_16-3-2026_at_7.00PM.png) + --- ### 3 选择默认模型 验证 API Key 后,脚本会获取模型列表并提示选择默认模型。 +![](https://files.readme.io/be3b9162405e38988898261db3effa8adcf92b6d9e37181d36b32c1cf8bcd1e3-Image_16-3-2026_at_7.01PM.png) + > 注意:Gemini 系列模型在 OpenClaw 中存在兼容性问题(函数调用限制),建议谨慎选择。 --- @@ -69,6 +75,8 @@ iwr https://chat.bankofai.io/scripts/openclaw-install-bankofai-provider.ps1 | ie - 更新 OpenClaw 配置文件 - 重启 Gateway +![](https://files.readme.io/a08c7fcee0cbe906042ef52fefa15348750ad5cd2db8c4f555f92ecabba72761-Image_16-3-2026_at_7.03PM.png) + --- ### 5 切换模型 @@ -94,6 +102,8 @@ http://127.0.0.1:18789/ - 左侧点击 Agent - 在 Primary model 中选择模型 +![](https://files.readme.io/3668289b53d185d158dd8393f46c0e171c0d301b5bdc624792c8cf8e6f9c4936-16-3-26_6.56.png) + > 注意:如果通过 Dashboard 修改模型,配置文件会新增 `list` 字段,此时命令行切换将失效。 --- From 47a6647384e7b8d4f05661d45b87570efa6aa3af Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Tue, 17 Mar 2026 19:07:28 +0800 Subject: [PATCH 75/78] Create API.md --- .../current/llm-service/api/API.md | 216 ++++++++++++++++++ 1 file changed, 216 insertions(+) create mode 100644 i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/api/API.md diff --git a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/api/API.md b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/api/API.md new file mode 100644 index 00000000..8ac72e5e --- /dev/null +++ b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/api/API.md @@ -0,0 +1,216 @@ +# BANK OF AI LLM API(OpenAI 兼容) + +用于 `/v1/models` 和 `/v1/chat/completions` 的 OpenAI 格式 API 规范。 + +## 版本 +1.0 + +**协议:** https +**Host:** 
api.bankofai.io + +--- + +# /v1/chat/completions + +## POST + +### 概述 +创建聊天补全(OpenAI 兼容) + +### 描述 +聊天补全接口。 +认证方式:Bearer Token。 + +- 非流式返回:JSON 格式,内容在 `choices[].content` +- 流式返回:SSE 事件流,内容在 `choices[].delta.content` + +--- + +## 参数 + +| 参数 | 位置 | 说明 | 必填 | 类型 | +|-----|-----|-----|-----|-----| +| Authorization | header | Bearer ``,例如 `Bearer sk-xxx` | 是 | string | +| body | body | 请求体(必填:model、messages;可选:stream、max_tokens、temperature、top_p、stop、n) | 是 | main.ChatCompletionsRequest | + +--- + +## 返回 + +| 状态码 | 说明 | 类型 | +|------|------|------| +| 200 | 非流式:`choices[].content`;流式(SSE):每个块为 ChatCompletionsStreamChunk | main.ChatCompletionsResponse | +| 401 | 认证失败 | object | + +--- + +# /v1/models + +## GET + +### 概述 +获取模型列表(OpenAI 兼容) + +### 描述 +返回可用模型列表。 +认证方式:Bearer Token。 + +返回结构: + +- object +- success +- data + +--- + +## 参数 + +| 参数 | 位置 | 说明 | 必填 | 类型 | +|-----|-----|-----|-----|-----| +| Authorization | header | Bearer ``,例如 `Bearer sk-xxx` | 是 | string | + +--- + +## 返回 + +| 状态码 | 说明 | 类型 | +|------|------|------| +| 200 | 返回模型列表:`{ object, success, data[] }` | main.V1ModelsResponse | +| 401 | 认证失败 | object | + +--- + +# 数据模型 + +## main.ChatChoice + +| 字段 | 类型 | 说明 | 必填 | +|----|----|----|----| +| content | string | 返回内容 | 否 | +| finish_reason | string | 结束原因,例如 `"stop"` | 否 | +| index | integer | 结果序号 | 否 | + +--- + +## main.ChatCompletionsRequest + +| 字段 | 类型 | 说明 | 必填 | +|----|----|----|----| +| frequency_penalty | number | 频率惩罚:-2.0 ~ 2.0,降低重复 token,默认 0 | 否 | +| max_tokens | integer | 最大生成 token 数 | 否 | +| messages | array | 对话消息列表 | 是 | +| model | string | 使用的模型 ID,例如 `"gpt-4"` | 是 | +| n | integer | 生成结果数量,默认 1 | 否 | +| presence_penalty | number | 出现惩罚:-2.0 ~ 2.0,降低重复主题 | 否 | +| response_format | object | 输出格式:`text`、`json_object` 或 `json_schema` | 否 | +| seed | integer | 随机种子(支持时可用于复现结果) | 否 | +| stop | string / array | 停止生成的字符串(最多 4 个) | 否 | +| stream | boolean | 是否使用流式返回(SSE) | 否 | +| temperature | number | 采样温度 0~2,越高越随机 | 否 | +| tool_choice | 
string / object | 工具调用控制(none / auto / function) | 否 | +| tools | array | 可调用工具列表 | 否 | +| top_p | number | 核采样参数(默认 1) | 否 | +| user | string | 可选用户标识(用于监控滥用) | 否 | + +--- + +## main.ChatCompletionsResponse + +| 字段 | 类型 | 说明 | 必填 | +|----|----|----|----| +| choices | array | 生成结果 | 否 | +| created | integer | 创建时间戳,例如 `1677652288` | 否 | +| id | string | 响应 ID,例如 `"chatcmpl-xxx"` | 否 | +| model | string | 使用的模型 | 否 | +| object | string | 对象类型,例如 `"chat.completion"` | 否 | +| usage | object | Token 使用统计 | 否 | + +--- + +## main.ChatMessage + +| 字段 | 类型 | 说明 | 必填 | +|----|----|----|----| +| content | string | 消息内容,例如 `"Hello"` | 否 | +| name | string | 消息作者名称(可选) | 否 | +| role | string | 角色:`system` / `user` / `assistant` / `tool` | 否 | +| tool_call_id | string | 工具调用 ID(role=tool 时使用) | 否 | +| tool_calls | array | assistant 调用工具时返回的调用信息 | 否 | + +--- + +## main.ChatResponseFormat + +| 字段 | 类型 | 说明 | 必填 | +|----|----|----|----| +| json_schema | object | JSON Schema(type 为 json_schema 时) | 否 | +| type | string | 输出类型:`text` 或 `json_object` | 否 | + +--- + +## main.ChatTool + +| 字段 | 类型 | 说明 | 必填 | +|----|----|----|----| +| function | object | 函数定义(name、description、parameters) | 否 | +| type | string | 必须为 `"function"` | 否 | + +--- + +## main.ChatToolCallFunction + +| 字段 | 类型 | 说明 | 必填 | +|----|----|----|----| +| arguments | string | JSON 字符串形式的参数 | 否 | +| name | string | 函数名称 | 否 | + +--- + +## main.ChatToolCallItem + +| 字段 | 类型 | 说明 | 必填 | +|----|----|----|----| +| function | object | 调用函数信息 | 否 | +| id | string | 工具调用 ID | 否 | +| type | string | `"function"` | 否 | + +--- + +## main.ChatToolFunction + +| 字段 | 类型 | 说明 | 必填 | +|----|----|----|----| +| description | string | 函数说明 | 否 | +| name | string | 函数名称 | 否 | +| parameters | object | 参数 JSON Schema | 否 | + +--- + +## main.ChatUsage + +| 字段 | 类型 | 说明 | 必填 | +|----|----|----|----| +| completion_tokens | integer | 输出 token 数 | 否 | +| prompt_tokens | integer | 输入 token 数 | 否 | +| total_tokens | integer | 总 token 数 | 否 | + 
+--- + +## main.V1ModelItem + +| 字段 | 类型 | 说明 | 必填 | +|----|----|----|----| +| created | integer | 创建时间,例如 `1626777600` | 否 | +| id | string | 模型 ID,例如 `"gpt-4"` | 否 | +| object | string | `"model"` | 否 | +| owned_by | string | 所属组织,例如 `"openai"` | 否 | + +--- + +## main.V1ModelsResponse + +| 字段 | 类型 | 说明 | 必填 | +|----|----|----|----| +| data | array | 模型列表 | 否 | +| object | string | `"list"` | 否 | +| success | boolean | 是否成功,例如 `true` | 否 | From 347d776592e135718a5d473f7fcde939eac34b70 Mon Sep 17 00:00:00 2001 From: ai-bankofai Date: Tue, 17 Mar 2026 19:24:05 +0800 Subject: [PATCH 76/78] Update API.md --- .../current/llm-service/api/API.md | 289 +++++++++--------- 1 file changed, 147 insertions(+), 142 deletions(-) diff --git a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/api/API.md b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/api/API.md index 8ac72e5e..b03f16f3 100644 --- a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/api/API.md +++ b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/api/API.md @@ -1,216 +1,221 @@ -# BANK OF AI LLM API(OpenAI 兼容) +# Bank of AI LLM API(兼容 OpenAI) -用于 `/v1/models` 和 `/v1/chat/completions` 的 OpenAI 格式 API 规范。 +本接口与 **OpenAI API 完全兼容**,你可以直接复用现有 OpenAI SDK,仅需替换 Base URL 即可接入。 -## 版本 -1.0 +支持接口: -**协议:** https -**Host:** api.bankofai.io +- `/v1/chat/completions`:对话生成 +- `/v1/models`:获取模型列表 --- -# /v1/chat/completions +## 基本信息 -## POST +- **版本:** 1.0 +- **协议:** https +- **Host:** api.bankofai.io -### 概述 -创建聊天补全(OpenAI 兼容) +--- -### 描述 -聊天补全接口。 -认证方式:Bearer Token。 +# /v1/chat/completions -- 非流式返回:JSON 格式,内容在 `choices[].content` -- 流式返回:SSE 事件流,内容在 `choices[].delta.content` +## POST ---- +### 功能说明 +创建对话补全(Chat Completion),接口格式与 OpenAI 一致。 -## 参数 +### 认证方式 +在 Header 中传入: -| 参数 | 位置 | 说明 | 必填 | 类型 | -|-----|-----|-----|-----|-----| -| Authorization | header | Bearer ``,例如 `Bearer sk-xxx` | 是 | string | -| body | body | 
请求体(必填:model、messages;可选:stream、max_tokens、temperature、top_p、stop、n) | 是 | main.ChatCompletionsRequest | +``` +Authorization: Bearer sk-xxxx +``` --- -## 返回 +### 返回方式 -| 状态码 | 说明 | 类型 | -|------|------|------| -| 200 | 非流式:`choices[].content`;流式(SSE):每个块为 ChatCompletionsStreamChunk | main.ChatCompletionsResponse | -| 401 | 认证失败 | object | +- **普通模式**:返回完整 JSON(内容在 `choices[].content`) +- **流式模式**:SSE 流(内容在 `choices[].delta.content`) --- -# /v1/models - -## GET +## 请求参数 -### 概述 -获取模型列表(OpenAI 兼容) +| 参数 | 位置 | 说明 | 必填 | +|-----|-----|-----|-----| +| Authorization | header | Bearer Token,例如 `Bearer sk-xxx` | 是 | +| body | body | 请求体(必须包含 model 和 messages) | 是 | -### 描述 -返回可用模型列表。 -认证方式:Bearer Token。 +--- -返回结构: +## 请求体字段说明(body) -- object -- success -- data +| 字段 | 类型 | 说明 | +|----|----|----| +| model | string | 模型 ID,例如 `gpt-4` | +| messages | array | 对话消息列表 | +| max_tokens | integer | 最大生成 token 数 | +| temperature | number | 随机性(0~2) | +| top_p | number | 核采样 | +| n | integer | 返回结果数量 | +| stream | boolean | 是否开启流式输出 | +| stop | string / array | 停止词 | +| presence_penalty | number | 主题惩罚 | +| frequency_penalty | number | 频率惩罚 | +| tools | array | 可调用工具 | +| tool_choice | string / object | 工具调用策略 | +| response_format | object | 输出格式 | +| user | string | 用户标识 | --- -## 参数 +## 返回结果 -| 参数 | 位置 | 说明 | 必填 | 类型 | -|-----|-----|-----|-----|-----| -| Authorization | header | Bearer ``,例如 `Bearer sk-xxx` | 是 | string | +| 状态码 | 说明 | +|------|------| +| 200 | 成功返回结果(普通 / 流式) | +| 401 | 认证失败 | --- -## 返回 - -| 状态码 | 说明 | 类型 | -|------|------|------| -| 200 | 返回模型列表:`{ object, success, data[] }` | main.V1ModelsResponse | -| 401 | 认证失败 | object | +# /v1/models ---- +## GET -# 数据模型 +### 功能说明 +获取当前可用模型列表。 -## main.ChatChoice +### 认证方式 -| 字段 | 类型 | 说明 | 必填 | -|----|----|----|----| -| content | string | 返回内容 | 否 | -| finish_reason | string | 结束原因,例如 `"stop"` | 否 | -| index | integer | 结果序号 | 否 | +``` +Authorization: Bearer sk-xxxx +``` --- -## main.ChatCompletionsRequest - -| 字段 | 类型 | 
说明 | 必填 | -|----|----|----|----| -| frequency_penalty | number | 频率惩罚:-2.0 ~ 2.0,降低重复 token,默认 0 | 否 | -| max_tokens | integer | 最大生成 token 数 | 否 | -| messages | array | 对话消息列表 | 是 | -| model | string | 使用的模型 ID,例如 `"gpt-4"` | 是 | -| n | integer | 生成结果数量,默认 1 | 否 | -| presence_penalty | number | 出现惩罚:-2.0 ~ 2.0,降低重复主题 | 否 | -| response_format | object | 输出格式:`text`、`json_object` 或 `json_schema` | 否 | -| seed | integer | 随机种子(支持时可用于复现结果) | 否 | -| stop | string / array | 停止生成的字符串(最多 4 个) | 否 | -| stream | boolean | 是否使用流式返回(SSE) | 否 | -| temperature | number | 采样温度 0~2,越高越随机 | 否 | -| tool_choice | string / object | 工具调用控制(none / auto / function) | 否 | -| tools | array | 可调用工具列表 | 否 | -| top_p | number | 核采样参数(默认 1) | 否 | -| user | string | 可选用户标识(用于监控滥用) | 否 | +## 返回结果 ---- +| 状态码 | 说明 | +|------|------| +| 200 | 返回模型列表 | +| 401 | 认证失败 | -## main.ChatCompletionsResponse +返回结构: -| 字段 | 类型 | 说明 | 必填 | -|----|----|----|----| -| choices | array | 生成结果 | 否 | -| created | integer | 创建时间戳,例如 `1677652288` | 否 | -| id | string | 响应 ID,例如 `"chatcmpl-xxx"` | 否 | -| model | string | 使用的模型 | 否 | -| object | string | 对象类型,例如 `"chat.completion"` | 否 | -| usage | object | Token 使用统计 | 否 | +```json +{ + "object": "list", + "success": true, + "data": [ + { + "id": "gpt-4", + "object": "model", + "created": 1626777600, + "owned_by": "openai" + } + ] +} +``` --- -## main.ChatMessage +# 数据结构说明 + +## ChatChoice(生成结果) -| 字段 | 类型 | 说明 | 必填 | -|----|----|----|----| -| content | string | 消息内容,例如 `"Hello"` | 否 | -| name | string | 消息作者名称(可选) | 否 | -| role | string | 角色:`system` / `user` / `assistant` / `tool` | 否 | -| tool_call_id | string | 工具调用 ID(role=tool 时使用) | 否 | -| tool_calls | array | assistant 调用工具时返回的调用信息 | 否 | +| 字段 | 类型 | 说明 | +|----|----|----| +| content | string | AI 返回内容 | +| finish_reason | string | 结束原因,例如 `stop` | +| index | integer | 结果序号 | --- -## main.ChatResponseFormat +## ChatMessage(对话消息) -| 字段 | 类型 | 说明 | 必填 | -|----|----|----|----| -| json_schema | object | JSON 
Schema(type 为 json_schema 时) | 否 | -| type | string | 输出类型:`text` 或 `json_object` | 否 | +| 字段 | 类型 | 说明 | +|----|----|----| +| role | string | system / user / assistant / tool | +| content | string | 消息内容 | +| name | string | 可选用户名 | +| tool_call_id | string | 工具调用 ID | +| tool_calls | array | 工具调用信息 | --- -## main.ChatTool +## ChatCompletionsResponse(响应结构) -| 字段 | 类型 | 说明 | 必填 | -|----|----|----|----| -| function | object | 函数定义(name、description、parameters) | 否 | -| type | string | 必须为 `"function"` | 否 | +| 字段 | 类型 | 说明 | +|----|----|----| +| id | string | 请求 ID | +| object | string | 类型 | +| created | integer | 时间戳 | +| model | string | 模型名称 | +| choices | array | 生成结果 | +| usage | object | Token 使用统计 | --- -## main.ChatToolCallFunction +## Token 使用统计(Usage) -| 字段 | 类型 | 说明 | 必填 | -|----|----|----|----| -| arguments | string | JSON 字符串形式的参数 | 否 | -| name | string | 函数名称 | 否 | +| 字段 | 类型 | 说明 | +|----|----|----| +| prompt_tokens | integer | 输入 token | +| completion_tokens | integer | 输出 token | +| total_tokens | integer | 总 token | --- -## main.ChatToolCallItem +## 工具调用(Function Calling) -| 字段 | 类型 | 说明 | 必填 | -|----|----|----|----| -| function | object | 调用函数信息 | 否 | -| id | string | 工具调用 ID | 否 | -| type | string | `"function"` | 否 | +支持 OpenAI 标准工具调用: ---- +### 工具定义 -## main.ChatToolFunction - -| 字段 | 类型 | 说明 | 必填 | -|----|----|----|----| -| description | string | 函数说明 | 否 | -| name | string | 函数名称 | 否 | -| parameters | object | 参数 JSON Schema | 否 | +```json +{ + "type": "function", + "function": { + "name": "get_weather", + "description": "获取天气", + "parameters": {} + } +} +``` --- -## main.ChatUsage +### 模型调用返回 -| 字段 | 类型 | 说明 | 必填 | -|----|----|----|----| -| completion_tokens | integer | 输出 token 数 | 否 | -| prompt_tokens | integer | 输入 token 数 | 否 | -| total_tokens | integer | 总 token 数 | 否 | +```json +{ + "tool_calls": [ + { + "id": "call_xxx", + "type": "function", + "function": { + "name": "get_weather", + "arguments": "{ \"city\": \"Beijing\" }" + } + } + 
] +} +``` --- -## main.V1ModelItem +# 总结 -| 字段 | 类型 | 说明 | 必填 | -|----|----|----|----| -| created | integer | 创建时间,例如 `1626777600` | 否 | -| id | string | 模型 ID,例如 `"gpt-4"` | 否 | -| object | string | `"model"` | 否 | -| owned_by | string | 所属组织,例如 `"openai"` | 否 | +- 完全兼容 OpenAI API +- 支持流式输出 +- 支持工具调用(Function Calling) +- 支持多模型统一接入 ---- +只需将 Base URL 替换为: -## main.V1ModelsResponse +``` +https://api.bankofai.io/v1 +``` -| 字段 | 类型 | 说明 | 必填 | -|----|----|----|----| -| data | array | 模型列表 | 否 | -| object | string | `"list"` | 否 | -| success | boolean | 是否成功,例如 `true` | 否 | +即可快速接入 Bank of AI 🚀 From 9823854dcabddfdd310c6752e6ed17bc0ed604ca Mon Sep 17 00:00:00 2001 From: jerryji-prog Date: Wed, 18 Mar 2026 10:28:25 +0800 Subject: [PATCH 77/78] config sidebars --- docs/llm-service/api/API.md | 10 +++---- .../llm-service/openclaw/integration-guide.md | 4 +-- docusaurus.config.js | 28 +++++++++++-------- .../current.json | 24 ++++++++++++++++ sidebars.js | 6 ++-- src/pages/llmservice.js | 14 ++++++++++ 6 files changed, 65 insertions(+), 21 deletions(-) create mode 100644 src/pages/llmservice.js diff --git a/docs/llm-service/api/API.md b/docs/llm-service/api/API.md index 962f2ca7..bc634f8a 100644 --- a/docs/llm-service/api/API.md +++ b/docs/llm-service/api/API.md @@ -76,7 +76,7 @@ List available models. Auth: Bearer token. Response: object, success, data. | frequency_penalty | number | FrequencyPenalty: -2.0 to 2.0. Penalize repeated tokens. Default 0. | No | | max_tokens | integer | MaxTokens: maximum number of tokens that can be generated in the completion. | No | | messages | [ [main.ChatMessage](#mainchatmessage) ] | Messages: list of messages in the conversation. Required. | No | -| model | string | Model: ID of the model to use (e.g. gpt-4). Required.
*Example:* `"gpt-4"` | No | +| model | string | Model: ID of the model to use (e.g. gpt-4). Required.
*Example:* `"gpt-4"` | No | | n | integer | N: how many chat completion choices to generate. Default 1. | No | | presence_penalty | number | PresencePenalty: -2.0 to 2.0. Penalize tokens that appear in the text so far. Default 0. | No | | response_format | [main.ChatResponseFormat](#mainchatresponseformat) | ResponseFormat: specify output format: { "type": "text" } or { "type": "json_object" } or json_schema. | No | @@ -104,9 +104,9 @@ List available models. Auth: Bearer token. Response: object, success, data. | Name | Type | Description | Required | | ---- | ---- | ----------- | -------- | -| content | string | Content: message content. For tool role, the result of the tool call.
*Example:* `"Hello"` | No | +| content | string | Content: message content. For tool role, the result of the tool call.
*Example:* `"Hello"` | No | | name | string | Name: optional name for the message author (e.g. to disambiguate multiple users). | No | -| role | string | Role: "system" \| "user" \| "assistant" \| "tool". System sets behavior; user/assistant are conversation; tool is tool result.
*Example:* `"user"` | No | +| role | string | Role: "system" \| "user" \| "assistant" \| "tool". System sets behavior; user/assistant are conversation; tool is tool result.
*Example:* `"user"` | No | | tool_call_id | string | ToolCallId: when role is "tool", the id of the tool call this result is for. Required for tool messages. | No | | tool_calls | [ [main.ChatToolCallItem](#mainchattoolcallitem) ] | ToolCalls: when role is "assistant" and the model called tools, array of { id, type, function: { name, arguments } }. | No | @@ -122,7 +122,7 @@ List available models. Auth: Bearer token. Response: object, success, data. | Name | Type | Description | Required | | ---- | ---- | ----------- | -------- | | function | [main.ChatToolFunction](#mainchattoolfunction) | Function: function definition (name, description, parameters). | No | -| type | string | Type: must be "function".
*Example:* `"function"` | No | +| type | string | Type: must be "function".
*Example:* `"function"` | No | #### main.ChatToolCallFunction @@ -137,7 +137,7 @@ List available models. Auth: Bearer token. Response: object, success, data. | ---- | ---- | ----------- | -------- | | function | [main.ChatToolCallFunction](#mainchattoolcallfunction) | Function: name and arguments of the call. | No | | id | string | Id: ID of the tool call. | No | -| type | string | Type: "function".
*Example:* `"function"` | No | +| type | string | Type: "function".
*Example:* `"function"` | No | #### main.ChatToolFunction diff --git a/docs/llm-service/openclaw/integration-guide.md b/docs/llm-service/openclaw/integration-guide.md index 9209bd46..6a146492 100644 --- a/docs/llm-service/openclaw/integration-guide.md +++ b/docs/llm-service/openclaw/integration-guide.md @@ -299,8 +299,8 @@ openclaw tui | Command | Description | | ---------------- | --------------------------------------- | | /status | View the current system status | -| /session | Switch to a specific chat session | -| /model | Switch the LLM | +| /session `` | Switch to a specific chat session | +| /model `` | Switch the LLM | | /help | View available commands | --- diff --git a/docusaurus.config.js b/docusaurus.config.js index 82003ae1..641929cc 100644 --- a/docusaurus.config.js +++ b/docusaurus.config.js @@ -5,7 +5,7 @@ require('dotenv').config() module.exports = { title: 'BANK OF AI | Developer Guide', tagline: 'HTTP 402 Payment Protocol for TRON', - url: 'https://docs.x402-tron.org/', + url: 'https://docs.bankofai.io', baseUrl: '/', trailingSlash: true, i18n: { @@ -97,17 +97,21 @@ module.exports = { ], plugins: [ require.resolve('./docusaurus-plugin-global-style'), - // [ - // '@docusaurus/plugin-client-redirects', - // { - // redirects: [ - // { - // from: '/overview', - // to: '/', - // }, - // ], - // }, - // ], + [ + '@docusaurus/plugin-client-redirects', + { + redirects: [ + { + from: '/llmservice', + to: '/llm-service/introduction', + }, + { + from: '/zh-Hans/llmservice', + to: '/zh-Hans/llm-service/introduction', + }, + ], + }, + ], function webpackFallbackPlugin() { return { name: 'custom-webpack-fallback-plugin', diff --git a/i18n/zh-Hans/docusaurus-plugin-content-docs/current.json b/i18n/zh-Hans/docusaurus-plugin-content-docs/current.json index 92a49a53..4de059a6 100644 --- a/i18n/zh-Hans/docusaurus-plugin-content-docs/current.json +++ b/i18n/zh-Hans/docusaurus-plugin-content-docs/current.json @@ -43,5 +43,29 @@ "sidebar.docsSidebar.doc.OverView": { 
"message": "简介", "description": "The label for the doc item OverView in sidebar docsSidebar" + }, + "sidebar.docsSidebar.category.LLM Service": { + "message": "LLM 服务", + "description": "The label for category LLM Service in sidebar docsSidebar" + }, + "sidebar.docsSidebar.doc.Quick Start": { + "message": "快速入门", + "description": "The label for the doc item Quick Start in sidebar docsSidebar" + }, + "sidebar.docsSidebar.doc.Pricing and Usage": { + "message": "定价与用量", + "description": "The label for the doc item Pricing and Usage in sidebar docsSidebar" + }, + "sidebar.docsSidebar.category.Models": { + "message": "模型列表", + "description": "The label for category Models in sidebar docsSidebar" + }, + "sidebar.docsSidebar.category.OpenClaw": { + "message": "OpenClaw 集成", + "description": "The label for category OpenClaw in sidebar docsSidebar" + }, + "sidebar.docsSidebar.category.API": { + "message": "API 参考", + "description": "The label for category API in sidebar docsSidebar" } } diff --git a/sidebars.js b/sidebars.js index 096e1508..6791594a 100644 --- a/sidebars.js +++ b/sidebars.js @@ -153,7 +153,6 @@ const sidebars = { ], }, - /* --- 这里是为你修正后的 LLM Service 部分 --- */ { type: 'category', label: 'LLM Service', @@ -177,6 +176,9 @@ const sidebars = { 'llm-service/models/claude-sonnet-4-6', 'llm-service/models/gemini-3-1-pro', 'llm-service/models/gemini-3-flash', + 'llm-service/models/glm-5', + 'llm-service/models/kimi-k2.5', + 'llm-service/models/minimax-m2.5', ], }, { @@ -193,7 +195,7 @@ const sidebars = { label: 'API', collapsed: true, items: [ - 'llm-service/api/chat-completion', + 'llm-service/api/API', ], }, ], diff --git a/src/pages/llmservice.js b/src/pages/llmservice.js new file mode 100644 index 00000000..02fc46c6 --- /dev/null +++ b/src/pages/llmservice.js @@ -0,0 +1,14 @@ +import { useEffect } from 'react'; +import { useHistory, useLocation } from '@docusaurus/router'; + +export default function LlmServiceRedirect() { + const history = useHistory(); + const 
{ pathname } = useLocation(); + + useEffect(() => { + const isZh = pathname.startsWith('/zh-Hans'); + history.replace(isZh ? '/zh-Hans/llm-service/introduction' : '/llm-service/introduction'); + }, []); + + return null; +} From 860e0a73978480001ef78d13eb95cf11f2a6984f Mon Sep 17 00:00:00 2001 From: jerryji-prog Date: Wed, 18 Mar 2026 14:27:21 +0800 Subject: [PATCH 78/78] fix llm-service-->llmservice --- docs/{llm-service => llmservice}/api/API.md | 0 .../introduction.md | 0 .../models/chatgpt-5-2.md | 0 .../models/chatgpt-5-mini.md | 0 .../models/chatgpt-5-nano.md | 0 .../models/claude-haiku-4-5.md | 0 .../models/claude-opus-4-5.md | 0 .../models/claude-opus-4-6.md | 0 .../models/claude-sonnet-4-5.md | 0 .../models/claude-sonnet-4-6.md | 0 .../models/gemini-3-1-pro.md | 0 .../models/gemini-3-flash.md | 0 .../models/glm-5.md | 0 .../models/kimi-k2.5.md | 0 .../models/minimax-m2.5.md | 0 .../openclaw/integration-guide.md | 0 .../openclaw/one-click-script-tutorial.md | 0 .../pricing-and-usage.md | 0 .../quick-start.md | 0 docusaurus.config.js | 4 +- .../{llm-service => llmservice}/api/API.md | 0 .../introduction.md | 0 .../models /chatgpt-5-2.md | 0 .../models /chatgpt-5-mini.md | 0 .../models /chatgpt-5-nano.md | 0 .../models /claude-haiku-4-5.md | 0 .../models /claude-opus-4-5.md | 0 .../models /claude-opus-4-6.md | 0 .../models /claude-sonnet-4-5.md | 0 .../models /claude-sonnet-4-6.md | 0 .../models /gemini-3-1-pro.md | 0 .../models /gemini-3-flash.md | 0 .../models /glm-5.md | 0 .../models /kimi-k2.5.md | 0 .../models /minimax-m2.5.md | 0 .../openclaw /integration-guide.md | 0 .../openclaw /one-click-script-tutorial.md | 0 .../pricing-and-usage.md | 0 .../quick-start.md | 0 .../current/sidebars.js | 43 +++++++++++++++ sidebars.js | 52 +++++++------------ src/pages/llmservice.js | 16 +++--- 42 files changed, 73 insertions(+), 42 deletions(-) rename docs/{llm-service => llmservice}/api/API.md (100%) rename docs/{llm-service => llmservice}/introduction.md (100%) 
rename docs/{llm-service => llmservice}/models/chatgpt-5-2.md (100%) rename docs/{llm-service => llmservice}/models/chatgpt-5-mini.md (100%) rename docs/{llm-service => llmservice}/models/chatgpt-5-nano.md (100%) rename docs/{llm-service => llmservice}/models/claude-haiku-4-5.md (100%) rename docs/{llm-service => llmservice}/models/claude-opus-4-5.md (100%) rename docs/{llm-service => llmservice}/models/claude-opus-4-6.md (100%) rename docs/{llm-service => llmservice}/models/claude-sonnet-4-5.md (100%) rename docs/{llm-service => llmservice}/models/claude-sonnet-4-6.md (100%) rename docs/{llm-service => llmservice}/models/gemini-3-1-pro.md (100%) rename docs/{llm-service => llmservice}/models/gemini-3-flash.md (100%) rename docs/{llm-service => llmservice}/models/glm-5.md (100%) rename docs/{llm-service => llmservice}/models/kimi-k2.5.md (100%) rename docs/{llm-service => llmservice}/models/minimax-m2.5.md (100%) rename docs/{llm-service => llmservice}/openclaw/integration-guide.md (100%) rename docs/{llm-service => llmservice}/openclaw/one-click-script-tutorial.md (100%) rename docs/{llm-service => llmservice}/pricing-and-usage.md (100%) rename docs/{llm-service => llmservice}/quick-start.md (100%) rename i18n/zh-Hans/docusaurus-plugin-content-docs/current/{llm-service => llmservice}/api/API.md (100%) rename i18n/zh-Hans/docusaurus-plugin-content-docs/current/{llm-service => llmservice}/introduction.md (100%) rename i18n/zh-Hans/docusaurus-plugin-content-docs/current/{llm-service => llmservice}/models /chatgpt-5-2.md (100%) rename i18n/zh-Hans/docusaurus-plugin-content-docs/current/{llm-service => llmservice}/models /chatgpt-5-mini.md (100%) rename i18n/zh-Hans/docusaurus-plugin-content-docs/current/{llm-service => llmservice}/models /chatgpt-5-nano.md (100%) rename i18n/zh-Hans/docusaurus-plugin-content-docs/current/{llm-service => llmservice}/models /claude-haiku-4-5.md (100%) rename i18n/zh-Hans/docusaurus-plugin-content-docs/current/{llm-service => 
llmservice}/models /claude-opus-4-5.md (100%) rename i18n/zh-Hans/docusaurus-plugin-content-docs/current/{llm-service => llmservice}/models /claude-opus-4-6.md (100%) rename i18n/zh-Hans/docusaurus-plugin-content-docs/current/{llm-service => llmservice}/models /claude-sonnet-4-5.md (100%) rename i18n/zh-Hans/docusaurus-plugin-content-docs/current/{llm-service => llmservice}/models /claude-sonnet-4-6.md (100%) rename i18n/zh-Hans/docusaurus-plugin-content-docs/current/{llm-service => llmservice}/models /gemini-3-1-pro.md (100%) rename i18n/zh-Hans/docusaurus-plugin-content-docs/current/{llm-service => llmservice}/models /gemini-3-flash.md (100%) rename i18n/zh-Hans/docusaurus-plugin-content-docs/current/{llm-service => llmservice}/models /glm-5.md (100%) rename i18n/zh-Hans/docusaurus-plugin-content-docs/current/{llm-service => llmservice}/models /kimi-k2.5.md (100%) rename i18n/zh-Hans/docusaurus-plugin-content-docs/current/{llm-service => llmservice}/models /minimax-m2.5.md (100%) rename i18n/zh-Hans/docusaurus-plugin-content-docs/current/{llm-service => llmservice}/openclaw /integration-guide.md (100%) rename i18n/zh-Hans/docusaurus-plugin-content-docs/current/{llm-service => llmservice}/openclaw /one-click-script-tutorial.md (100%) rename i18n/zh-Hans/docusaurus-plugin-content-docs/current/{llm-service => llmservice}/pricing-and-usage.md (100%) rename i18n/zh-Hans/docusaurus-plugin-content-docs/current/{llm-service => llmservice}/quick-start.md (100%) diff --git a/docs/llm-service/api/API.md b/docs/llmservice/api/API.md similarity index 100% rename from docs/llm-service/api/API.md rename to docs/llmservice/api/API.md diff --git a/docs/llm-service/introduction.md b/docs/llmservice/introduction.md similarity index 100% rename from docs/llm-service/introduction.md rename to docs/llmservice/introduction.md diff --git a/docs/llm-service/models/chatgpt-5-2.md b/docs/llmservice/models/chatgpt-5-2.md similarity index 100% rename from 
docs/llm-service/models/chatgpt-5-2.md rename to docs/llmservice/models/chatgpt-5-2.md diff --git a/docs/llm-service/models/chatgpt-5-mini.md b/docs/llmservice/models/chatgpt-5-mini.md similarity index 100% rename from docs/llm-service/models/chatgpt-5-mini.md rename to docs/llmservice/models/chatgpt-5-mini.md diff --git a/docs/llm-service/models/chatgpt-5-nano.md b/docs/llmservice/models/chatgpt-5-nano.md similarity index 100% rename from docs/llm-service/models/chatgpt-5-nano.md rename to docs/llmservice/models/chatgpt-5-nano.md diff --git a/docs/llm-service/models/claude-haiku-4-5.md b/docs/llmservice/models/claude-haiku-4-5.md similarity index 100% rename from docs/llm-service/models/claude-haiku-4-5.md rename to docs/llmservice/models/claude-haiku-4-5.md diff --git a/docs/llm-service/models/claude-opus-4-5.md b/docs/llmservice/models/claude-opus-4-5.md similarity index 100% rename from docs/llm-service/models/claude-opus-4-5.md rename to docs/llmservice/models/claude-opus-4-5.md diff --git a/docs/llm-service/models/claude-opus-4-6.md b/docs/llmservice/models/claude-opus-4-6.md similarity index 100% rename from docs/llm-service/models/claude-opus-4-6.md rename to docs/llmservice/models/claude-opus-4-6.md diff --git a/docs/llm-service/models/claude-sonnet-4-5.md b/docs/llmservice/models/claude-sonnet-4-5.md similarity index 100% rename from docs/llm-service/models/claude-sonnet-4-5.md rename to docs/llmservice/models/claude-sonnet-4-5.md diff --git a/docs/llm-service/models/claude-sonnet-4-6.md b/docs/llmservice/models/claude-sonnet-4-6.md similarity index 100% rename from docs/llm-service/models/claude-sonnet-4-6.md rename to docs/llmservice/models/claude-sonnet-4-6.md diff --git a/docs/llm-service/models/gemini-3-1-pro.md b/docs/llmservice/models/gemini-3-1-pro.md similarity index 100% rename from docs/llm-service/models/gemini-3-1-pro.md rename to docs/llmservice/models/gemini-3-1-pro.md diff --git a/docs/llm-service/models/gemini-3-flash.md 
b/docs/llmservice/models/gemini-3-flash.md similarity index 100% rename from docs/llm-service/models/gemini-3-flash.md rename to docs/llmservice/models/gemini-3-flash.md diff --git a/docs/llm-service/models/glm-5.md b/docs/llmservice/models/glm-5.md similarity index 100% rename from docs/llm-service/models/glm-5.md rename to docs/llmservice/models/glm-5.md diff --git a/docs/llm-service/models/kimi-k2.5.md b/docs/llmservice/models/kimi-k2.5.md similarity index 100% rename from docs/llm-service/models/kimi-k2.5.md rename to docs/llmservice/models/kimi-k2.5.md diff --git a/docs/llm-service/models/minimax-m2.5.md b/docs/llmservice/models/minimax-m2.5.md similarity index 100% rename from docs/llm-service/models/minimax-m2.5.md rename to docs/llmservice/models/minimax-m2.5.md diff --git a/docs/llm-service/openclaw/integration-guide.md b/docs/llmservice/openclaw/integration-guide.md similarity index 100% rename from docs/llm-service/openclaw/integration-guide.md rename to docs/llmservice/openclaw/integration-guide.md diff --git a/docs/llm-service/openclaw/one-click-script-tutorial.md b/docs/llmservice/openclaw/one-click-script-tutorial.md similarity index 100% rename from docs/llm-service/openclaw/one-click-script-tutorial.md rename to docs/llmservice/openclaw/one-click-script-tutorial.md diff --git a/docs/llm-service/pricing-and-usage.md b/docs/llmservice/pricing-and-usage.md similarity index 100% rename from docs/llm-service/pricing-and-usage.md rename to docs/llmservice/pricing-and-usage.md diff --git a/docs/llm-service/quick-start.md b/docs/llmservice/quick-start.md similarity index 100% rename from docs/llm-service/quick-start.md rename to docs/llmservice/quick-start.md diff --git a/docusaurus.config.js b/docusaurus.config.js index 641929cc..b6eaed44 100644 --- a/docusaurus.config.js +++ b/docusaurus.config.js @@ -103,11 +103,11 @@ module.exports = { redirects: [ { from: '/llmservice', - to: '/llm-service/introduction', + to: '/llmservice/introduction', }, { from: 
'/zh-Hans/llmservice', - to: '/zh-Hans/llm-service/introduction', + to: '/zh-Hans/llmservice/introduction', }, ], }, diff --git a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/api/API.md b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llmservice/api/API.md similarity index 100% rename from i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/api/API.md rename to i18n/zh-Hans/docusaurus-plugin-content-docs/current/llmservice/api/API.md diff --git a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/introduction.md b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llmservice/introduction.md similarity index 100% rename from i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/introduction.md rename to i18n/zh-Hans/docusaurus-plugin-content-docs/current/llmservice/introduction.md diff --git a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /chatgpt-5-2.md b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llmservice/models /chatgpt-5-2.md similarity index 100% rename from i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /chatgpt-5-2.md rename to i18n/zh-Hans/docusaurus-plugin-content-docs/current/llmservice/models /chatgpt-5-2.md diff --git a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /chatgpt-5-mini.md b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llmservice/models /chatgpt-5-mini.md similarity index 100% rename from i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /chatgpt-5-mini.md rename to i18n/zh-Hans/docusaurus-plugin-content-docs/current/llmservice/models /chatgpt-5-mini.md diff --git a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /chatgpt-5-nano.md b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llmservice/models /chatgpt-5-nano.md similarity index 100% rename from i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /chatgpt-5-nano.md rename 
to i18n/zh-Hans/docusaurus-plugin-content-docs/current/llmservice/models /chatgpt-5-nano.md diff --git a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /claude-haiku-4-5.md b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llmservice/models /claude-haiku-4-5.md similarity index 100% rename from i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /claude-haiku-4-5.md rename to i18n/zh-Hans/docusaurus-plugin-content-docs/current/llmservice/models /claude-haiku-4-5.md diff --git a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /claude-opus-4-5.md b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llmservice/models /claude-opus-4-5.md similarity index 100% rename from i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /claude-opus-4-5.md rename to i18n/zh-Hans/docusaurus-plugin-content-docs/current/llmservice/models /claude-opus-4-5.md diff --git a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /claude-opus-4-6.md b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llmservice/models /claude-opus-4-6.md similarity index 100% rename from i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /claude-opus-4-6.md rename to i18n/zh-Hans/docusaurus-plugin-content-docs/current/llmservice/models /claude-opus-4-6.md diff --git a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /claude-sonnet-4-5.md b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llmservice/models /claude-sonnet-4-5.md similarity index 100% rename from i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /claude-sonnet-4-5.md rename to i18n/zh-Hans/docusaurus-plugin-content-docs/current/llmservice/models /claude-sonnet-4-5.md diff --git a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /claude-sonnet-4-6.md b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llmservice/models /claude-sonnet-4-6.md similarity 
index 100% rename from i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /claude-sonnet-4-6.md rename to i18n/zh-Hans/docusaurus-plugin-content-docs/current/llmservice/models /claude-sonnet-4-6.md diff --git a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /gemini-3-1-pro.md b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llmservice/models /gemini-3-1-pro.md similarity index 100% rename from i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /gemini-3-1-pro.md rename to i18n/zh-Hans/docusaurus-plugin-content-docs/current/llmservice/models /gemini-3-1-pro.md diff --git a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /gemini-3-flash.md b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llmservice/models /gemini-3-flash.md similarity index 100% rename from i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /gemini-3-flash.md rename to i18n/zh-Hans/docusaurus-plugin-content-docs/current/llmservice/models /gemini-3-flash.md diff --git a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /glm-5.md b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llmservice/models /glm-5.md similarity index 100% rename from i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /glm-5.md rename to i18n/zh-Hans/docusaurus-plugin-content-docs/current/llmservice/models /glm-5.md diff --git a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /kimi-k2.5.md b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llmservice/models /kimi-k2.5.md similarity index 100% rename from i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /kimi-k2.5.md rename to i18n/zh-Hans/docusaurus-plugin-content-docs/current/llmservice/models /kimi-k2.5.md diff --git a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /minimax-m2.5.md 
b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llmservice/models /minimax-m2.5.md similarity index 100% rename from i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/models /minimax-m2.5.md rename to i18n/zh-Hans/docusaurus-plugin-content-docs/current/llmservice/models /minimax-m2.5.md diff --git a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/openclaw /integration-guide.md b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llmservice/openclaw /integration-guide.md similarity index 100% rename from i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/openclaw /integration-guide.md rename to i18n/zh-Hans/docusaurus-plugin-content-docs/current/llmservice/openclaw /integration-guide.md diff --git a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/openclaw /one-click-script-tutorial.md b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llmservice/openclaw /one-click-script-tutorial.md similarity index 100% rename from i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/openclaw /one-click-script-tutorial.md rename to i18n/zh-Hans/docusaurus-plugin-content-docs/current/llmservice/openclaw /one-click-script-tutorial.md diff --git a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/pricing-and-usage.md b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llmservice/pricing-and-usage.md similarity index 100% rename from i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/pricing-and-usage.md rename to i18n/zh-Hans/docusaurus-plugin-content-docs/current/llmservice/pricing-and-usage.md diff --git a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/quick-start.md b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/llmservice/quick-start.md similarity index 100% rename from i18n/zh-Hans/docusaurus-plugin-content-docs/current/llm-service/quick-start.md rename to i18n/zh-Hans/docusaurus-plugin-content-docs/current/llmservice/quick-start.md diff --git 
a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/sidebars.js b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/sidebars.js index 99f5c1c0..b4a074fe 100644 --- a/i18n/zh-Hans/docusaurus-plugin-content-docs/current/sidebars.js +++ b/i18n/zh-Hans/docusaurus-plugin-content-docs/current/sidebars.js @@ -150,6 +150,49 @@ const sidebars = { collapsed: false, items: ['Openclaw-extension/Overview', 'Openclaw-extension/Setup-use'], }, + + { + type: 'category', + label: 'LLM Service', + collapsed: false, + items: [ + { type: 'doc', id: 'llmservice/introduction', label: '简介' }, + { type: 'doc', id: 'llmservice/quick-start', label: '快速开始' }, + { type: 'doc', id: 'llmservice/pricing-and-usage', label: '定价与使用' }, + { + type: 'category', + label: '模型', + collapsed: true, + items: [ + 'llmservice/models/chatgpt-5-2', + 'llmservice/models/chatgpt-5-mini', + 'llmservice/models/chatgpt-5-nano', + 'llmservice/models/claude-haiku-4-5', + 'llmservice/models/claude-opus-4-5', + 'llmservice/models/claude-opus-4-6', + 'llmservice/models/claude-sonnet-4-5', + 'llmservice/models/claude-sonnet-4-6', + 'llmservice/models/gemini-3-1-pro', + 'llmservice/models/gemini-3-flash', + 'llmservice/models/glm-5', + 'llmservice/models/kimi-k2.5', + 'llmservice/models/minimax-m2.5', + ], + }, + { + type: 'category', + label: 'OpenClaw', + collapsed: true, + items: ['llmservice/openclaw/integration-guide', 'llmservice/openclaw/one-click-script-tutorial'], + }, + { + type: 'category', + label: 'API', + collapsed: true, + items: ['llmservice/api/API'], + }, + ], + }, ], } diff --git a/sidebars.js b/sidebars.js index 6791594a..ade303a2 100644 --- a/sidebars.js +++ b/sidebars.js @@ -73,11 +73,7 @@ const sidebars = { type: 'category', label: 'Usage', collapsed: false, - items: [ - '8004/Usage/Install', - '8004/Usage/ConfigureAgents', - '8004/Usage/RegistrationHTTP' - ], + items: ['8004/Usage/Install', '8004/Usage/ConfigureAgents', '8004/Usage/RegistrationHTTP'], }, ], }, @@ -147,10 +143,7 @@ const 
sidebars = { type: 'category', label: 'Openclaw Extension', collapsed: false, - items: [ - 'Openclaw-extension/Overview', - 'Openclaw-extension/Setup-use' - ], + items: ['Openclaw-extension/Overview', 'Openclaw-extension/Setup-use'], }, { @@ -158,45 +151,40 @@ const sidebars = { label: 'LLM Service', collapsed: false, items: [ - { type: 'doc', id: 'llm-service/introduction', label: 'Introduction' }, - { type: 'doc', id: 'llm-service/quick-start', label: 'Quick Start' }, - { type: 'doc', id: 'llm-service/pricing-and-usage', label: 'Pricing and Usage' }, + { type: 'doc', id: 'llmservice/introduction', label: 'Introduction' }, + { type: 'doc', id: 'llmservice/quick-start', label: 'Quick Start' }, + { type: 'doc', id: 'llmservice/pricing-and-usage', label: 'Pricing and Usage' }, { type: 'category', label: 'Models', collapsed: true, items: [ - 'llm-service/models/chatgpt-5-2', - 'llm-service/models/chatgpt-5-mini', - 'llm-service/models/chatgpt-5-nano', - 'llm-service/models/claude-haiku-4-5', - 'llm-service/models/claude-opus-4-5', - 'llm-service/models/claude-opus-4-6', - 'llm-service/models/claude-sonnet-4-5', - 'llm-service/models/claude-sonnet-4-6', - 'llm-service/models/gemini-3-1-pro', - 'llm-service/models/gemini-3-flash', - 'llm-service/models/glm-5', - 'llm-service/models/kimi-k2.5', - 'llm-service/models/minimax-m2.5', + 'llmservice/models/chatgpt-5-2', + 'llmservice/models/chatgpt-5-mini', + 'llmservice/models/chatgpt-5-nano', + 'llmservice/models/claude-haiku-4-5', + 'llmservice/models/claude-opus-4-5', + 'llmservice/models/claude-opus-4-6', + 'llmservice/models/claude-sonnet-4-5', + 'llmservice/models/claude-sonnet-4-6', + 'llmservice/models/gemini-3-1-pro', + 'llmservice/models/gemini-3-flash', + 'llmservice/models/glm-5', + 'llmservice/models/kimi-k2.5', + 'llmservice/models/minimax-m2.5', ], }, { type: 'category', label: 'OpenClaw', collapsed: true, - items: [ - 'llm-service/openclaw/integration-guide', - 
'llm-service/openclaw/one-click-script-tutorial', - ], + items: ['llmservice/openclaw/integration-guide', 'llmservice/openclaw/one-click-script-tutorial'], }, { type: 'category', label: 'API', collapsed: true, - items: [ - 'llm-service/api/API', - ], + items: ['llmservice/api/API'], }, ], }, diff --git a/src/pages/llmservice.js b/src/pages/llmservice.js index 02fc46c6..a524ade6 100644 --- a/src/pages/llmservice.js +++ b/src/pages/llmservice.js @@ -1,14 +1,14 @@ -import { useEffect } from 'react'; -import { useHistory, useLocation } from '@docusaurus/router'; +import { useEffect } from 'react' +import { useHistory, useLocation } from '@docusaurus/router' export default function LlmServiceRedirect() { - const history = useHistory(); - const { pathname } = useLocation(); + const history = useHistory() + const { pathname } = useLocation() useEffect(() => { - const isZh = pathname.startsWith('/zh-Hans'); - history.replace(isZh ? '/zh-Hans/llm-service/introduction' : '/llm-service/introduction'); - }, []); + const isZh = pathname.startsWith('/zh-Hans') + history.replace(isZh ? '/zh-Hans/llmservice/introduction' : '/llmservice/introduction') + }, []) - return null; + return null }