diff --git a/README.md b/README.md index 7c7a6f243d6..4a2ea1a2426 100644 --- a/README.md +++ b/README.md @@ -177,10 +177,15 @@ Specify OpenAI organization ID. ### `AZURE_URL` (optional) -> Example: https://{azure-resource-url}/openai/deployments/{deploy-name} +> Example: https://{azure-resource-url}/openai/deployments/{deploy-name} +> +> Example: https://xxx.openai.azure.com/openai/deployments/{deploy-name} Azure deploy url. +If `{deploy-name}` is using the template mode, then it will automatically replace the path based on the model selected by the client. +If your model name is different from the deployment name, then you need to set the `AZURE_OPENAI_MODEL_MAPPER` parameter. + ### `AZURE_API_KEY` (optional) Azure Api Key. @@ -216,12 +221,22 @@ If you want to disable parse settings from url, set this to 1. ### `CUSTOM_MODELS` (optional) > Default: Empty +> > Example: `+llama,+claude-2,-gpt-3.5-turbo,gpt-4-1106-preview=gpt-4-turbo` means add `llama, claude-2` to model list, and remove `gpt-3.5-turbo` from list, and display `gpt-4-1106-preview` as `gpt-4-turbo`. +> +> Example: `-all,gpt-35-turbo,gpt-4,gpt-4-32k`. The meaning is to only display `gpt-35-turbo`, `gpt-4`, and `gpt-4-32k` in the model list. To control custom models, use `+` to add a custom model, use `-` to hide a model, use `name=displayName` to customize model name, separated by comma. User `-all` to disable all default models, `+all` to enable all default models. +### `AZURE_OPENAI_MODEL_MAPPER` (optional) +> Default: Empty +> Example: `gpt-3.5-turbo=gpt-35-turbo` means map `gpt-3.5-turbo` to `gpt-35-turbo` + +If you are deploying ChatGPT using Azure OpenAI, it is recommended to set the `AZURE_OPENAI_MODEL_MAPPER`. +The session summarization feature relies on the `gpt-3.5-turbo` model, so a mapping is required unless your Azure deployment is named exactly `gpt-3.5-turbo`. 
+ ## Requirements NodeJS >= 18, Docker >= 20 diff --git a/README_CN.md b/README_CN.md index d734796581b..3cc6e4b95f7 100644 --- a/README_CN.md +++ b/README_CN.md @@ -124,9 +124,16 @@ Azure Api 版本,你可以在这里找到:[Azure 文档](https://learn.micro > 示例:`+qwen-7b-chat,+glm-6b,-gpt-3.5-turbo,gpt-4-1106-preview=gpt-4-turbo` 表示增加 `qwen-7b-chat` 和 `glm-6b` 到模型列表,而从列表中删除 `gpt-3.5-turbo`,并将 `gpt-4-1106-preview` 模型名字展示为 `gpt-4-turbo`。 > 如果你想先禁用所有模型,再启用指定模型,可以使用 `-all,+gpt-3.5-turbo`,则表示仅启用 `gpt-3.5-turbo` - +> Example: `-all,gpt-35-turbo,gpt-4,gpt-4-32k`. 意思是在model 列表中只显示gpt-35-turbo,gpt-4,gpt-4-32k 用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名` 来自定义模型的展示名,用英文逗号隔开。 +### `AZURE_OPENAI_MODEL_MAPPER` (optional) +> Default: Empty +> Example: `gpt-3.5-turbo=gpt-35-turbo` 意思是 `gpt-3.5-turbo` 映射到 `gpt-35-turbo` + +如果你使用azure openai 来部署的chatgpt,建议设定AZURE_OPENAI_MODEL_MAPPER . +会话摘要功能依赖gpt-3.5-turbo 这个model,除非你的azure deployment的名字和它相同 + ## 开发 点击下方按钮,开始二次开发: diff --git a/app/api/common.ts b/app/api/common.ts index 6b0d619df1d..7edd96358f8 100644 --- a/app/api/common.ts +++ b/app/api/common.ts @@ -6,6 +6,13 @@ import { makeAzurePath } from "../azure"; const serverConfig = getServerSideConfig(); +export function azureModelMaper(model: string) { + if (serverConfig.isAzure) { + return serverConfig.azureModelMaper[model] ?? 
model; + } + return model; +} + export async function requestOpenai(req: NextRequest) { const controller = new AbortController(); @@ -28,6 +35,13 @@ export async function requestOpenai(req: NextRequest) { baseUrl = baseUrl.slice(0, -1); } + let body = await req.json(); + console.log("[model name]", body["model"]); + baseUrl = baseUrl.replace( + "{deploy-name}", + azureModelMaper(body["model"]) as string, + ); + console.log("[Proxy] ", path); console.log("[Base Url]", baseUrl); // this fix [Org ID] undefined in server side if not using custom point @@ -63,7 +77,7 @@ export async function requestOpenai(req: NextRequest) { }), }, method: req.method, - body: req.body, + body: JSON.stringify(body), // to fix #2485: https://stackoverflow.com/questions/55920957/cloudflare-worker-typeerror-one-time-use-body redirect: "manual", // @ts-ignore @@ -72,7 +86,7 @@ }; // #1815 try to refuse gpt4 request - if (serverConfig.customModels && req.body) { + if (serverConfig.customModels && body) { try { const modelTable = collectModelTable( DEFAULT_MODELS, diff --git a/app/config/server.ts b/app/config/server.ts index 2398805a264..58779ff8122 100644 --- a/app/config/server.ts +++ b/app/config/server.ts @@ -70,6 +70,16 @@ export const getServerSideConfig = () => { `[Server Config] using ${randomIndex + 1} of ${apiKeys.length} api key`, ); + let AzureModelMaper: Record<string, string> = {}; + (process.env.AZURE_OPENAI_MODEL_MAPPER || "") + .trim() + .split(",") + .map((v) => v.trim().split("=")) + .forEach(([k, v]) => { + AzureModelMaper[k] = v; + }); + console.log("[AZURE_OPENAI_MODEL_MAPPER]", AzureModelMaper); + return { baseUrl: process.env.BASE_URL, apiKey, @@ -79,6 +89,7 @@ azureUrl: process.env.AZURE_URL, azureApiKey: process.env.AZURE_API_KEY, azureApiVersion: process.env.AZURE_API_VERSION, + azureModelMaper: AzureModelMaper, needCode: ACCESS_CODES.size > 0, code: process.env.CODE, diff --git 
 a/next.config.mjs b/next.config.mjs index 4faa63e5450..c62e33307b7 100644 --- a/next.config.mjs +++ b/next.config.mjs @@ -24,6 +24,8 @@ const nextConfig = { child_process: false, }; + // enable source-map generation for easier debugging + config.devtool = 'source-map'; return config; }, output: mode,