Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
26 commits
Select commit Hold shift + click to select a range
6aecdd8
Update constant.ts
Davidlasky Feb 26, 2025
af1dfd2
Update constant.ts
Davidlasky Feb 26, 2025
75cdd15
Update constant.ts
Davidlasky Feb 26, 2025
3b23f5f
Merge branch 'main' of https://github.com/ChatGPTNextWeb/ChatGPT-Next…
actions-user Feb 27, 2025
e8dcede
Merge branch 'main' of https://github.com/ChatGPTNextWeb/ChatGPT-Next…
actions-user Mar 1, 2025
fd2e69d
Merge branch 'main' of https://github.com/ChatGPTNextWeb/ChatGPT-Next…
actions-user Mar 2, 2025
fd998de
Merge branch 'ChatGPTNextWeb:main' into main
Davidlasky Mar 14, 2025
20df2ee
Merge branch 'main' of https://github.com/ChatGPTNextWeb/ChatGPT-Next…
actions-user Mar 21, 2025
2f5184c
Update constant.ts
Davidlasky Mar 25, 2025
d65aca6
Update constant.ts
Davidlasky Mar 25, 2025
2509495
try to add o1 as a vision model
Davidlasky Mar 25, 2025
6d69494
Update utils.ts
Davidlasky Mar 27, 2025
b5ee4c1
make timeout longer
Davidlasky Mar 29, 2025
2329d59
defaults o3-mini to high
Davidlasky Apr 1, 2025
e30d907
remove legacy models and support new models
Davidlasky Apr 1, 2025
106db97
enable o3-mini-high, optimize timeout for thinking models
Davidlasky Apr 1, 2025
ff196f2
local debug
Davidlasky Apr 1, 2025
f2a5af7
ignore local Dockerfile
Davidlasky Apr 1, 2025
d02f9b0
added gpt-4.1 and removed gpt-4.5-preview
Davidlasky Apr 15, 2025
ac7b720
send longer chat history
Davidlasky Apr 15, 2025
ef7674b
only used desired model providers
Davidlasky Apr 18, 2025
55b6f23
switch from yarn to npm
Davidlasky Apr 18, 2025
5d4e393
add o4-mini-high and remove all unused models
Davidlasky Apr 18, 2025
d2e484f
change to npm
Davidlasky Apr 18, 2025
3aa2c68
add gemini-2.5-flash-preview-04-17 and replace gemini-2.5-pro-exp-03-…
Davidlasky Apr 18, 2025
e4264d3
Update test.yml
Davidlasky Apr 18, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 5 additions & 5 deletions .github/workflows/test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -21,19 +21,19 @@ jobs:
- name: Set up Node.js
uses: actions/setup-node@v3
with:
node-version: 18
cache: "yarn"
node-version: 23
cache: 'npm'

- name: Cache node_modules
uses: actions/cache@v4
with:
path: node_modules
key: ${{ runner.os }}-node_modules-${{ hashFiles('**/yarn.lock') }}
key: ${{ runner.os }}-node_modules-${{ hashFiles('**/package-lock.json') }}
restore-keys: |
${{ runner.os }}-node_modules-

- name: Install dependencies
run: yarn install
run: npm install

- name: Run Jest tests
run: yarn test:ci
run: npm run test:ci
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -49,3 +49,4 @@ masks.json

# mcp config
app/mcp/mcp_config.json
Dockerfile.local
16 changes: 7 additions & 9 deletions Dockerfile
Original file line number Diff line number Diff line change
@@ -1,29 +1,25 @@
FROM node:18-alpine AS base
FROM node:23-alpine AS base
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue

Node 23-alpine may not yet be an LTS image – verify runtime compatibility and availability

Jumping from node:18-alpine (current LTS) to node:23-alpine brings bleeding-edge V8 semantics and potential ABI changes.
• Some native add-ons compiled during npm install (e.g. sharp, bcrypt) may break.
• Docker Hub does not currently publish an official 23-alpine tag; the build will 404.

Unless you explicitly need Node 23 features, stick to the latest LTS (node:20-alpine) or test the new tag's existence in CI before merging.


FROM base AS deps

RUN apk add --no-cache libc6-compat

WORKDIR /app

COPY package.json yarn.lock ./
COPY package.json package-lock.json ./

RUN yarn config set registry 'https://registry.npmmirror.com/'
RUN yarn install
RUN npm config set registry 'https://registry.npmmirror.com/'
RUN npm install

FROM base AS builder

RUN apk update && apk add --no-cache git

ENV OPENAI_API_KEY=""
ENV GOOGLE_API_KEY=""
ENV CODE=""

WORKDIR /app
COPY --from=deps /app/node_modules ./node_modules
COPY . .

RUN yarn build
RUN npm run build

FROM base AS runner
WORKDIR /app
Expand All @@ -33,6 +29,8 @@ RUN apk add proxychains-ng
ENV PROXY_URL=""
ENV OPENAI_API_KEY=""
ENV GOOGLE_API_KEY=""
ENV ANTHROPIC_API_KEY=""
ENV VISION_MODELS=""
ENV CODE=""
ENV ENABLE_MCP=""

Expand Down
2 changes: 0 additions & 2 deletions app/client/platforms/anthropic.ts
Original file line number Diff line number Diff line change
Expand Up @@ -71,8 +71,6 @@ const ClaudeMapper = {
system: "user",
} as const;

const keys = ["claude-2, claude-instant-1"];

export class ClaudeApi implements LLMApi {
speech(options: SpeechOptions): Promise<ArrayBuffer> {
throw new Error("Method not implemented.");
Expand Down
2 changes: 0 additions & 2 deletions app/client/platforms/google.ts
Original file line number Diff line number Diff line change
Expand Up @@ -197,8 +197,6 @@ export class GeminiProApi implements LLMApi {
signal: controller.signal,
headers: getHeaders(),
};

const isThinking = options.config.model.includes("-thinking");
// make a fetch request
const requestTimeoutId = setTimeout(
() => controller.abort(),
Expand Down
45 changes: 28 additions & 17 deletions app/client/platforms/openai.ts
Original file line number Diff line number Diff line change
Expand Up @@ -67,6 +67,8 @@ export interface RequestPayload {
top_p: number;
max_tokens?: number;
max_completion_tokens?: number;
reasoning_effort?: string;
// O3 only
}

export interface DalleRequestPayload {
Expand Down Expand Up @@ -196,9 +198,10 @@ export class ChatGPTApi implements LLMApi {
let requestPayload: RequestPayload | DalleRequestPayload;

const isDalle3 = _isDalle3(options.config.model);
const isO1OrO3 =
options.config.model.startsWith("o1") ||
options.config.model.startsWith("o3");
const isO1 = options.config.model.startsWith("o1");
const isO3 = options.config.model.startsWith("o3");
const isO4 = options.config.model.startsWith("o4");
const isO1OrO3orO4 = isO1 || isO3 || isO4;
if (isDalle3) {
const prompt = getMessageTextContent(
options.messages.slice(-1)?.pop() as any,
Expand All @@ -220,7 +223,7 @@ export class ChatGPTApi implements LLMApi {
const content = visionModel
? await preProcessImageContent(v.content)
: getMessageTextContent(v);
if (!(isO1OrO3 && v.role === "system"))
if (!(isO1OrO3orO4 && v.role === "system"))
messages.push({ role: v.role, content });
}

Expand All @@ -229,22 +232,31 @@ export class ChatGPTApi implements LLMApi {
messages,
stream: options.config.stream,
model: modelConfig.model,
temperature: !isO1OrO3 ? modelConfig.temperature : 1,
presence_penalty: !isO1OrO3 ? modelConfig.presence_penalty : 0,
frequency_penalty: !isO1OrO3 ? modelConfig.frequency_penalty : 0,
top_p: !isO1OrO3 ? modelConfig.top_p : 1,
temperature: !isO1OrO3orO4 ? modelConfig.temperature : 1,
presence_penalty: !isO1OrO3orO4 ? modelConfig.presence_penalty : 0,
frequency_penalty: !isO1OrO3orO4 ? modelConfig.frequency_penalty : 0,
top_p: !isO1OrO3orO4 ? modelConfig.top_p : 1,
// max_tokens: Math.max(modelConfig.max_tokens, 1024),
// Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
};

// O1 使用 max_completion_tokens 控刢tokenζ•° (https://platform.openai.com/docs/guides/reasoning#controlling-costs)
if (isO1OrO3) {
requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
if (isO1OrO3orO4) {
requestPayload["max_completion_tokens"] = 20000;
}

if (isO4) {
requestPayload["reasoning_effort"] = "high";
// make o4-mini defaults to high reasoning effort
}

// add max_tokens to vision model
if (visionModel) {
requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
if (isO1OrO3orO4) {
requestPayload["max_completion_tokens"] = 20000;
} else {
requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
}
}
}

Expand Down Expand Up @@ -286,6 +298,11 @@ export class ChatGPTApi implements LLMApi {
isDalle3 ? OpenaiPath.ImagePath : OpenaiPath.ChatPath,
);
}
// make a fetch request
const requestTimeoutId = setTimeout(
() => controller.abort(),
getTimeoutMSByModel(options.config.model),
);
if (shouldStream) {
let index = -1;
const [tools, funcs] = usePluginStore
Expand Down Expand Up @@ -393,12 +410,6 @@ export class ChatGPTApi implements LLMApi {
headers: getHeaders(),
};

// make a fetch request
const requestTimeoutId = setTimeout(
() => controller.abort(),
getTimeoutMSByModel(options.config.model),
);

const res = await fetch(chatPath, chatPayload);
clearTimeout(requestTimeoutId);

Expand Down
Loading
Loading