diff --git a/README.md b/README.md index d496d68edfc..30ccd714379 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,7 @@ +# Attention, please + +This project is forked from [ChatGPT-Next-Web](https://github.com/Yidadaa/ChatGPT-Next-Web) and modified in places to suit my personal preferences. Thanks to [Yidadaa](https://github.com/Yidadaa) and the other developers' hard work, I finally have my own ChatGPT assistant. I should also thank [Vercel](https://vercel.com/): Vercel is the platform for frontend developers, providing the speed and reliability innovators need to create at the moment of inspiration. Thanks again. +
diff --git a/app/api/anthropic/[...path]/route.ts b/app/api/anthropic/[...path]/route.ts
index 4264893d93e..4b1d8cb3ae4 100644
--- a/app/api/anthropic/[...path]/route.ts
+++ b/app/api/anthropic/[...path]/route.ts
@@ -106,12 +106,9 @@ async function request(req: NextRequest) {
console.log("[Proxy] ", path);
console.log("[Base Url]", baseUrl);
- const timeoutId = setTimeout(
- () => {
- controller.abort();
- },
- 10 * 60 * 1000,
- );
+ const timeoutId = setTimeout(() => {
+ controller.abort();
+ }, 10 * 60 * 1000);
const fetchUrl = `${baseUrl}${path}`;
diff --git a/app/api/common.ts b/app/api/common.ts
index a75f2de5cfa..2265a9575ec 100644
--- a/app/api/common.ts
+++ b/app/api/common.ts
@@ -44,12 +44,9 @@ export async function requestOpenai(req: NextRequest) {
console.log("[Proxy] ", path);
console.log("[Base Url]", baseUrl);
- const timeoutId = setTimeout(
- () => {
- controller.abort();
- },
- 10 * 60 * 1000,
- );
+ const timeoutId = setTimeout(() => {
+ controller.abort();
+ }, 10 * 60 * 1000);
if (serverConfig.isAzure) {
if (!serverConfig.azureApiVersion) {
@@ -112,16 +109,16 @@ export async function requestOpenai(req: NextRequest) {
try {
const res = await fetch(fetchUrl, fetchOptions);
- // Extract the OpenAI-Organization header from the response
- const openaiOrganizationHeader = res.headers.get("OpenAI-Organization");
+ // Extract the OpenAI-Organization header from the response
+ const openaiOrganizationHeader = res.headers.get("OpenAI-Organization");
- // Check if serverConfig.openaiOrgId is defined and not an empty string
- if (serverConfig.openaiOrgId && serverConfig.openaiOrgId.trim() !== "") {
- // If openaiOrganizationHeader is present, log it; otherwise, log that the header is not present
- console.log("[Org ID]", openaiOrganizationHeader);
- } else {
- console.log("[Org ID] is not set up.");
- }
+ // Check if serverConfig.openaiOrgId is defined and not an empty string
+ if (serverConfig.openaiOrgId && serverConfig.openaiOrgId.trim() !== "") {
+ // If openaiOrganizationHeader is present, log it; otherwise, log that the header is not present
+ console.log("[Org ID]", openaiOrganizationHeader);
+ } else {
+ console.log("[Org ID] is not set up.");
+ }
// to prevent browser prompt for credentials
const newHeaders = new Headers(res.headers);
@@ -129,7 +126,6 @@ export async function requestOpenai(req: NextRequest) {
// to disable nginx buffering
newHeaders.set("X-Accel-Buffering", "no");
-
// Conditionally delete the OpenAI-Organization header from the response if [Org ID] is undefined or empty (not setup in ENV)
// Also, this is to prevent the header from being sent to the client
if (!serverConfig.openaiOrgId || serverConfig.openaiOrgId.trim() === "") {
@@ -142,7 +138,6 @@ export async function requestOpenai(req: NextRequest) {
// The browser will try to decode the response with brotli and fail
newHeaders.delete("content-encoding");
-
return new Response(res.body, {
status: res.status,
statusText: res.statusText,
diff --git a/app/api/google/[...path]/route.ts b/app/api/google/[...path]/route.ts
index ebd19289129..ffaa3dcaba6 100644
--- a/app/api/google/[...path]/route.ts
+++ b/app/api/google/[...path]/route.ts
@@ -32,12 +32,9 @@ async function handle(
console.log("[Proxy] ", path);
console.log("[Base Url]", baseUrl);
- const timeoutId = setTimeout(
- () => {
- controller.abort();
- },
- 10 * 60 * 1000,
- );
+ const timeoutId = setTimeout(() => {
+ controller.abort();
+ }, 10 * 60 * 1000);
const authResult = auth(req, ModelProvider.GeminiPro);
if (authResult.error) {
diff --git a/app/api/webdav/[...path]/route.ts b/app/api/webdav/[...path]/route.ts
index 816c2046b22..5675f425d6b 100644
--- a/app/api/webdav/[...path]/route.ts
+++ b/app/api/webdav/[...path]/route.ts
@@ -24,8 +24,8 @@ async function handle(
// Validate the endpoint to prevent potential SSRF attacks
if (
- !mergedAllowedWebDavEndpoints.some(
- (allowedEndpoint) => endpoint?.startsWith(allowedEndpoint),
+ !mergedAllowedWebDavEndpoints.some((allowedEndpoint) =>
+ endpoint?.startsWith(allowedEndpoint),
)
) {
return NextResponse.json(
diff --git a/app/client/api.ts b/app/client/api.ts
index 7bee546b4f6..b1ac27b936f 100644
--- a/app/client/api.ts
+++ b/app/client/api.ts
@@ -12,7 +12,7 @@ import { ClaudeApi } from "./platforms/anthropic";
export const ROLES = ["system", "user", "assistant"] as const;
export type MessageRole = (typeof ROLES)[number];
-export const Models = ["gpt-3.5-turbo", "gpt-4"] as const;
+export const Models = ["gpt-3.5-turbo"] as const;
export type ChatModel = ModelType;
export interface MultimodalContent {
diff --git a/app/client/platforms/google.ts b/app/client/platforms/google.ts
index a786f5275f4..76006fd6729 100644
--- a/app/client/platforms/google.ts
+++ b/app/client/platforms/google.ts
@@ -120,7 +120,9 @@ export class GeminiProApi implements LLMApi {
if (!baseUrl) {
baseUrl = isApp
- ? DEFAULT_API_HOST + "/api/proxy/google/" + Google.ChatPath(modelConfig.model)
+ ? DEFAULT_API_HOST +
+ "/api/proxy/google/" +
+ Google.ChatPath(modelConfig.model)
: this.path(Google.ChatPath(modelConfig.model));
}
@@ -139,7 +141,7 @@ export class GeminiProApi implements LLMApi {
() => controller.abort(),
REQUEST_TIMEOUT_MS,
);
-
+
if (shouldStream) {
let responseText = "";
let remainText = "";
@@ -182,59 +184,58 @@ export class GeminiProApi implements LLMApi {
const decoder = new TextDecoder();
let partialData = "";
- return reader?.read().then(function processText({
- done,
- value,
- }): Promise