diff --git a/apps/dokploy/components/dashboard/settings/destination/handle-destinations.tsx b/apps/dokploy/components/dashboard/settings/destination/handle-destinations.tsx index 966c8e5f5b..7728fd7dbe 100644 --- a/apps/dokploy/components/dashboard/settings/destination/handle-destinations.tsx +++ b/apps/dokploy/components/dashboard/settings/destination/handle-destinations.tsx @@ -37,16 +37,42 @@ import { cn } from "@/lib/utils"; import { api } from "@/utils/api"; import { S3_PROVIDERS } from "./constants"; -const addDestination = z.object({ - name: z.string().min(1, "Name is required"), - provider: z.string().min(1, "Provider is required"), - accessKeyId: z.string().min(1, "Access Key Id is required"), - secretAccessKey: z.string().min(1, "Secret Access Key is required"), - bucket: z.string().min(1, "Bucket is required"), - region: z.string(), - endpoint: z.string().min(1, "Endpoint is required"), - serverId: z.string().optional(), -}); +const addDestination = z + .object({ + name: z.string().min(1, "Name is required"), + destinationType: z.enum(["s3", "sftp", "ftp"]).default("s3"), + provider: z.string().optional(), + accessKeyId: z.string().optional(), + secretAccessKey: z.string().optional(), + bucket: z.string().optional(), + region: z.string().optional(), + endpoint: z.string().optional(), + host: z.string().optional(), + port: z.coerce.number().optional(), + username: z.string().optional(), + password: z.string().optional(), + basePath: z.string().optional(), + serverId: z.string().optional(), + }) + .superRefine((data, ctx) => { + if (data.destinationType === "s3") { + if (!data.accessKeyId) + ctx.addIssue({ code: "custom", path: ["accessKeyId"], message: "Access Key Id is required" }); + if (!data.secretAccessKey) + ctx.addIssue({ code: "custom", path: ["secretAccessKey"], message: "Secret Access Key is required" }); + if (!data.bucket) + ctx.addIssue({ code: "custom", path: ["bucket"], message: "Bucket is required" }); + if (!data.endpoint) + 
ctx.addIssue({ code: "custom", path: ["endpoint"], message: "Endpoint is required" }); + } else { + if (!data.host) + ctx.addIssue({ code: "custom", path: ["host"], message: "Host is required" }); + if (!data.username) + ctx.addIssue({ code: "custom", path: ["username"], message: "Username is required" }); + if (!data.password) + ctx.addIssue({ code: "custom", path: ["password"], message: "Password is required" }); + } + }); type AddDestination = z.infer; @@ -82,6 +108,7 @@ export const HandleDestinations = ({ destinationId }: Props) => { const form = useForm({ defaultValues: { + destinationType: "s3", provider: "", accessKeyId: "", bucket: "", @@ -89,19 +116,33 @@ export const HandleDestinations = ({ destinationId }: Props) => { region: "", secretAccessKey: "", endpoint: "", + host: "", + port: undefined, + username: "", + password: "", + basePath: "", }, resolver: zodResolver(addDestination), }); + + const destinationType = form.watch("destinationType"); + useEffect(() => { if (destination) { form.reset({ name: destination.name, + destinationType: (destination.destinationType as "s3" | "sftp" | "ftp") ?? "s3", provider: destination.provider || "", accessKeyId: destination.accessKey, secretAccessKey: destination.secretAccessKey, bucket: destination.bucket, region: destination.region, endpoint: destination.endpoint, + host: destination.host || "", + port: destination.port ?? 
undefined, + username: destination.username || "", + password: "", + basePath: destination.basePath || "", }); } else { form.reset(); @@ -109,14 +150,21 @@ export const HandleDestinations = ({ destinationId }: Props) => { }, [form, form.reset, form.formState.isSubmitSuccessful, destination]); const onSubmit = async (data: AddDestination) => { + const isS3 = data.destinationType === "s3"; await mutateAsync({ - provider: data.provider || "", - accessKey: data.accessKeyId, - bucket: data.bucket, - endpoint: data.endpoint, name: data.name, - region: data.region, - secretAccessKey: data.secretAccessKey, + destinationType: data.destinationType, + provider: data.provider || "", + accessKey: isS3 ? (data.accessKeyId || "") : "", + bucket: isS3 ? (data.bucket || "") : "", + endpoint: isS3 ? (data.endpoint || "") : "", + region: isS3 ? (data.region || "") : "", + secretAccessKey: isS3 ? (data.secretAccessKey || "") : "", + host: data.host, + port: data.port, + username: data.username, + password: data.password, + basePath: data.basePath, destinationId: destinationId || "", }) .then(async () => { @@ -135,25 +183,20 @@ export const HandleDestinations = ({ destinationId }: Props) => { }; const handleTestConnection = async (serverId?: string) => { - const result = await form.trigger([ - "provider", - "accessKeyId", - "secretAccessKey", - "bucket", - "endpoint", - ]); - - if (!result) { - const errors = form.formState.errors; - const errorFields = Object.entries(errors) - .map(([field, error]) => `${field}: ${error?.message}`) - .filter(Boolean) - .join("\n"); + const type = form.getValues("destinationType"); - toast.error("Please fill all required fields", { - description: errorFields, - }); - return; + if (type === "s3") { + const result = await form.trigger(["provider", "accessKeyId", "secretAccessKey", "bucket", "endpoint"]); + if (!result) { + toast.error("Please fill all required fields"); + return; + } + } else { + const result = await form.trigger(["host", "username", 
"password"]); + if (!result) { + toast.error("Please fill all required fields"); + return; + } } if (isCloud && !serverId) { @@ -161,31 +204,31 @@ export const HandleDestinations = ({ destinationId }: Props) => { return; } - const provider = form.getValues("provider"); - const accessKey = form.getValues("accessKeyId"); - const secretKey = form.getValues("secretAccessKey"); - const bucket = form.getValues("bucket"); - const endpoint = form.getValues("endpoint"); - const region = form.getValues("region"); - - const connectionString = `:s3,provider=${provider},access_key_id=${accessKey},secret_access_key=${secretKey},endpoint=${endpoint}${region ? `,region=${region}` : ""}:${bucket}`; + const values = form.getValues(); + const isS3 = type === "s3"; await testConnection({ - provider, - accessKey, - bucket, - endpoint, name: "Test", - region, - secretAccessKey: secretKey, + destinationType: type, + provider: values.provider || "", + accessKey: isS3 ? (values.accessKeyId || "") : "", + bucket: isS3 ? (values.bucket || "") : "", + endpoint: isS3 ? (values.endpoint || "") : "", + region: isS3 ? (values.region || "") : "", + secretAccessKey: isS3 ? (values.secretAccessKey || "") : "", + host: values.host, + port: values.port, + username: values.username, + password: values.password, + basePath: values.basePath, serverId, }) .then(() => { toast.success("Connection Success"); }) .catch((e) => { - toast.error("Error connecting to provider", { - description: `${e.message}\n\nTry manually: rclone ls ${connectionString}`, + toast.error("Error connecting to destination", { + description: e.message, }); }); }; @@ -214,9 +257,7 @@ export const HandleDestinations = ({ destinationId }: Props) => { {destinationId ? "Update" : "Add"} Destination - In this section, you can configure and add new destinations for your - backups. Please ensure that you provide the correct information to - guarantee secure and efficient storage. + Configure a backup destination. 
Supports S3-compatible storage, SFTP, and FTP. {(isError || isErrorConnection) && ( @@ -234,79 +275,11 @@ export const HandleDestinations = ({ destinationId }: Props) => { { - return ( - - Name - - - - - - ); - }} - /> - { - return ( - - Provider - - - - - - ); - }} - /> - - { - return ( - - Access Key Id - - - - - - ); - }} - /> - ( -
- Secret Access Key -
+ Name - +
@@ -314,50 +287,209 @@ export const HandleDestinations = ({ destinationId }: Props) => { /> ( -
- Bucket -
+ Type - - - -
- )} - /> - ( - -
- Region -
- - - - -
- )} - /> - ( - - Endpoint - - + )} /> + + {destinationType === "s3" && ( + <> + ( + + Provider + + + + + + )} + /> + ( + + Access Key Id + + + + + + )} + /> + ( + + Secret Access Key + + + + + + )} + /> + ( + + Bucket + + + + + + )} + /> + ( + + Region + + + + + + )} + /> + ( + + Endpoint + + + + + + )} + /> + + )} + + {(destinationType === "sftp" || destinationType === "ftp") && ( + <> + ( + + Host + + + + + + )} + /> + ( + + Port (optional) + + + + + + )} + /> + ( + + Username + + + + + + )} + /> + ( + + Password + + + + + + )} + /> + ( + + Base Path (optional) + + + + + + )} + /> + + )} { {isCloud ? (
- Select a server to test the destination. If you don't have a + Select a server to test the destination. If you do not have a server choose the default one. { - S3 Destinations + Backup Destinations - Add your providers like AWS S3, Cloudflare R2, Wasabi, - DigitalOcean Spaces etc. + Add destinations like AWS S3, Cloudflare R2, Wasabi, + DigitalOcean Spaces, SFTP servers, or FTP servers. @@ -44,7 +45,7 @@ export const ShowDestinations = () => { To create a backup it is required to set at least 1 - provider. + destination. {permissions?.destination.create && }
@@ -58,9 +59,14 @@ export const ShowDestinations = () => { >
- - {index + 1}. {destination.name} - +
+ + {index + 1}. {destination.name} + + + {(destination as any).destinationType ?? "s3"} + +
Created at:{" "} {new Date( diff --git a/apps/dokploy/drizzle/0150_add_sftp_ftp_destination_types.sql b/apps/dokploy/drizzle/0150_add_sftp_ftp_destination_types.sql new file mode 100755 index 0000000000..b019f4957f --- /dev/null +++ b/apps/dokploy/drizzle/0150_add_sftp_ftp_destination_types.sql @@ -0,0 +1,11 @@ +ALTER TABLE "destination" ADD COLUMN "destinationType" text NOT NULL DEFAULT 's3'; +ALTER TABLE "destination" ADD COLUMN "host" text; +ALTER TABLE "destination" ADD COLUMN "port" integer; +ALTER TABLE "destination" ADD COLUMN "username" text; +ALTER TABLE "destination" ADD COLUMN "password" text; +ALTER TABLE "destination" ADD COLUMN "basePath" text; +ALTER TABLE "destination" ALTER COLUMN "accessKey" SET DEFAULT ''; +ALTER TABLE "destination" ALTER COLUMN "secretAccessKey" SET DEFAULT ''; +ALTER TABLE "destination" ALTER COLUMN "bucket" SET DEFAULT ''; +ALTER TABLE "destination" ALTER COLUMN "region" SET DEFAULT ''; +ALTER TABLE "destination" ALTER COLUMN "endpoint" SET DEFAULT ''; diff --git a/apps/dokploy/drizzle/meta/_journal.json b/apps/dokploy/drizzle/meta/_journal.json index fa42bc1eb6..bba2c252b5 100644 --- a/apps/dokploy/drizzle/meta/_journal.json +++ b/apps/dokploy/drizzle/meta/_journal.json @@ -1051,6 +1051,13 @@ "when": 1773637297592, "tag": "0149_rare_radioactive_man", "breakpoints": true + }, + { + "idx": 150, + "version": "7", + "when": 1774000000000, + "tag": "0150_add_sftp_ftp_destination_types", + "breakpoints": true } ] } \ No newline at end of file diff --git a/apps/dokploy/server/api/routers/backup.ts b/apps/dokploy/server/api/routers/backup.ts index d84eb7e9da..507fb65049 100644 --- a/apps/dokploy/server/api/routers/backup.ts +++ b/apps/dokploy/server/api/routers/backup.ts @@ -27,7 +27,8 @@ import { import { findDestinationById } from "@dokploy/server/services/destination"; import { runComposeBackup } from "@dokploy/server/utils/backups/compose"; import { - getS3Credentials, + getRcloneFlags, + getRcloneRemote, normalizeS3Path, } from 
"@dokploy/server/utils/backups/utils"; import { @@ -424,8 +425,8 @@ export const backupRouter = createTRPCRouter({ .query(async ({ input }) => { try { const destination = await findDestinationById(input.destinationId); - const rcloneFlags = getS3Credentials(destination); - const bucketPath = `:s3:${destination.bucket}`; + const rcloneFlags = getRcloneFlags(destination); + const bucketPath = getRcloneRemote(destination, ""); const lastSlashIndex = input.search.lastIndexOf("/"); const baseDir = diff --git a/apps/dokploy/server/api/routers/destination.ts b/apps/dokploy/server/api/routers/destination.ts index 81720fc1df..7ff8ffb50c 100644 --- a/apps/dokploy/server/api/routers/destination.ts +++ b/apps/dokploy/server/api/routers/destination.ts @@ -4,6 +4,7 @@ import { execAsyncRemote, findDestinationById, IS_CLOUD, + obscureRclonePassword, removeDestinationById, updateDestinationById, } from "@dokploy/server"; @@ -20,13 +21,90 @@ import { destinations, } from "@/server/db/schema"; +const buildTestCommand = async (input: { + destinationType?: string; + accessKey?: string; + secretAccessKey?: string; + bucket?: string; + region?: string; + endpoint?: string; + provider?: string; + host?: string; + port?: number; + username?: string; + password?: string; + basePath?: string; +}) => { + const type = input.destinationType ?? "s3"; + + if (type === "sftp") { + const obscuredPass = await obscureRclonePassword(input.password || ""); + const flags = [ + `--sftp-host="${input.host}"`, + `--sftp-user="${input.username}"`, + `--sftp-pass="${obscuredPass}"`, + "--retries 1", + "--low-level-retries 1", + "--timeout 10s", + "--contimeout 5s", + ]; + if (input.port) flags.push(`--sftp-port="${input.port}"`); + const base = input.basePath + ? 
input.basePath.replace(/^\/+|\/+$/g, "") + "/" + : ""; + const remote = `:sftp:${base}`; + return `rclone ls ${flags.join(" ")} "${remote}"`; + } + + if (type === "ftp") { + const obscuredPass = await obscureRclonePassword(input.password || ""); + const flags = [ + `--ftp-host="${input.host}"`, + `--ftp-user="${input.username}"`, + `--ftp-pass="${obscuredPass}"`, + "--retries 1", + "--low-level-retries 1", + "--timeout 10s", + "--contimeout 5s", + ]; + if (input.port) flags.push(`--ftp-port="${input.port}"`); + const base = input.basePath + ? input.basePath.replace(/^\/+|\/+$/g, "") + "/" + : ""; + const remote = `:ftp:${base}`; + return `rclone ls ${flags.join(" ")} "${remote}"`; + } + + // S3 + const rcloneFlags = [ + `--s3-access-key-id="${input.accessKey}"`, + `--s3-secret-access-key="${input.secretAccessKey}"`, + `--s3-region="${input.region}"`, + `--s3-endpoint="${input.endpoint}"`, + "--s3-no-check-bucket", + "--s3-force-path-style", + "--retries 1", + "--low-level-retries 1", + "--timeout 10s", + "--contimeout 5s", + ]; + if (input.provider) { + rcloneFlags.unshift(`--s3-provider="${input.provider}"`); + } + return `rclone ls ${rcloneFlags.join(" ")} ":s3:${input.bucket}"`; +}; + export const destinationRouter = createTRPCRouter({ create: withPermission("destination", "create") .input(apiCreateDestination) .mutation(async ({ input, ctx }) => { try { + const data = { ...input }; + if (data.destinationType !== "s3" && data.password) { + data.password = await obscureRclonePassword(data.password); + } const result = await createDestintation( - input, + data, ctx.session.activeOrganizationId, ); await audit(ctx, { @@ -47,26 +125,8 @@ export const destinationRouter = createTRPCRouter({ testConnection: withPermission("destination", "create") .input(apiCreateDestination) .mutation(async ({ input }) => { - const { secretAccessKey, bucket, region, endpoint, accessKey, provider } = - input; try { - const rcloneFlags = [ - `--s3-access-key-id="${accessKey}"`, - 
`--s3-secret-access-key="${secretAccessKey}"`, - `--s3-region="${region}"`, - `--s3-endpoint="${endpoint}"`, - "--s3-no-check-bucket", - "--s3-force-path-style", - "--retries 1", - "--low-level-retries 1", - "--timeout 10s", - "--contimeout 5s", - ]; - if (provider) { - rcloneFlags.unshift(`--s3-provider="${provider}"`); - } - const rcloneDestination = `:s3:${bucket}`; - const rcloneCommand = `rclone ls ${rcloneFlags.join(" ")} "${rcloneDestination}"`; + const rcloneCommand = await buildTestCommand(input); if (IS_CLOUD && !input.serverId) { throw new TRPCError({ @@ -86,7 +146,7 @@ export const destinationRouter = createTRPCRouter({ message: error instanceof Error ? error?.message - : "Error connecting to bucket", + : "Error connecting to destination", cause: error, }); } @@ -147,8 +207,12 @@ export const destinationRouter = createTRPCRouter({ message: "You are not allowed to update this destination", }); } + const data = { ...input }; + if (data.destinationType !== "s3" && data.password) { + data.password = await obscureRclonePassword(data.password); + } const result = await updateDestinationById(input.destinationId, { - ...input, + ...data, organizationId: ctx.session.activeOrganizationId, }); await audit(ctx, { diff --git a/packages/server/src/db/schema/destination.ts b/packages/server/src/db/schema/destination.ts index 8e51aef919..57c95f7089 100644 --- a/packages/server/src/db/schema/destination.ts +++ b/packages/server/src/db/schema/destination.ts @@ -1,5 +1,5 @@ import { relations } from "drizzle-orm"; -import { pgTable, text, timestamp } from "drizzle-orm/pg-core"; +import { integer, pgTable, text, timestamp } from "drizzle-orm/pg-core"; import { createInsertSchema } from "drizzle-zod"; import { nanoid } from "nanoid"; import { z } from "zod"; @@ -13,11 +13,17 @@ export const destinations = pgTable("destination", { .$defaultFn(() => nanoid()), name: text("name").notNull(), provider: text("provider"), - accessKey: text("accessKey").notNull(), - 
secretAccessKey: text("secretAccessKey").notNull(), - bucket: text("bucket").notNull(), - region: text("region").notNull(), - endpoint: text("endpoint").notNull(), + accessKey: text("accessKey").notNull().default(""), + secretAccessKey: text("secretAccessKey").notNull().default(""), + bucket: text("bucket").notNull().default(""), + region: text("region").notNull().default(""), + endpoint: text("endpoint").notNull().default(""), + destinationType: text("destinationType").notNull().default("s3"), + host: text("host"), + port: integer("port"), + username: text("username"), + password: text("password"), + basePath: text("basePath"), organizationId: text("organizationId") .notNull() .references(() => organization.id, { onDelete: "cascade" }), @@ -58,7 +64,32 @@ export const apiCreateDestination = createSchema }) .required() .extend({ + destinationType: z.enum(["s3", "sftp", "ftp"]).default("s3"), + host: z.string().optional(), + port: z.number().optional(), + username: z.string().optional(), + password: z.string().optional(), + basePath: z.string().optional(), serverId: z.string().optional(), + }) + .superRefine((data, ctx) => { + if (data.destinationType === "s3") { + if (!data.accessKey) + ctx.addIssue({ code: "custom", path: ["accessKey"], message: "Access Key is required" }); + if (!data.secretAccessKey) + ctx.addIssue({ code: "custom", path: ["secretAccessKey"], message: "Secret Access Key is required" }); + if (!data.bucket) + ctx.addIssue({ code: "custom", path: ["bucket"], message: "Bucket is required" }); + if (!data.endpoint) + ctx.addIssue({ code: "custom", path: ["endpoint"], message: "Endpoint is required" }); + } else { + if (!data.host) + ctx.addIssue({ code: "custom", path: ["host"], message: "Host is required" }); + if (!data.username) + ctx.addIssue({ code: "custom", path: ["username"], message: "Username is required" }); + if (!data.password) + ctx.addIssue({ code: "custom", path: ["password"], message: "Password is required" }); + } }); export const 
apiFindOneDestination = z.object({ @@ -84,5 +115,30 @@ export const apiUpdateDestination = createSchema }) .required() .extend({ + destinationType: z.enum(["s3", "sftp", "ftp"]).default("s3"), + host: z.string().optional(), + port: z.number().optional(), + username: z.string().optional(), + password: z.string().optional(), + basePath: z.string().optional(), serverId: z.string().optional(), + }) + .superRefine((data, ctx) => { + if (data.destinationType === "s3") { + if (!data.accessKey) + ctx.addIssue({ code: "custom", path: ["accessKey"], message: "Access Key is required" }); + if (!data.secretAccessKey) + ctx.addIssue({ code: "custom", path: ["secretAccessKey"], message: "Secret Access Key is required" }); + if (!data.bucket) + ctx.addIssue({ code: "custom", path: ["bucket"], message: "Bucket is required" }); + if (!data.endpoint) + ctx.addIssue({ code: "custom", path: ["endpoint"], message: "Endpoint is required" }); + } else { + if (!data.host) + ctx.addIssue({ code: "custom", path: ["host"], message: "Host is required" }); + if (!data.username) + ctx.addIssue({ code: "custom", path: ["username"], message: "Username is required" }); + if (!data.password) + ctx.addIssue({ code: "custom", path: ["password"], message: "Password is required" }); + } }); diff --git a/packages/server/src/utils/backups/compose.ts b/packages/server/src/utils/backups/compose.ts index 34f6d2a9b4..3e801e158f 100644 --- a/packages/server/src/utils/backups/compose.ts +++ b/packages/server/src/utils/backups/compose.ts @@ -8,7 +8,7 @@ import { findEnvironmentById } from "@dokploy/server/services/environment"; import { findProjectById } from "@dokploy/server/services/project"; import { sendDatabaseBackupNotifications } from "../notifications/database-backup"; import { execAsync, execAsyncRemote } from "../process/execAsync"; -import { getBackupCommand, getS3Credentials, normalizeS3Path } from "./utils"; +import { getBackupCommand, getRcloneFlags, getRcloneRemote, normalizeS3Path } from 
"./utils"; export const runComposeBackup = async ( compose: Compose, @@ -29,8 +29,8 @@ export const runComposeBackup = async ( }); try { - const rcloneFlags = getS3Credentials(destination); - const rcloneDestination = `:s3:${destination.bucket}/${bucketDestination}`; + const rcloneFlags = getRcloneFlags(destination); + const rcloneDestination = getRcloneRemote(destination, bucketDestination); const rcloneCommand = `rclone rcat ${rcloneFlags.join(" ")} "${rcloneDestination}"`; const backupCommand = getBackupCommand( diff --git a/packages/server/src/utils/backups/index.ts b/packages/server/src/utils/backups/index.ts index 71eeda7ea5..89f9a34168 100644 --- a/packages/server/src/utils/backups/index.ts +++ b/packages/server/src/utils/backups/index.ts @@ -10,7 +10,7 @@ import { startLogCleanup } from "../access-log/handler"; import { cleanupAll } from "../docker/utils"; import { sendDockerCleanupNotifications } from "../notifications/docker-cleanup"; import { execAsync, execAsyncRemote } from "../process/execAsync"; -import { getS3Credentials, normalizeS3Path, scheduleBackup } from "./utils"; +import { getRcloneFlags, getRcloneRemote, normalizeS3Path, scheduleBackup } from "./utils"; export const initCronJobs = async () => { console.log("Setting up cron jobs...."); @@ -129,9 +129,9 @@ export const keepLatestNBackups = async ( if (!backup.keepLatestCount) return; try { - const rcloneFlags = getS3Credentials(backup.destination); + const rcloneFlags = getRcloneFlags(backup.destination); const appName = getServiceAppName(backup); - const backupFilesPath = `:s3:${backup.destination.bucket}/${appName}/${normalizeS3Path(backup.prefix)}`; + const backupFilesPath = getRcloneRemote(backup.destination, `${appName}/${normalizeS3Path(backup.prefix)}`); // --include "*.sql.gz" or "*.zip" ensures nothing else other than the dokploy backup files are touched by rclone const rcloneList = `rclone lsf ${rcloneFlags.join(" ")} --include "*${backup.databaseType === "web-server" ? 
".zip" : ".sql.gz"}" ${backupFilesPath}`; diff --git a/packages/server/src/utils/backups/mariadb.ts b/packages/server/src/utils/backups/mariadb.ts index 089b3cb046..b1ee6a21af 100644 --- a/packages/server/src/utils/backups/mariadb.ts +++ b/packages/server/src/utils/backups/mariadb.ts @@ -8,7 +8,7 @@ import type { Mariadb } from "@dokploy/server/services/mariadb"; import { findProjectById } from "@dokploy/server/services/project"; import { sendDatabaseBackupNotifications } from "../notifications/database-backup"; import { execAsync, execAsyncRemote } from "../process/execAsync"; -import { getBackupCommand, getS3Credentials, normalizeS3Path } from "./utils"; +import { getBackupCommand, getRcloneFlags, getRcloneRemote, normalizeS3Path } from "./utils"; export const runMariadbBackup = async ( mariadb: Mariadb, @@ -27,8 +27,8 @@ export const runMariadbBackup = async ( description: "MariaDB Backup", }); try { - const rcloneFlags = getS3Credentials(destination); - const rcloneDestination = `:s3:${destination.bucket}/${bucketDestination}`; + const rcloneFlags = getRcloneFlags(destination); + const rcloneDestination = getRcloneRemote(destination, bucketDestination); const rcloneCommand = `rclone rcat ${rcloneFlags.join(" ")} "${rcloneDestination}"`; const backupCommand = getBackupCommand( diff --git a/packages/server/src/utils/backups/mongo.ts b/packages/server/src/utils/backups/mongo.ts index d1b04e68b5..9fb7b412c6 100644 --- a/packages/server/src/utils/backups/mongo.ts +++ b/packages/server/src/utils/backups/mongo.ts @@ -8,7 +8,7 @@ import type { Mongo } from "@dokploy/server/services/mongo"; import { findProjectById } from "@dokploy/server/services/project"; import { sendDatabaseBackupNotifications } from "../notifications/database-backup"; import { execAsync, execAsyncRemote } from "../process/execAsync"; -import { getBackupCommand, getS3Credentials, normalizeS3Path } from "./utils"; +import { getBackupCommand, getRcloneFlags, getRcloneRemote, normalizeS3Path } from 
"./utils"; export const runMongoBackup = async (mongo: Mongo, backup: BackupSchedule) => { const { environmentId, name, appName } = mongo; @@ -24,8 +24,8 @@ export const runMongoBackup = async (mongo: Mongo, backup: BackupSchedule) => { description: "MongoDB Backup", }); try { - const rcloneFlags = getS3Credentials(destination); - const rcloneDestination = `:s3:${destination.bucket}/${bucketDestination}`; + const rcloneFlags = getRcloneFlags(destination); + const rcloneDestination = getRcloneRemote(destination, bucketDestination); const rcloneCommand = `rclone rcat ${rcloneFlags.join(" ")} "${rcloneDestination}"`; const backupCommand = getBackupCommand( diff --git a/packages/server/src/utils/backups/mysql.ts b/packages/server/src/utils/backups/mysql.ts index 461a17bf90..8b9f3f19f2 100644 --- a/packages/server/src/utils/backups/mysql.ts +++ b/packages/server/src/utils/backups/mysql.ts @@ -8,7 +8,7 @@ import type { MySql } from "@dokploy/server/services/mysql"; import { findProjectById } from "@dokploy/server/services/project"; import { sendDatabaseBackupNotifications } from "../notifications/database-backup"; import { execAsync, execAsyncRemote } from "../process/execAsync"; -import { getBackupCommand, getS3Credentials, normalizeS3Path } from "./utils"; +import { getBackupCommand, getRcloneFlags, getRcloneRemote, normalizeS3Path } from "./utils"; export const runMySqlBackup = async (mysql: MySql, backup: BackupSchedule) => { const { environmentId, name, appName } = mysql; @@ -25,8 +25,8 @@ export const runMySqlBackup = async (mysql: MySql, backup: BackupSchedule) => { }); try { - const rcloneFlags = getS3Credentials(destination); - const rcloneDestination = `:s3:${destination.bucket}/${bucketDestination}`; + const rcloneFlags = getRcloneFlags(destination); + const rcloneDestination = getRcloneRemote(destination, bucketDestination); const rcloneCommand = `rclone rcat ${rcloneFlags.join(" ")} "${rcloneDestination}"`; diff --git 
a/packages/server/src/utils/backups/postgres.ts b/packages/server/src/utils/backups/postgres.ts index 3371b0cf9a..2bc2678061 100644 --- a/packages/server/src/utils/backups/postgres.ts +++ b/packages/server/src/utils/backups/postgres.ts @@ -8,7 +8,7 @@ import type { Postgres } from "@dokploy/server/services/postgres"; import { findProjectById } from "@dokploy/server/services/project"; import { sendDatabaseBackupNotifications } from "../notifications/database-backup"; import { execAsync, execAsyncRemote } from "../process/execAsync"; -import { getBackupCommand, getS3Credentials, normalizeS3Path } from "./utils"; +import { getBackupCommand, getRcloneFlags, getRcloneRemote, normalizeS3Path } from "./utils"; export const runPostgresBackup = async ( postgres: Postgres, @@ -28,8 +28,8 @@ export const runPostgresBackup = async ( const backupFileName = `${new Date().toISOString()}.sql.gz`; const bucketDestination = `${appName}/${normalizeS3Path(prefix)}${backupFileName}`; try { - const rcloneFlags = getS3Credentials(destination); - const rcloneDestination = `:s3:${destination.bucket}/${bucketDestination}`; + const rcloneFlags = getRcloneFlags(destination); + const rcloneDestination = getRcloneRemote(destination, bucketDestination); const rcloneCommand = `rclone rcat ${rcloneFlags.join(" ")} "${rcloneDestination}"`; diff --git a/packages/server/src/utils/backups/utils.ts b/packages/server/src/utils/backups/utils.ts index f30577a53b..8c0ec9dec5 100644 --- a/packages/server/src/utils/backups/utils.ts +++ b/packages/server/src/utils/backups/utils.ts @@ -2,6 +2,7 @@ import { logger } from "@dokploy/server/lib/logger"; import type { BackupSchedule } from "@dokploy/server/services/backup"; import type { Destination } from "@dokploy/server/services/destination"; import { scheduledJobs, scheduleJob } from "node-schedule"; +import { execFileAsync } from "../process/execAsync"; import { keepLatestNBackups } from "."; import { runComposeBackup } from "./compose"; import { 
runMariadbBackup } from "./mariadb"; @@ -52,12 +53,15 @@ export const removeScheduleBackup = (backupId: string) => { }; export const normalizeS3Path = (prefix: string) => { - // Trim whitespace and remove leading/trailing slashes const normalizedPrefix = prefix.trim().replace(/^\/+|\/+$/g, ""); - // Return empty string if prefix is empty, otherwise append trailing slash return normalizedPrefix ? `${normalizedPrefix}/` : ""; }; +export const obscureRclonePassword = async (plainPassword: string): Promise => { + const { stdout } = await execFileAsync("rclone", ["obscure", plainPassword]); + return stdout.trim(); +}; + export const getS3Credentials = (destination: Destination) => { const { accessKey, secretAccessKey, region, endpoint, provider } = destination; @@ -77,6 +81,47 @@ export const getS3Credentials = (destination: Destination) => { return rcloneFlags; }; +export const getRcloneFlags = (destination: Destination): string[] => { + const type = (destination as any).destinationType ?? "s3"; + if (type === "sftp") { + const flags = [ + `--sftp-host="${(destination as any).host}"`, + `--sftp-user="${(destination as any).username}"`, + `--sftp-pass="${(destination as any).password}"`, + ]; + if ((destination as any).port) flags.push(`--sftp-port="${(destination as any).port}"`); + return flags; + } + if (type === "ftp") { + const flags = [ + `--ftp-host="${(destination as any).host}"`, + `--ftp-user="${(destination as any).username}"`, + `--ftp-pass="${(destination as any).password}"`, + ]; + if ((destination as any).port) flags.push(`--ftp-port="${(destination as any).port}"`); + return flags; + } + return getS3Credentials(destination); +}; + +export const getRcloneRemote = (destination: Destination, remotePath: string): string => { + const type = (destination as any).destinationType ?? "s3"; + if (type === "sftp") { + const base = (destination as any).basePath + ? (destination as any).basePath.replace(/^\/+|\/+$/g, "") + "/" + : ""; + return remotePath ? 
`:sftp:${base}${remotePath}` : `:sftp:${base}`; + } + if (type === "ftp") { + const base = (destination as any).basePath + ? (destination as any).basePath.replace(/^\/+|\/+$/g, "") + "/" + : ""; + return remotePath ? `:ftp:${base}${remotePath}` : `:ftp:${base}`; + } + const bucket = destination.bucket; + return remotePath ? `:s3:${bucket}/${remotePath}` : `:s3:${bucket}`; +}; + export const getPostgresBackupCommand = ( database: string, databaseUser: string, @@ -255,16 +300,16 @@ export const getBackupCommand = ( } echo "[$(date)] ✅ backup completed successfully" >> ${logPath}; - echo "[$(date)] Starting upload to S3..." >> ${logPath}; + echo "[$(date)] Starting upload to destination..." >> ${logPath}; # Run the upload command and capture the exit status UPLOAD_OUTPUT=$(${backupCommand} | ${rcloneCommand} 2>&1 >/dev/null) || { - echo "[$(date)] ❌ Error: Upload to S3 failed" >> ${logPath}; + echo "[$(date)] ❌ Error: Upload to destination failed" >> ${logPath}; echo "Error: $UPLOAD_OUTPUT" >> ${logPath}; exit 1; } - echo "[$(date)] ✅ Upload to S3 completed successfully" >> ${logPath}; + echo "[$(date)] ✅ Upload to destination completed successfully" >> ${logPath}; echo "Backup done ✅" >> ${logPath}; `; }; diff --git a/packages/server/src/utils/backups/web-server.ts b/packages/server/src/utils/backups/web-server.ts index a6ab20a8c5..0d5229a2a9 100644 --- a/packages/server/src/utils/backups/web-server.ts +++ b/packages/server/src/utils/backups/web-server.ts @@ -10,7 +10,7 @@ import { } from "@dokploy/server/services/deployment"; import { findDestinationById } from "@dokploy/server/services/destination"; import { execAsync } from "../process/execAsync"; -import { getS3Credentials, normalizeS3Path } from "./utils"; +import { getRcloneFlags, getRcloneRemote, normalizeS3Path } from "./utils"; export const runWebServerBackup = async (backup: BackupSchedule) => { if (IS_CLOUD) { @@ -26,12 +26,15 @@ export const runWebServerBackup = async (backup: BackupSchedule) => { try { 
const destination = await findDestinationById(backup.destinationId); - const rcloneFlags = getS3Credentials(destination); + const rcloneFlags = getRcloneFlags(destination); const timestamp = new Date().toISOString().replace(/[:.]/g, "-"); const { BASE_PATH } = paths(); const tempDir = await mkdtemp(join(tmpdir(), "dokploy-backup-")); const backupFileName = `webserver-backup-${timestamp}.zip`; - const s3Path = `:s3:${destination.bucket}/${backup.appName}/${normalizeS3Path(backup.prefix)}${backupFileName}`; + const remotePath = getRcloneRemote( + destination, + `${backup.appName}/${normalizeS3Path(backup.prefix)}${backupFileName}`, + ); try { await execAsync(`mkdir -p ${tempDir}/filesystem`); @@ -79,10 +82,10 @@ export const runWebServerBackup = async (backup: BackupSchedule) => { writeStream.write("Zipped database and filesystem\n"); - const uploadCommand = `rclone copyto ${rcloneFlags.join(" ")} "${tempDir}/${backupFileName}" "${s3Path}"`; - writeStream.write("Running command to upload backup to S3\n"); + const uploadCommand = `rclone copyto ${rcloneFlags.join(" ")} "${tempDir}/${backupFileName}" "${remotePath}"`; + writeStream.write("Running command to upload backup to destination\n"); await execAsync(uploadCommand); - writeStream.write("Uploaded backup to S3 ✅\n"); + writeStream.write("Uploaded backup to destination ✅\n"); writeStream.end(); await updateDeploymentStatus(deployment.deploymentId, "done"); return true; diff --git a/packages/server/src/utils/restore/compose.ts b/packages/server/src/utils/restore/compose.ts index 10797a51d6..f5ad56fbe3 100644 --- a/packages/server/src/utils/restore/compose.ts +++ b/packages/server/src/utils/restore/compose.ts @@ -2,7 +2,7 @@ import type { apiRestoreBackup } from "@dokploy/server/db/schema"; import type { Compose } from "@dokploy/server/services/compose"; import type { Destination } from "@dokploy/server/services/destination"; import type { z } from "zod"; -import { getS3Credentials } from "../backups/utils"; +import { 
getRcloneFlags, getRcloneRemote } from "../backups/utils"; import { execAsync, execAsyncRemote } from "../process/execAsync"; import { getRestoreCommand } from "./utils"; @@ -23,9 +23,8 @@ export const restoreComposeBackup = async ( } const { serverId, appName, composeType } = compose; - const rcloneFlags = getS3Credentials(destination); - const bucketPath = `:s3:${destination.bucket}`; - const backupPath = `${bucketPath}/${backupInput.backupFile}`; + const rcloneFlags = getRcloneFlags(destination); + const backupPath = getRcloneRemote(destination, backupInput.backupFile); let rcloneCommand = `rclone cat ${rcloneFlags.join(" ")} "${backupPath}" | gunzip`; if (backupInput.metadata?.mongo) { diff --git a/packages/server/src/utils/restore/mariadb.ts b/packages/server/src/utils/restore/mariadb.ts index ffbceba765..7b5bc38faa 100644 --- a/packages/server/src/utils/restore/mariadb.ts +++ b/packages/server/src/utils/restore/mariadb.ts @@ -2,7 +2,7 @@ import type { apiRestoreBackup } from "@dokploy/server/db/schema"; import type { Destination } from "@dokploy/server/services/destination"; import type { Mariadb } from "@dokploy/server/services/mariadb"; import type { z } from "zod"; -import { getS3Credentials } from "../backups/utils"; +import { getRcloneFlags, getRcloneRemote } from "../backups/utils"; import { execAsync, execAsyncRemote } from "../process/execAsync"; import { getRestoreCommand } from "./utils"; @@ -15,9 +15,8 @@ export const restoreMariadbBackup = async ( try { const { appName, serverId, databaseUser, databasePassword } = mariadb; - const rcloneFlags = getS3Credentials(destination); - const bucketPath = `:s3:${destination.bucket}`; - const backupPath = `${bucketPath}/${backupInput.backupFile}`; + const rcloneFlags = getRcloneFlags(destination); + const backupPath = getRcloneRemote(destination, backupInput.backupFile); const rcloneCommand = `rclone cat ${rcloneFlags.join(" ")} "${backupPath}" | gunzip`; diff --git 
a/packages/server/src/utils/restore/mongo.ts b/packages/server/src/utils/restore/mongo.ts index 4329a49857..8cd3fd1dad 100644 --- a/packages/server/src/utils/restore/mongo.ts +++ b/packages/server/src/utils/restore/mongo.ts @@ -2,7 +2,7 @@ import type { apiRestoreBackup } from "@dokploy/server/db/schema"; import type { Destination } from "@dokploy/server/services/destination"; import type { Mongo } from "@dokploy/server/services/mongo"; import type { z } from "zod"; -import { getS3Credentials } from "../backups/utils"; +import { getRcloneFlags, getRcloneRemote } from "../backups/utils"; import { execAsync, execAsyncRemote } from "../process/execAsync"; import { getRestoreCommand } from "./utils"; @@ -15,9 +15,8 @@ export const restoreMongoBackup = async ( try { const { appName, databasePassword, databaseUser, serverId } = mongo; - const rcloneFlags = getS3Credentials(destination); - const bucketPath = `:s3:${destination.bucket}`; - const backupPath = `${bucketPath}/${backupInput.backupFile}`; + const rcloneFlags = getRcloneFlags(destination); + const backupPath = getRcloneRemote(destination, backupInput.backupFile); const rcloneCommand = `rclone copy ${rcloneFlags.join(" ")} "${backupPath}"`; const command = getRestoreCommand({ diff --git a/packages/server/src/utils/restore/mysql.ts b/packages/server/src/utils/restore/mysql.ts index f5187242cf..8e4c662de5 100644 --- a/packages/server/src/utils/restore/mysql.ts +++ b/packages/server/src/utils/restore/mysql.ts @@ -2,7 +2,7 @@ import type { apiRestoreBackup } from "@dokploy/server/db/schema"; import type { Destination } from "@dokploy/server/services/destination"; import type { MySql } from "@dokploy/server/services/mysql"; import type { z } from "zod"; -import { getS3Credentials } from "../backups/utils"; +import { getRcloneFlags, getRcloneRemote } from "../backups/utils"; import { execAsync, execAsyncRemote } from "../process/execAsync"; import { getRestoreCommand } from "./utils"; @@ -15,9 +15,8 @@ export const 
restoreMySqlBackup = async ( try { const { appName, databaseRootPassword, serverId } = mysql; - const rcloneFlags = getS3Credentials(destination); - const bucketPath = `:s3:${destination.bucket}`; - const backupPath = `${bucketPath}/${backupInput.backupFile}`; + const rcloneFlags = getRcloneFlags(destination); + const backupPath = getRcloneRemote(destination, backupInput.backupFile); const rcloneCommand = `rclone cat ${rcloneFlags.join(" ")} "${backupPath}" | gunzip`; diff --git a/packages/server/src/utils/restore/postgres.ts b/packages/server/src/utils/restore/postgres.ts index 19f32989f0..7e95167109 100644 --- a/packages/server/src/utils/restore/postgres.ts +++ b/packages/server/src/utils/restore/postgres.ts @@ -2,7 +2,7 @@ import type { apiRestoreBackup } from "@dokploy/server/db/schema"; import type { Destination } from "@dokploy/server/services/destination"; import type { Postgres } from "@dokploy/server/services/postgres"; import type { z } from "zod"; -import { getS3Credentials } from "../backups/utils"; +import { getRcloneFlags, getRcloneRemote } from "../backups/utils"; import { execAsync, execAsyncRemote } from "../process/execAsync"; import { getRestoreCommand } from "./utils"; @@ -15,10 +15,8 @@ export const restorePostgresBackup = async ( try { const { appName, databaseUser, serverId } = postgres; - const rcloneFlags = getS3Credentials(destination); - const bucketPath = `:s3:${destination.bucket}`; - - const backupPath = `${bucketPath}/${backupInput.backupFile}`; + const rcloneFlags = getRcloneFlags(destination); + const backupPath = getRcloneRemote(destination, backupInput.backupFile); const rcloneCommand = `rclone cat ${rcloneFlags.join(" ")} "${backupPath}" | gunzip`; diff --git a/packages/server/src/utils/restore/web-server.ts b/packages/server/src/utils/restore/web-server.ts index 683a1898ae..8167802a24 100644 --- a/packages/server/src/utils/restore/web-server.ts +++ b/packages/server/src/utils/restore/web-server.ts @@ -3,7 +3,7 @@ import { tmpdir 
} from "node:os"; import { join } from "node:path"; import { IS_CLOUD, paths } from "@dokploy/server/constants"; import type { Destination } from "@dokploy/server/services/destination"; -import { getS3Credentials } from "../backups/utils"; +import { getRcloneFlags, getRcloneRemote } from "../backups/utils"; import { execAsync } from "../process/execAsync"; export const restoreWebServerBackup = async ( @@ -15,9 +15,8 @@ export const restoreWebServerBackup = async ( return; } try { - const rcloneFlags = getS3Credentials(destination); - const bucketPath = `:s3:${destination.bucket}`; - const backupPath = `${bucketPath}/${backupFile}`; + const rcloneFlags = getRcloneFlags(destination); + const backupPath = getRcloneRemote(destination, backupFile); const { BASE_PATH } = paths(); // Create a temporary directory outside of BASE_PATH @@ -33,7 +32,7 @@ export const restoreWebServerBackup = async ( await execAsync(`mkdir -p ${tempDir}`); // Download backup from S3 - emit("Downloading backup from S3..."); + emit("Downloading backup from destination..."); await execAsync( `rclone copyto ${rcloneFlags.join(" ")} "${backupPath}" "${tempDir}/${backupFile}"`, ); diff --git a/packages/server/src/utils/volume-backups/backup.ts b/packages/server/src/utils/volume-backups/backup.ts index 79c49c81a3..6c2cc7d9b1 100644 --- a/packages/server/src/utils/volume-backups/backup.ts +++ b/packages/server/src/utils/volume-backups/backup.ts @@ -2,7 +2,7 @@ import path from "node:path"; import { paths } from "@dokploy/server/constants"; import { findComposeById } from "@dokploy/server/services/compose"; import type { findVolumeBackupById } from "@dokploy/server/services/volume-backups"; -import { getS3Credentials, normalizeS3Path } from "../backups/utils"; +import { getRcloneFlags, getRcloneRemote, normalizeS3Path } from "../backups/utils"; export const getVolumeServiceAppName = ( volumeBackup: Awaited<ReturnType<typeof findVolumeBackupById>>, @@ -33,8 +33,8 @@ export const backupVolume = async ( const s3AppName =
getVolumeServiceAppName(volumeBackup); const backupFileName = `${volumeName}-${new Date().toISOString()}.tar`; const bucketDestination = `${s3AppName}/${normalizeS3Path(prefix || "")}${backupFileName}`; - const rcloneFlags = getS3Credentials(volumeBackup.destination); - const rcloneDestination = `:s3:${destination.bucket}/${bucketDestination}`; + const rcloneFlags = getRcloneFlags(volumeBackup.destination); + const rcloneDestination = getRcloneRemote(destination, bucketDestination); const volumeBackupPath = path.join(VOLUME_BACKUPS_PATH, volumeBackup.appName); const rcloneCommand = `rclone copyto ${rcloneFlags.join(" ")} "${volumeBackupPath}/${backupFileName}" "${rcloneDestination}"`; diff --git a/packages/server/src/utils/volume-backups/restore.ts b/packages/server/src/utils/volume-backups/restore.ts index 6f6068cafc..8566233188 100644 --- a/packages/server/src/utils/volume-backups/restore.ts +++ b/packages/server/src/utils/volume-backups/restore.ts @@ -3,9 +3,9 @@ import { findApplicationById, findComposeById, findDestinationById, - getS3Credentials, paths, } from "../.."; +import { getRcloneFlags, getRcloneRemote } from "../backups/utils"; export const restoreVolume = async ( id: string, @@ -18,9 +18,8 @@ export const restoreVolume = async ( const destination = await findDestinationById(destinationId); const { VOLUME_BACKUPS_PATH } = paths(!!serverId); const volumeBackupPath = path.join(VOLUME_BACKUPS_PATH, volumeName); - const rcloneFlags = getS3Credentials(destination); - const bucketPath = `:s3:${destination.bucket}`; - const backupPath = `${bucketPath}/${backupFileName}`; + const rcloneFlags = getRcloneFlags(destination); + const backupPath = getRcloneRemote(destination, backupFileName); // Command to download backup file from S3 const downloadCommand = `rclone copyto ${rcloneFlags.join(" ")} "${backupPath}" "${volumeBackupPath}/${backupFileName}"`; diff --git a/packages/server/src/utils/volume-backups/utils.ts 
b/packages/server/src/utils/volume-backups/utils.ts index 6a51e765d7..03de68b3dc 100644 --- a/packages/server/src/utils/volume-backups/utils.ts +++ b/packages/server/src/utils/volume-backups/utils.ts @@ -10,7 +10,7 @@ import { execAsyncRemote, } from "@dokploy/server/utils/process/execAsync"; import { scheduledJobs, scheduleJob } from "node-schedule"; -import { getS3Credentials, normalizeS3Path } from "../backups/utils"; +import { getRcloneFlags, getRcloneRemote, normalizeS3Path } from "../backups/utils"; import { sendVolumeBackupNotifications } from "../notifications/volume-backup"; import { backupVolume, getVolumeServiceAppName } from "./backup"; @@ -80,9 +80,9 @@ const cleanupOldVolumeBackups = async ( if (!keepLatestCount) return; try { - const rcloneFlags = getS3Credentials(destination); + const rcloneFlags = getRcloneFlags(destination); const s3AppName = getVolumeServiceAppName(volumeBackup); - const backupFilesPath = `:s3:${destination.bucket}/${s3AppName}/${normalizeS3Path(prefix || "")}`; + const backupFilesPath = getRcloneRemote(destination, `${s3AppName}/${normalizeS3Path(prefix || "")}`); const listCommand = `rclone lsf ${rcloneFlags.join(" ")} --include \"${volumeName}-*.tar\" ${backupFilesPath}`; const sortAndPick = `sort -r | tail -n +$((${keepLatestCount}+1)) | xargs -I{}`; const deleteCommand = `rclone delete ${rcloneFlags.join(" ")} ${backupFilesPath}{}`;