From 71ea3c7279a41c5360fec5ba9ffe7d79107809f9 Mon Sep 17 00:00:00 2001 From: Manish Gupta Date: Fri, 11 Jul 2025 15:00:17 +0530 Subject: [PATCH 1/3] refactor: reorganize deployment structure and update build workflows - Restructure deployment directories from deploy/ to deployments/ - Move selfhost files to deployments/cli/community/ - Add new AIO community deployment setup - Update GitHub Actions workflows for new directory structure - Add Caddy proxy configuration for CE deployment - Remove deprecated AIO build files and workflows - Update build context paths in install scripts --- .github/workflows/build-aio-base.yml | 139 --- .github/workflows/build-aio-branch.yml | 207 ----- .github/workflows/build-branch.yml | 127 ++- aio/Dockerfile-app | 182 ---- aio/Dockerfile-base-full | 73 -- aio/Dockerfile-base-slim | 45 - aio/env.sh | 7 - aio/nginx.conf | 72 -- aio/pg-setup.sh | 14 - aio/postgresql.conf | 815 ------------------ aio/supervisord-app | 71 -- aio/supervisord-full-base | 38 - aio/supervisord-slim-base | 14 - apps/proxy/Caddyfile.ce | 34 + apps/proxy/Dockerfile.ce | 14 + deployments/aio/community/Dockerfile | 66 ++ deployments/aio/community/README.md | 174 ++++ deployments/aio/community/build.sh | 155 ++++ deployments/aio/community/start.sh | 169 ++++ deployments/aio/community/supervisor.conf | 115 +++ deployments/aio/community/variables.env | 53 ++ .../cli/community}/README.md | 0 .../cli/community}/build.yml | 0 .../cli/community}/docker-compose.yml | 42 +- .../cli/community}/images/download.png | Bin .../cli/community}/images/migrate-error.png | Bin .../cli/community}/images/restart.png | Bin .../cli/community}/images/started.png | Bin .../cli/community}/images/stopped.png | Bin .../cli/community}/images/upgrade.png | Bin .../cli/community}/install.sh | 4 +- .../cli/community}/migration-0.13-0.14.sh | 0 .../cli/community}/restore-airgapped.sh | 0 .../cli/community}/restore.sh | 0 .../cli/community}/variables.env | 17 +- .../kubernetes/community}/README.md 
| 4 +- .../swarm/community}/swarm.sh | 5 +- 37 files changed, 948 insertions(+), 1708 deletions(-) delete mode 100644 .github/workflows/build-aio-base.yml delete mode 100644 .github/workflows/build-aio-branch.yml delete mode 100644 aio/Dockerfile-app delete mode 100644 aio/Dockerfile-base-full delete mode 100644 aio/Dockerfile-base-slim delete mode 100644 aio/env.sh delete mode 100644 aio/nginx.conf delete mode 100644 aio/pg-setup.sh delete mode 100644 aio/postgresql.conf delete mode 100644 aio/supervisord-app delete mode 100644 aio/supervisord-full-base delete mode 100644 aio/supervisord-slim-base create mode 100644 apps/proxy/Caddyfile.ce create mode 100644 apps/proxy/Dockerfile.ce create mode 100644 deployments/aio/community/Dockerfile create mode 100644 deployments/aio/community/README.md create mode 100755 deployments/aio/community/build.sh create mode 100644 deployments/aio/community/start.sh create mode 100644 deployments/aio/community/supervisor.conf create mode 100644 deployments/aio/community/variables.env rename {deploy/selfhost => deployments/cli/community}/README.md (100%) rename {deploy/selfhost => deployments/cli/community}/build.yml (100%) rename {deploy/selfhost => deployments/cli/community}/docker-compose.yml (91%) rename {deploy/selfhost => deployments/cli/community}/images/download.png (100%) rename {deploy/selfhost => deployments/cli/community}/images/migrate-error.png (100%) rename {deploy/selfhost => deployments/cli/community}/images/restart.png (100%) rename {deploy/selfhost => deployments/cli/community}/images/started.png (100%) rename {deploy/selfhost => deployments/cli/community}/images/stopped.png (100%) rename {deploy/selfhost => deployments/cli/community}/images/upgrade.png (100%) rename {deploy/selfhost => deployments/cli/community}/install.sh (99%) rename {deploy/selfhost => deployments/cli/community}/migration-0.13-0.14.sh (100%) rename {deploy/selfhost => deployments/cli/community}/restore-airgapped.sh (100%) rename 
{deploy/selfhost => deployments/cli/community}/restore.sh (100%) rename {deploy/selfhost => deployments/cli/community}/variables.env (75%) rename {deploy/kubernetes => deployments/kubernetes/community}/README.md (75%) rename {deploy/selfhost => deployments/swarm/community}/swarm.sh (99%) diff --git a/.github/workflows/build-aio-base.yml b/.github/workflows/build-aio-base.yml deleted file mode 100644 index 3fb2958f1fa..00000000000 --- a/.github/workflows/build-aio-base.yml +++ /dev/null @@ -1,139 +0,0 @@ -name: Build AIO Base Image - -on: - workflow_dispatch: - inputs: - base_tag_name: - description: 'Base Tag Name' - required: false - default: '' - -env: - TARGET_BRANCH: ${{ github.ref_name }} - -jobs: - base_build_setup: - name: Build Preparation - runs-on: ubuntu-latest - outputs: - gh_branch_name: ${{ steps.set_env_variables.outputs.TARGET_BRANCH }} - gh_buildx_driver: ${{ steps.set_env_variables.outputs.BUILDX_DRIVER }} - gh_buildx_version: ${{ steps.set_env_variables.outputs.BUILDX_VERSION }} - gh_buildx_platforms: ${{ steps.set_env_variables.outputs.BUILDX_PLATFORMS }} - gh_buildx_endpoint: ${{ steps.set_env_variables.outputs.BUILDX_ENDPOINT }} - image_tag: ${{ steps.set_env_variables.outputs.IMAGE_TAG }} - - steps: - - id: set_env_variables - name: Set Environment Variables - run: | - echo "TARGET_BRANCH=${{ env.TARGET_BRANCH }}" >> $GITHUB_OUTPUT - - if [ "${{ github.event.inputs.base_tag_name }}" != "" ]; then - echo "IMAGE_TAG=${{ github.event.inputs.base_tag_name }}" >> $GITHUB_OUTPUT - elif [ "${{ env.TARGET_BRANCH }}" == "master" ]; then - echo "IMAGE_TAG=latest" >> $GITHUB_OUTPUT - elif [ "${{ env.TARGET_BRANCH }}" == "preview" ]; then - echo "IMAGE_TAG=preview" >> $GITHUB_OUTPUT - else - echo "IMAGE_TAG=develop" >> $GITHUB_OUTPUT - fi - - - if [ "${{ env.TARGET_BRANCH }}" == "master" ]; then - echo "BUILDX_DRIVER=cloud" >> $GITHUB_OUTPUT - echo "BUILDX_VERSION=lab:latest" >> $GITHUB_OUTPUT - echo "BUILDX_PLATFORMS=linux/amd64,linux/arm64" >> 
$GITHUB_OUTPUT - echo "BUILDX_ENDPOINT=makeplane/plane-dev" >> $GITHUB_OUTPUT - else - echo "BUILDX_DRIVER=docker-container" >> $GITHUB_OUTPUT - echo "BUILDX_VERSION=latest" >> $GITHUB_OUTPUT - echo "BUILDX_PLATFORMS=linux/amd64" >> $GITHUB_OUTPUT - echo "BUILDX_ENDPOINT=" >> $GITHUB_OUTPUT - fi - - - id: checkout_files - name: Checkout Files - uses: actions/checkout@v4 - - full_base_build_push: - runs-on: ubuntu-latest - needs: [base_build_setup] - env: - BASE_IMG_TAG: makeplane/plane-aio-base:full-${{ needs.base_build_setup.outputs.image_tag }} - BUILDX_DRIVER: ${{ needs.base_build_setup.outputs.gh_buildx_driver }} - BUILDX_VERSION: ${{ needs.base_build_setup.outputs.gh_buildx_version }} - BUILDX_PLATFORMS: ${{ needs.base_build_setup.outputs.gh_buildx_platforms }} - BUILDX_ENDPOINT: ${{ needs.base_build_setup.outputs.gh_buildx_endpoint }} - steps: - - name: Check out the repo - uses: actions/checkout@v4 - - - name: Login to Docker Hub - uses: docker/login-action@v3 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - with: - driver: ${{ env.BUILDX_DRIVER }} - version: ${{ env.BUILDX_VERSION }} - endpoint: ${{ env.BUILDX_ENDPOINT }} - - - name: Build and Push to Docker Hub - uses: docker/build-push-action@v6.9.0 - with: - context: ./aio - file: ./aio/Dockerfile-base-full - platforms: ${{ env.BUILDX_PLATFORMS }} - tags: ${{ env.BASE_IMG_TAG }} - push: true - cache-from: type=gha - cache-to: type=gha,mode=max - env: - DOCKER_BUILDKIT: 1 - DOCKER_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} - DOCKER_PASSWORD: ${{ secrets.DOCKERHUB_TOKEN }} - - slim_base_build_push: - runs-on: ubuntu-latest - needs: [base_build_setup] - env: - BASE_IMG_TAG: makeplane/plane-aio-base:slim-${{ needs.base_build_setup.outputs.image_tag }} - BUILDX_DRIVER: ${{ needs.base_build_setup.outputs.gh_buildx_driver }} - BUILDX_VERSION: ${{ 
needs.base_build_setup.outputs.gh_buildx_version }} - BUILDX_PLATFORMS: ${{ needs.base_build_setup.outputs.gh_buildx_platforms }} - BUILDX_ENDPOINT: ${{ needs.base_build_setup.outputs.gh_buildx_endpoint }} - steps: - - name: Check out the repo - uses: actions/checkout@v4 - - - name: Login to Docker Hub - uses: docker/login-action@v3 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - with: - driver: ${{ env.BUILDX_DRIVER }} - version: ${{ env.BUILDX_VERSION }} - endpoint: ${{ env.BUILDX_ENDPOINT }} - - - name: Build and Push to Docker Hub - uses: docker/build-push-action@v6.9.0 - with: - context: ./aio - file: ./aio/Dockerfile-base-slim - platforms: ${{ env.BUILDX_PLATFORMS }} - tags: ${{ env.BASE_IMG_TAG }} - push: true - cache-from: type=gha - cache-to: type=gha,mode=max - env: - DOCKER_BUILDKIT: 1 - DOCKER_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} - DOCKER_PASSWORD: ${{ secrets.DOCKERHUB_TOKEN }} diff --git a/.github/workflows/build-aio-branch.yml b/.github/workflows/build-aio-branch.yml deleted file mode 100644 index 3cc288354a8..00000000000 --- a/.github/workflows/build-aio-branch.yml +++ /dev/null @@ -1,207 +0,0 @@ -name: Branch Build AIO - -on: - workflow_dispatch: - inputs: - full: - description: 'Run full build' - type: boolean - required: false - default: false - slim: - description: 'Run slim build' - type: boolean - required: false - default: false - base_tag_name: - description: 'Base Tag Name' - required: false - default: '' - release: - types: [released, prereleased] - -env: - TARGET_BRANCH: ${{ github.ref_name || github.event.release.target_commitish }} - FULL_BUILD_INPUT: ${{ github.event.inputs.full }} - SLIM_BUILD_INPUT: ${{ github.event.inputs.slim }} - -jobs: - branch_build_setup: - name: Build Setup - runs-on: ubuntu-latest - outputs: - gh_branch_name: ${{ steps.set_env_variables.outputs.TARGET_BRANCH }} - flat_branch_name: 
${{ steps.set_env_variables.outputs.FLAT_BRANCH_NAME }} - gh_buildx_driver: ${{ steps.set_env_variables.outputs.BUILDX_DRIVER }} - gh_buildx_version: ${{ steps.set_env_variables.outputs.BUILDX_VERSION }} - gh_buildx_platforms: ${{ steps.set_env_variables.outputs.BUILDX_PLATFORMS }} - gh_buildx_endpoint: ${{ steps.set_env_variables.outputs.BUILDX_ENDPOINT }} - aio_base_tag: ${{ steps.set_env_variables.outputs.AIO_BASE_TAG }} - do_full_build: ${{ steps.set_env_variables.outputs.DO_FULL_BUILD }} - do_slim_build: ${{ steps.set_env_variables.outputs.DO_SLIM_BUILD }} - - steps: - - id: set_env_variables - name: Set Environment Variables - run: | - if [ "${{ env.TARGET_BRANCH }}" == "master" ] || [ "${{ github.event_name }}" == "release" ]; then - echo "BUILDX_DRIVER=cloud" >> $GITHUB_OUTPUT - echo "BUILDX_VERSION=lab:latest" >> $GITHUB_OUTPUT - echo "BUILDX_PLATFORMS=linux/amd64,linux/arm64" >> $GITHUB_OUTPUT - echo "BUILDX_ENDPOINT=makeplane/plane-dev" >> $GITHUB_OUTPUT - - echo "AIO_BASE_TAG=latest" >> $GITHUB_OUTPUT - else - echo "BUILDX_DRIVER=docker-container" >> $GITHUB_OUTPUT - echo "BUILDX_VERSION=latest" >> $GITHUB_OUTPUT - echo "BUILDX_PLATFORMS=linux/amd64" >> $GITHUB_OUTPUT - echo "BUILDX_ENDPOINT=" >> $GITHUB_OUTPUT - - if [ "${{ github.event_name}}" == "workflow_dispatch" ] && [ "${{ github.event.inputs.base_tag_name }}" != "" ]; then - echo "AIO_BASE_TAG=${{ github.event.inputs.base_tag_name }}" >> $GITHUB_OUTPUT - elif [ "${{ env.TARGET_BRANCH }}" == "preview" ]; then - echo "AIO_BASE_TAG=preview" >> $GITHUB_OUTPUT - else - echo "AIO_BASE_TAG=develop" >> $GITHUB_OUTPUT - fi - fi - echo "TARGET_BRANCH=${{ env.TARGET_BRANCH }}" >> $GITHUB_OUTPUT - - if [ "${{ env.FULL_BUILD_INPUT }}" == "true" ] || [ "${{github.event_name}}" == "push" ] || [ "${{github.event_name}}" == "release" ]; then - echo "DO_FULL_BUILD=true" >> $GITHUB_OUTPUT - else - echo "DO_FULL_BUILD=false" >> $GITHUB_OUTPUT - fi - - if [ "${{ env.SLIM_BUILD_INPUT }}" == "true" ] || [ 
"${{github.event_name}}" == "push" ] || [ "${{github.event_name}}" == "release" ]; then - echo "DO_SLIM_BUILD=true" >> $GITHUB_OUTPUT - else - echo "DO_SLIM_BUILD=false" >> $GITHUB_OUTPUT - fi - - FLAT_BRANCH_NAME=$(echo "${{ env.TARGET_BRANCH }}" | sed 's/[^a-zA-Z0-9]/-/g') - echo "FLAT_BRANCH_NAME=$FLAT_BRANCH_NAME" >> $GITHUB_OUTPUT - - - id: checkout_files - name: Checkout Files - uses: actions/checkout@v4 - - full_build_push: - if: ${{ needs.branch_build_setup.outputs.do_full_build == 'true' }} - runs-on: ubuntu-22.04 - needs: [branch_build_setup] - env: - BUILD_TYPE: full - AIO_BASE_TAG: ${{ needs.branch_build_setup.outputs.aio_base_tag }} - AIO_IMAGE_TAGS: makeplane/plane-aio:full-${{ needs.branch_build_setup.outputs.flat_branch_name }} - TARGET_BRANCH: ${{ needs.branch_build_setup.outputs.gh_branch_name }} - BUILDX_DRIVER: ${{ needs.branch_build_setup.outputs.gh_buildx_driver }} - BUILDX_VERSION: ${{ needs.branch_build_setup.outputs.gh_buildx_version }} - BUILDX_PLATFORMS: ${{ needs.branch_build_setup.outputs.gh_buildx_platforms }} - BUILDX_ENDPOINT: ${{ needs.branch_build_setup.outputs.gh_buildx_endpoint }} - steps: - - name: Set Docker Tag - run: | - if [ "${{ github.event_name }}" == "release" ]; then - TAG=makeplane/plane-aio:${{env.BUILD_TYPE}}-stable,makeplane/plane-aio:${{env.BUILD_TYPE}}-${{ github.event.release.tag_name }} - elif [ "${{ env.TARGET_BRANCH }}" == "master" ]; then - TAG=makeplane/plane-aio:${{env.BUILD_TYPE}}-latest - else - TAG=${{ env.AIO_IMAGE_TAGS }} - fi - echo "AIO_IMAGE_TAGS=${TAG}" >> $GITHUB_ENV - - - name: Login to Docker Hub - uses: docker/login-action@v3 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - with: - driver: ${{ env.BUILDX_DRIVER }} - version: ${{ env.BUILDX_VERSION }} - endpoint: ${{ env.BUILDX_ENDPOINT }} - - - name: Check out the repo - uses: actions/checkout@v4 - - - name: Build and Push to 
Docker Hub - uses: docker/build-push-action@v6.9.0 - with: - context: . - file: ./aio/Dockerfile-app - platforms: ${{ env.BUILDX_PLATFORMS }} - tags: ${{ env.AIO_IMAGE_TAGS }} - push: true - build-args: | - BASE_TAG=${{ env.AIO_BASE_TAG }} - BUILD_TYPE=${{env.BUILD_TYPE}} - cache-from: type=gha - cache-to: type=gha,mode=max - - env: - DOCKER_BUILDKIT: 1 - DOCKER_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} - DOCKER_PASSWORD: ${{ secrets.DOCKERHUB_TOKEN }} - - slim_build_push: - if: ${{ needs.branch_build_setup.outputs.do_slim_build == 'true' }} - runs-on: ubuntu-22.04 - needs: [branch_build_setup] - env: - BUILD_TYPE: slim - AIO_BASE_TAG: ${{ needs.branch_build_setup.outputs.aio_base_tag }} - AIO_IMAGE_TAGS: makeplane/plane-aio:slim-${{ needs.branch_build_setup.outputs.flat_branch_name }} - TARGET_BRANCH: ${{ needs.branch_build_setup.outputs.gh_branch_name }} - BUILDX_DRIVER: ${{ needs.branch_build_setup.outputs.gh_buildx_driver }} - BUILDX_VERSION: ${{ needs.branch_build_setup.outputs.gh_buildx_version }} - BUILDX_PLATFORMS: ${{ needs.branch_build_setup.outputs.gh_buildx_platforms }} - BUILDX_ENDPOINT: ${{ needs.branch_build_setup.outputs.gh_buildx_endpoint }} - steps: - - name: Set Docker Tag - run: | - if [ "${{ github.event_name }}" == "release" ]; then - TAG=makeplane/plane-aio:${{env.BUILD_TYPE}}-stable,makeplane/plane-aio:${{env.BUILD_TYPE}}-${{ github.event.release.tag_name }} - elif [ "${{ env.TARGET_BRANCH }}" == "master" ]; then - TAG=makeplane/plane-aio:${{env.BUILD_TYPE}}-latest - else - TAG=${{ env.AIO_IMAGE_TAGS }} - fi - echo "AIO_IMAGE_TAGS=${TAG}" >> $GITHUB_ENV - - - name: Login to Docker Hub - uses: docker/login-action@v3 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - with: - driver: ${{ env.BUILDX_DRIVER }} - version: ${{ env.BUILDX_VERSION }} - endpoint: ${{ env.BUILDX_ENDPOINT }} - - - name: Check out the repo - uses: 
actions/checkout@v4 - - - name: Build and Push to Docker Hub - uses: docker/build-push-action@v6.9.0 - with: - context: . - file: ./aio/Dockerfile-app - platforms: ${{ env.BUILDX_PLATFORMS }} - tags: ${{ env.AIO_IMAGE_TAGS }} - push: true - build-args: | - BASE_TAG=${{ env.AIO_BASE_TAG }} - BUILD_TYPE=${{env.BUILD_TYPE}} - cache-from: type=gha - cache-to: type=gha,mode=max - - env: - DOCKER_BUILDKIT: 1 - DOCKER_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} - DOCKER_PASSWORD: ${{ secrets.DOCKERHUB_TOKEN }} diff --git a/.github/workflows/build-branch.yml b/.github/workflows/build-branch.yml index 3c4125a0feb..acd9348d2e6 100644 --- a/.github/workflows/build-branch.yml +++ b/.github/workflows/build-branch.yml @@ -25,6 +25,11 @@ on: required: false default: false type: boolean + aio_build: + description: "Build for AIO docker image" + required: false + default: false + type: boolean push: branches: - preview @@ -36,6 +41,7 @@ env: BUILD_TYPE: ${{ github.event.inputs.build_type }} RELEASE_VERSION: ${{ github.event.inputs.releaseVersion }} IS_PRERELEASE: ${{ github.event.inputs.isPrerelease }} + AIO_BUILD: ${{ github.event.inputs.aio_build }} jobs: branch_build_setup: @@ -54,11 +60,13 @@ jobs: dh_img_live: ${{ steps.set_env_variables.outputs.DH_IMG_LIVE }} dh_img_backend: ${{ steps.set_env_variables.outputs.DH_IMG_BACKEND }} dh_img_proxy: ${{ steps.set_env_variables.outputs.DH_IMG_PROXY }} + dh_img_aio: ${{ steps.set_env_variables.outputs.DH_IMG_AIO }} build_type: ${{steps.set_env_variables.outputs.BUILD_TYPE}} build_release: ${{ steps.set_env_variables.outputs.BUILD_RELEASE }} build_prerelease: ${{ steps.set_env_variables.outputs.BUILD_PRERELEASE }} release_version: ${{ steps.set_env_variables.outputs.RELEASE_VERSION }} + aio_build: ${{ steps.set_env_variables.outputs.AIO_BUILD }} steps: - id: set_env_variables @@ -84,12 +92,15 @@ jobs: echo "DH_IMG_LIVE=plane-live" >> $GITHUB_OUTPUT echo "DH_IMG_BACKEND=plane-backend" >> $GITHUB_OUTPUT echo "DH_IMG_PROXY=plane-proxy" >> 
$GITHUB_OUTPUT + echo "DH_IMG_AIO=plane-aio-community" >> $GITHUB_OUTPUT echo "BUILD_TYPE=${{env.BUILD_TYPE}}" >> $GITHUB_OUTPUT BUILD_RELEASE=false BUILD_PRERELEASE=false RELVERSION="latest" + BUILD_AIO=${{ env.AIO_BUILD }} + if [ "${{ env.BUILD_TYPE }}" == "Release" ]; then FLAT_RELEASE_VERSION=$(echo "${{ env.RELEASE_VERSION }}" | sed 's/[^a-zA-Z0-9.-]//g') echo "FLAT_RELEASE_VERSION=${FLAT_RELEASE_VERSION}" >> $GITHUB_OUTPUT @@ -108,10 +119,14 @@ jobs: if [ "${{ env.IS_PRERELEASE }}" == "true" ]; then BUILD_PRERELEASE=true fi + + BUILD_AIO=true fi + echo "BUILD_RELEASE=${BUILD_RELEASE}" >> $GITHUB_OUTPUT echo "BUILD_PRERELEASE=${BUILD_PRERELEASE}" >> $GITHUB_OUTPUT echo "RELEASE_VERSION=${RELVERSION}" >> $GITHUB_OUTPUT + echo "AIO_BUILD=${BUILD_AIO}" >> $GITHUB_OUTPUT - id: checkout_files name: Checkout Files @@ -242,12 +257,101 @@ jobs: dockerhub-token: ${{ secrets.DOCKERHUB_TOKEN }} docker-image-owner: makeplane docker-image-name: ${{ needs.branch_build_setup.outputs.dh_img_proxy }} - build-context: ./nginx - dockerfile-path: ./nginx/Dockerfile + build-context: ./apps/proxy + dockerfile-path: ./apps/proxy/Dockerfile.ce + buildx-driver: ${{ needs.branch_build_setup.outputs.gh_buildx_driver }} + buildx-version: ${{ needs.branch_build_setup.outputs.gh_buildx_version }} + buildx-platforms: ${{ needs.branch_build_setup.outputs.gh_buildx_platforms }} + buildx-endpoint: ${{ needs.branch_build_setup.outputs.gh_buildx_endpoint }} + + branch_build_push_aio: + if: ${{ needs.branch_build_setup.outputs.aio_build == 'true' }} + name: Build-Push AIO Docker Image + runs-on: ubuntu-22.04 + needs: [ + branch_build_setup, + branch_build_push_admin, + branch_build_push_web, + branch_build_push_space, + branch_build_push_live, + branch_build_push_api, + branch_build_push_proxy + ] + steps: + - name: Checkout Files + uses: actions/checkout@v4 + + - name: Prepare AIO Assets + id: prepare_aio_assets + run: | + cd deployments/aio/community + + if [ "${{ 
needs.branch_build_setup.outputs.build_type }}" == "Release" ]; then + aio_version=${{ needs.branch_build_setup.outputs.release_version }} + else + aio_version=${{ needs.branch_build_setup.outputs.gh_branch_name }} + fi + bash ./build.sh --release $aio_version + echo "AIO_BUILD_VERSION=${aio_version}" >> $GITHUB_OUTPUT + + - name: Upload AIO Assets + uses: actions/upload-artifact@v4 + with: + path: ./deployments/aio/community/dist + name: aio-assets-dist + + - name: AIO Build and Push + uses: makeplane/actions/build-push@v1.1.0 + with: + build-release: ${{ needs.branch_build_setup.outputs.build_release }} + build-prerelease: ${{ needs.branch_build_setup.outputs.build_prerelease }} + release-version: ${{ needs.branch_build_setup.outputs.release_version }} + dockerhub-username: ${{ secrets.DOCKERHUB_USERNAME }} + dockerhub-token: ${{ secrets.DOCKERHUB_TOKEN }} + docker-image-owner: makeplane + docker-image-name: ${{ needs.branch_build_setup.outputs.dh_img_aio }} + build-context: ./deployments/aio/community + dockerfile-path: ./deployments/aio/community/Dockerfile buildx-driver: ${{ needs.branch_build_setup.outputs.gh_buildx_driver }} buildx-version: ${{ needs.branch_build_setup.outputs.gh_buildx_version }} buildx-platforms: ${{ needs.branch_build_setup.outputs.gh_buildx_platforms }} buildx-endpoint: ${{ needs.branch_build_setup.outputs.gh_buildx_endpoint }} + additional-assets: aio-assets-dist + additional-assets-dir: ./deployments/aio/community/dist + build-args: | + PLANE_VERSION=${{ steps.prepare_aio_assets.outputs.AIO_BUILD_VERSION }} + + upload_build_assets: + name: Upload Build Assets + runs-on: ubuntu-22.04 + needs: [branch_build_setup, branch_build_push_admin, branch_build_push_web, branch_build_push_space, branch_build_push_live, branch_build_push_api, branch_build_push_proxy] + steps: + - name: Checkout Files + uses: actions/checkout@v4 + + - name: Update Assets + run: | + if [ "${{ needs.branch_build_setup.outputs.build_type }}" == "Release" ]; then + 
REL_VERSION=${{ needs.branch_build_setup.outputs.release_version }} + else + REL_VERSION=${{ needs.branch_build_setup.outputs.gh_branch_name }} + fi + + cp ./deployments/cli/community/install.sh deployments/cli/community/setup.sh + sed -i 's/${APP_RELEASE:-stable}/${APP_RELEASE:-'${REL_VERSION}'}/g' deployments/cli/community/docker-compose.yml + # sed -i 's/APP_RELEASE=stable/APP_RELEASE='${REL_VERSION}'/g' deployments/cli/community/variables.env + + - name: Upload Assets + uses: actions/upload-artifact@v4 + with: + name: community-assets + path: | + ./deployments/cli/community/setup.sh + ./deployments/cli/community/restore.sh + ./deployments/cli/community/restore-airgapped.sh + ./deployments/cli/community/docker-compose.yml + ./deployments/cli/community/variables.env + ./deployments/swarm/community/swarm.sh publish_release: if: ${{ needs.branch_build_setup.outputs.build_type == 'Release' }} @@ -271,9 +375,9 @@ jobs: - name: Update Assets run: | - cp ./deploy/selfhost/install.sh deploy/selfhost/setup.sh - sed -i 's/${APP_RELEASE:-stable}/${APP_RELEASE:-'${REL_VERSION}'}/g' deploy/selfhost/docker-compose.yml - # sed -i 's/APP_RELEASE=stable/APP_RELEASE='${REL_VERSION}'/g' deploy/selfhost/variables.env + cp ./deployments/cli/community/install.sh deployments/cli/community/setup.sh + sed -i 's/${APP_RELEASE:-stable}/${APP_RELEASE:-'${REL_VERSION}'}/g' deployments/cli/community/docker-compose.yml + # sed -i 's/APP_RELEASE=stable/APP_RELEASE='${REL_VERSION}'/g' deployments/cli/community/variables.env - name: Create Release id: create_release @@ -287,9 +391,10 @@ jobs: prerelease: ${{ env.IS_PRERELEASE }} generate_release_notes: true files: | - ${{ github.workspace }}/deploy/selfhost/setup.sh - ${{ github.workspace }}/deploy/selfhost/swarm.sh - ${{ github.workspace }}/deploy/selfhost/restore.sh - ${{ github.workspace }}/deploy/selfhost/restore-airgapped.sh - ${{ github.workspace }}/deploy/selfhost/docker-compose.yml - ${{ github.workspace }}/deploy/selfhost/variables.env 
+ ${{ github.workspace }}/deployments/cli/community/setup.sh + ${{ github.workspace }}/deployments/cli/community/restore.sh + ${{ github.workspace }}/deployments/cli/community/restore-airgapped.sh + ${{ github.workspace }}/deployments/cli/community/docker-compose.yml + ${{ github.workspace }}/deployments/cli/community/variables.env + ${{ github.workspace }}/deployments/swarm/community/swarm.sh + diff --git a/aio/Dockerfile-app b/aio/Dockerfile-app deleted file mode 100644 index 6406c23d934..00000000000 --- a/aio/Dockerfile-app +++ /dev/null @@ -1,182 +0,0 @@ -ARG BASE_TAG=develop -ARG BUILD_TYPE=full -# ***************************************************************************** -# STAGE 1: Build the project -# ***************************************************************************** -FROM node:18-alpine AS builder -RUN apk add --no-cache libc6-compat -# Set working directory -WORKDIR /app - -RUN yarn global add turbo -COPY . . - -RUN turbo prune --scope=web --scope=space --scope=admin --docker - -# ***************************************************************************** -# STAGE 2: Install dependencies & build the project -# ***************************************************************************** -# Add lockfile and package.json's of isolated subworkspace -FROM node:18-alpine AS installer - -RUN apk add --no-cache libc6-compat -WORKDIR /app - -# First install the dependencies (as they change less often) -COPY .gitignore .gitignore -COPY --from=builder /app/out/json/ . -COPY --from=builder /app/out/yarn.lock ./yarn.lock -RUN yarn install - -# # Build the project -COPY --from=builder /app/out/full/ . 
-COPY turbo.json turbo.json - -ARG NEXT_PUBLIC_API_BASE_URL="" -ENV NEXT_PUBLIC_API_BASE_URL=$NEXT_PUBLIC_API_BASE_URL - -ARG NEXT_PUBLIC_ADMIN_BASE_URL="" -ENV NEXT_PUBLIC_ADMIN_BASE_URL=$NEXT_PUBLIC_ADMIN_BASE_URL - -ARG NEXT_PUBLIC_ADMIN_BASE_PATH="/god-mode" -ENV NEXT_PUBLIC_ADMIN_BASE_PATH=$NEXT_PUBLIC_ADMIN_BASE_PATH - -ARG NEXT_PUBLIC_SPACE_BASE_URL="" -ENV NEXT_PUBLIC_SPACE_BASE_URL=$NEXT_PUBLIC_SPACE_BASE_URL - -ARG NEXT_PUBLIC_SPACE_BASE_PATH="/spaces" -ENV NEXT_PUBLIC_SPACE_BASE_PATH=$NEXT_PUBLIC_SPACE_BASE_PATH - -ARG NEXT_PUBLIC_WEB_BASE_URL="" -ENV NEXT_PUBLIC_WEB_BASE_URL=$NEXT_PUBLIC_WEB_BASE_URL - -ENV NEXT_TELEMETRY_DISABLED=1 -ENV TURBO_TELEMETRY_DISABLED=1 - -RUN yarn turbo run build --filter=web --filter=space --filter=admin - -# ***************************************************************************** -# STAGE 3: Copy the project and start it -# ***************************************************************************** -FROM makeplane/plane-aio-base:${BUILD_TYPE}-${BASE_TAG} AS runner - -WORKDIR /app - -SHELL [ "/bin/bash", "-c" ] - -# PYTHON APPLICATION SETUP - -ENV PYTHONDONTWRITEBYTECODE=1 -ENV PYTHONUNBUFFERED=1 -ENV PIP_DISABLE_PIP_VERSION_CHECK=1 - -COPY apps/api/requirements.txt ./api/ -COPY apps/api/requirements ./api/requirements - -RUN pip install -r ./api/requirements.txt --compile --no-cache-dir - -# Add in Django deps and generate Django's static files -COPY apps/api/manage.py ./api/manage.py -COPY apps/api/plane ./api/plane/ -COPY apps/api/templates ./api/templates/ -COPY package.json ./api/package.json - -COPY apps/api/bin ./api/bin/ - -RUN chmod +x ./api/bin/* -RUN chmod -R 777 ./api/ - -# NEXTJS BUILDS -COPY --from=installer /app/web/next.config.js ./web/ -COPY --from=installer /app/web/package.json ./web/ -COPY --from=installer /app/web/.next/standalone ./web -COPY --from=installer /app/web/.next/static ./web/web/.next/static -COPY --from=installer /app/web/public ./web/web/public - -COPY --from=installer 
/app/space/next.config.js ./space/ -COPY --from=installer /app/space/package.json ./space/ -COPY --from=installer /app/space/.next/standalone ./space -COPY --from=installer /app/space/.next/static ./space/space/.next/static -COPY --from=installer /app/space/public ./space/space/public - -COPY --from=installer /app/admin/next.config.js ./admin/ -COPY --from=installer /app/admin/package.json ./admin/ -COPY --from=installer /app/admin/.next/standalone ./admin -COPY --from=installer /app/admin/.next/static ./admin/admin/.next/static -COPY --from=installer /app/admin/public ./admin/admin/public - -ARG NEXT_PUBLIC_API_BASE_URL="" -ENV NEXT_PUBLIC_API_BASE_URL=$NEXT_PUBLIC_API_BASE_URL - -ARG NEXT_PUBLIC_ADMIN_BASE_URL="" -ENV NEXT_PUBLIC_ADMIN_BASE_URL=$NEXT_PUBLIC_ADMIN_BASE_URL - -ARG NEXT_PUBLIC_ADMIN_BASE_PATH="/god-mode" -ENV NEXT_PUBLIC_ADMIN_BASE_PATH=$NEXT_PUBLIC_ADMIN_BASE_PATH - -ARG NEXT_PUBLIC_SPACE_BASE_URL="" -ENV NEXT_PUBLIC_SPACE_BASE_URL=$NEXT_PUBLIC_SPACE_BASE_URL - -ARG NEXT_PUBLIC_SPACE_BASE_PATH="/spaces" -ENV NEXT_PUBLIC_SPACE_BASE_PATH=$NEXT_PUBLIC_SPACE_BASE_PATH - -ARG NEXT_PUBLIC_WEB_BASE_URL="" -ENV NEXT_PUBLIC_WEB_BASE_URL=$NEXT_PUBLIC_WEB_BASE_URL - -ENV NEXT_TELEMETRY_DISABLED=1 -ENV TURBO_TELEMETRY_DISABLED=1 - -ARG BUILD_TYPE=full -ENV BUILD_TYPE=$BUILD_TYPE - -COPY aio/supervisord-${BUILD_TYPE}-base /app/supervisord.conf -COPY aio/supervisord-app /app/supervisord-app -RUN cat /app/supervisord-app >> /app/supervisord.conf && \ - rm /app/supervisord-app - -COPY ./aio/nginx.conf /etc/nginx/nginx.conf.template - -# if build type is full, run the below copy pg-setup.sh -COPY aio/postgresql.conf /etc/postgresql/postgresql.conf -COPY aio/pg-setup.sh /app/pg-setup.sh -RUN chmod +x /app/pg-setup.sh - -# ***************************************************************************** -# APPLICATION ENVIRONMENT SETTINGS -# ***************************************************************************** -ENV APP_DOMAIN=localhost -ENV 
WEB_URL=http://${APP_DOMAIN} -ENV DEBUG=0 -ENV CORS_ALLOWED_ORIGINS=http://${APP_DOMAIN},https://${APP_DOMAIN} -# Secret Key -ENV SECRET_KEY=60gp0byfz2dvffa45cxl20p1scy9xbpf6d8c5y0geejgkyp1b5 -# Gunicorn Workers -ENV GUNICORN_WORKERS=1 - -ENV POSTGRES_USER="plane" -ENV POSTGRES_PASSWORD="plane" -ENV POSTGRES_DB="plane" -ENV POSTGRES_HOST="localhost" -ENV POSTGRES_PORT="5432" -ENV DATABASE_URL="postgresql://plane:plane@localhost:5432/plane" - -ENV REDIS_HOST="localhost" -ENV REDIS_PORT="6379" -ENV REDIS_URL="redis://localhost:6379" - -ENV USE_MINIO="1" -ENV AWS_REGION="" -ENV AWS_ACCESS_KEY_ID="access-key" -ENV AWS_SECRET_ACCESS_KEY="secret-key" -ENV AWS_S3_ENDPOINT_URL="http://localhost:9000" -ENV AWS_S3_BUCKET_NAME="uploads" -ENV MINIO_ROOT_USER="access-key" -ENV MINIO_ROOT_PASSWORD="secret-key" -ENV BUCKET_NAME="uploads" -ENV FILE_SIZE_LIMIT="5242880" - -# ***************************************************************************** - -RUN /app/pg-setup.sh - -CMD ["/usr/bin/supervisord", "-c", "/app/supervisord.conf"] diff --git a/aio/Dockerfile-base-full b/aio/Dockerfile-base-full deleted file mode 100644 index 218530948a2..00000000000 --- a/aio/Dockerfile-base-full +++ /dev/null @@ -1,73 +0,0 @@ -FROM --platform=$BUILDPLATFORM tonistiigi/binfmt AS binfmt - -FROM python:3.12-slim - -# Set environment variables to non-interactive for apt -ENV DEBIAN_FRONTEND=noninteractive -ENV BUILD_TYPE=full - -SHELL [ "/bin/bash", "-c" ] - -WORKDIR /app - -RUN mkdir -p /app/{data,logs} && \ - mkdir -p /app/data/{redis,pg,minio,nginx} && \ - mkdir -p /app/logs/{access,error} && \ - mkdir -p /etc/supervisor/conf.d - -# Update the package list and install prerequisites -RUN apt-get update && \ - apt-get install -y \ - gnupg2 curl ca-certificates lsb-release software-properties-common \ - build-essential libssl-dev zlib1g-dev libbz2-dev libreadline-dev \ - libsqlite3-dev wget llvm libncurses5-dev libncursesw5-dev xz-utils \ - tk-dev libffi-dev liblzma-dev supervisor nginx nano vim 
ncdu \ - sudo lsof net-tools libpq-dev procps gettext - -# Install Redis 7.2 -RUN echo "deb http://deb.debian.org/debian $(lsb_release -cs)-backports main" > /etc/apt/sources.list.d/backports.list && \ - curl -fsSL https://packages.redis.io/gpg | gpg --dearmor -o /usr/share/keyrings/redis-archive-keyring.gpg && \ - echo "deb [signed-by=/usr/share/keyrings/redis-archive-keyring.gpg] https://packages.redis.io/deb $(lsb_release -cs) main" > /etc/apt/sources.list.d/redis.list && \ - apt-get update && \ - apt-get install -y redis-server - -# Install PostgreSQL 15 -ENV POSTGRES_VERSION=15 -RUN curl -fsSL https://www.postgresql.org/media/keys/ACCC4CF8.asc | gpg --dearmor -o /usr/share/keyrings/pgdg-archive-keyring.gpg && \ - echo "deb [signed-by=/usr/share/keyrings/pgdg-archive-keyring.gpg] http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list && \ - apt-get update && \ - apt-get install -y postgresql-$POSTGRES_VERSION postgresql-client-$POSTGRES_VERSION && \ - mkdir -p /var/lib/postgresql/data && \ - chown -R postgres:postgres /var/lib/postgresql -COPY postgresql.conf /etc/postgresql/postgresql.conf -RUN sudo -u postgres /usr/lib/postgresql/$POSTGRES_VERSION/bin/initdb -D /var/lib/postgresql/data - -# Install MinIO -ARG TARGETARCH -RUN if [ "$TARGETARCH" = "amd64" ]; then \ - curl -fSl https://dl.min.io/server/minio/release/linux-amd64/minio -o /usr/local/bin/minio; \ - elif [ "$TARGETARCH" = "arm64" ]; then \ - curl -fSl https://dl.min.io/server/minio/release/linux-arm64/minio -o /usr/local/bin/minio; \ - else \ - echo "Unsupported architecture: $TARGETARCH"; exit 1; \ - fi && \ - chmod +x /usr/local/bin/minio - -# Install Node.js 18 -RUN curl -fsSL https://deb.nodesource.com/setup_18.x | bash - && \ - apt-get install -y nodejs && \ - python -m pip install --upgrade pip && \ - apt-get clean && \ - rm -rf /var/lib/apt/lists/* - -# Create Supervisor configuration file -COPY supervisord-full-base /app/supervisord.conf 
-COPY nginx.conf /etc/nginx/nginx.conf.template -COPY env.sh /app/nginx-start.sh -RUN chmod +x /app/nginx-start.sh - -# Expose ports for Redis, PostgreSQL, and MinIO -EXPOSE 6379 5432 9000 80 443 - -# Start Supervisor -CMD ["/usr/bin/supervisord", "-c", "/app/supervisord.conf"] diff --git a/aio/Dockerfile-base-slim b/aio/Dockerfile-base-slim deleted file mode 100644 index c6bc249de69..00000000000 --- a/aio/Dockerfile-base-slim +++ /dev/null @@ -1,45 +0,0 @@ -FROM --platform=$BUILDPLATFORM tonistiigi/binfmt AS binfmt - -FROM python:3.12-slim - -# Set environment variables to non-interactive for apt -ENV DEBIAN_FRONTEND=noninteractive -ENV BUILD_TYPE=slim - -SHELL [ "/bin/bash", "-c" ] - -WORKDIR /app - -RUN mkdir -p /app/{data,logs} && \ - mkdir -p /app/data/{nginx} && \ - mkdir -p /app/logs/{access,error} && \ - mkdir -p /etc/supervisor/conf.d - -# Update the package list and install prerequisites -RUN apt-get update && \ - apt-get install -y \ - gnupg2 curl ca-certificates lsb-release software-properties-common \ - build-essential libssl-dev zlib1g-dev libbz2-dev libreadline-dev \ - libsqlite3-dev wget llvm libncurses5-dev libncursesw5-dev xz-utils \ - tk-dev libffi-dev liblzma-dev supervisor nginx nano vim ncdu \ - sudo lsof net-tools libpq-dev procps gettext - -# Install Node.js 18 -RUN curl -fsSL https://deb.nodesource.com/setup_18.x | bash - && \ - apt-get install -y nodejs - -RUN python -m pip install --upgrade pip && \ - apt-get clean && \ - rm -rf /var/lib/apt/lists/* - -# Create Supervisor configuration file -COPY supervisord-slim-base /app/supervisord.conf -COPY nginx.conf /etc/nginx/nginx.conf.template -COPY env.sh /app/nginx-start.sh -RUN chmod +x /app/nginx-start.sh - -# Expose ports for Redis, PostgreSQL, and MinIO -EXPOSE 80 443 - -# Start Supervisor -CMD ["/usr/bin/supervisord", "-c", "/app/supervisord.conf"] diff --git a/aio/env.sh b/aio/env.sh deleted file mode 100644 index ff5f769fd87..00000000000 --- a/aio/env.sh +++ /dev/null @@ -1,7 +0,0 @@ 
-#!/bin/bash - -export dollar="$" -export http_upgrade="http_upgrade" -export scheme="scheme" -envsubst < /etc/nginx/nginx.conf.template > /etc/nginx/nginx.conf -exec nginx -g 'daemon off;' diff --git a/aio/nginx.conf b/aio/nginx.conf deleted file mode 100644 index 78ae00d28ce..00000000000 --- a/aio/nginx.conf +++ /dev/null @@ -1,72 +0,0 @@ -events { -} - -http { - sendfile on; - - server { - listen 80; - root /www/data/; - access_log /var/log/nginx/access.log; - - client_max_body_size ${FILE_SIZE_LIMIT}; - - add_header X-Content-Type-Options "nosniff" always; - add_header Referrer-Policy "no-referrer-when-downgrade" always; - add_header Permissions-Policy "interest-cohort=()" always; - add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always; - add_header X-Forwarded-Proto "${dollar}scheme"; - add_header X-Forwarded-Host "${dollar}host"; - add_header X-Forwarded-For "${dollar}proxy_add_x_forwarded_for"; - add_header X-Real-IP "${dollar}remote_addr"; - - location / { - proxy_http_version 1.1; - proxy_set_header Upgrade ${dollar}http_upgrade; - proxy_set_header Connection "upgrade"; - proxy_set_header Host ${dollar}http_host; - proxy_pass http://localhost:3001/; - } - - location /spaces/ { - rewrite ^/spaces/?$ /spaces/login break; - proxy_http_version 1.1; - proxy_set_header Upgrade ${dollar}http_upgrade; - proxy_set_header Connection "upgrade"; - proxy_set_header Host ${dollar}http_host; - proxy_pass http://localhost:3002/spaces/; - } - - location /god-mode/ { - proxy_http_version 1.1; - proxy_set_header Upgrade ${dollar}http_upgrade; - proxy_set_header Connection "upgrade"; - proxy_set_header Host ${dollar}http_host; - proxy_pass http://localhost:3003/god-mode/; - } - - location /api/ { - proxy_http_version 1.1; - proxy_set_header Upgrade ${dollar}http_upgrade; - proxy_set_header Connection "upgrade"; - proxy_set_header Host ${dollar}http_host; - proxy_pass http://localhost:8000/api/; - } - - location /auth/ { - proxy_http_version 1.1; - 
proxy_set_header Upgrade ${dollar}http_upgrade; - proxy_set_header Connection "upgrade"; - proxy_set_header Host ${dollar}http_host; - proxy_pass http://localhost:8000/auth/; - } - - location /${BUCKET_NAME}/ { - proxy_http_version 1.1; - proxy_set_header Upgrade ${dollar}http_upgrade; - proxy_set_header Connection "upgrade"; - proxy_set_header Host ${dollar}http_host; - proxy_pass http://localhost:9000/uploads/; - } - } -} diff --git a/aio/pg-setup.sh b/aio/pg-setup.sh deleted file mode 100644 index b830acc5ec6..00000000000 --- a/aio/pg-setup.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -if [ "$BUILD_TYPE" == "full" ]; then - - export PGHOST=localhost - - sudo -u postgres "/usr/lib/postgresql/${POSTGRES_VERSION}/bin/pg_ctl" -D /var/lib/postgresql/data start - sudo -u postgres "/usr/lib/postgresql/${POSTGRES_VERSION}/bin/psql" --command "CREATE USER $POSTGRES_USER WITH SUPERUSER PASSWORD '$POSTGRES_PASSWORD';" && \ - sudo -u postgres "/usr/lib/postgresql/${POSTGRES_VERSION}/bin/createdb" -O "$POSTGRES_USER" "$POSTGRES_DB" && \ - sudo -u postgres "/usr/lib/postgresql/${POSTGRES_VERSION}/bin/psql" --command "GRANT ALL PRIVILEGES ON DATABASE $POSTGRES_DB TO $POSTGRES_USER;" && \ - sudo -u postgres "/usr/lib/postgresql/${POSTGRES_VERSION}/bin/pg_ctl" -D /var/lib/postgresql/data stop - -fi - diff --git a/aio/postgresql.conf b/aio/postgresql.conf deleted file mode 100644 index 8f3c4e8a4c4..00000000000 --- a/aio/postgresql.conf +++ /dev/null @@ -1,815 +0,0 @@ -# ----------------------------- -# PostgreSQL configuration file -# ----------------------------- -# -# This file consists of lines of the form: -# -# name = value -# -# (The "=" is optional.) Whitespace may be used. Comments are introduced with -# "#" anywhere on a line. The complete list of parameter names and allowed -# values can be found in the PostgreSQL documentation. -# -# The commented-out settings shown in this file represent the default values. 
-# Re-commenting a setting is NOT sufficient to revert it to the default value; -# you need to reload the server. -# -# This file is read on server startup and when the server receives a SIGHUP -# signal. If you edit the file on a running system, you have to SIGHUP the -# server for the changes to take effect, run "pg_ctl reload", or execute -# "SELECT pg_reload_conf()". Some parameters, which are marked below, -# require a server shutdown and restart to take effect. -# -# Any parameter can also be given as a command-line option to the server, e.g., -# "postgres -c log_connections=on". Some parameters can be changed at run time -# with the "SET" SQL command. -# -# Memory units: B = bytes Time units: us = microseconds -# kB = kilobytes ms = milliseconds -# MB = megabytes s = seconds -# GB = gigabytes min = minutes -# TB = terabytes h = hours -# d = days - - -#------------------------------------------------------------------------------ -# FILE LOCATIONS -#------------------------------------------------------------------------------ - -# The default values of these variables are driven from the -D command-line -# option or PGDATA environment variable, represented here as ConfigDir. - -data_directory = '/var/lib/postgresql/data' # use data in another directory - # (change requires restart) -hba_file = '/etc/postgresql/15/main/pg_hba.conf' # host-based authentication file - # (change requires restart) -ident_file = '/etc/postgresql/15/main/pg_ident.conf' # ident configuration file - # (change requires restart) - -# If external_pid_file is not explicitly set, no extra PID file is written. 
-external_pid_file = '/var/run/postgresql/15-main.pid' # write an extra PID file - # (change requires restart) - - -#------------------------------------------------------------------------------ -# CONNECTIONS AND AUTHENTICATION -#------------------------------------------------------------------------------ - -# - Connection Settings - - -listen_addresses = 'localhost' # what IP address(es) to listen on; - # comma-separated list of addresses; - # defaults to 'localhost'; use '*' for all - # (change requires restart) -port = 5432 # (change requires restart) -max_connections = 200 # (change requires restart) -#superuser_reserved_connections = 3 # (change requires restart) -unix_socket_directories = '/var/run/postgresql' # comma-separated list of directories - # (change requires restart) -#unix_socket_group = '' # (change requires restart) -#unix_socket_permissions = 0777 # begin with 0 to use octal notation - # (change requires restart) -#bonjour = off # advertise server via Bonjour - # (change requires restart) -#bonjour_name = '' # defaults to the computer name - # (change requires restart) - -# - TCP settings - -# see "man tcp" for details - -#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds; - # 0 selects the system default -#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds; - # 0 selects the system default -#tcp_keepalives_count = 0 # TCP_KEEPCNT; - # 0 selects the system default -#tcp_user_timeout = 0 # TCP_USER_TIMEOUT, in milliseconds; - # 0 selects the system default - -#client_connection_check_interval = 0 # time between checks for client - # disconnection while running queries; - # 0 for never - -# - Authentication - - -#authentication_timeout = 1min # 1s-600s -#password_encryption = scram-sha-256 # scram-sha-256 or md5 -#db_user_namespace = off - -# GSSAPI using Kerberos -#krb_server_keyfile = 'FILE:${sysconfdir}/krb5.keytab' -#krb_caseins_users = off - -# - SSL - - -ssl = on -#ssl_ca_file = '' -ssl_cert_file = 
'/etc/ssl/certs/ssl-cert-snakeoil.pem' -#ssl_crl_file = '' -#ssl_crl_dir = '' -ssl_key_file = '/etc/ssl/private/ssl-cert-snakeoil.key' -#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers -#ssl_prefer_server_ciphers = on -#ssl_ecdh_curve = 'prime256v1' -#ssl_min_protocol_version = 'TLSv1.2' -#ssl_max_protocol_version = '' -#ssl_dh_params_file = '' -#ssl_passphrase_command = '' -#ssl_passphrase_command_supports_reload = off - - -#------------------------------------------------------------------------------ -# RESOURCE USAGE (except WAL) -#------------------------------------------------------------------------------ - -# - Memory - - -shared_buffers = 256MB # min 128kB - # (change requires restart) -#huge_pages = try # on, off, or try - # (change requires restart) -#huge_page_size = 0 # zero for system default - # (change requires restart) -#temp_buffers = 8MB # min 800kB -#max_prepared_transactions = 0 # zero disables the feature - # (change requires restart) -# Caution: it is not advisable to set max_prepared_transactions nonzero unless -# you actively intend to use prepared transactions. 
-#work_mem = 4MB # min 64kB -#hash_mem_multiplier = 2.0 # 1-1000.0 multiplier on hash table work_mem -#maintenance_work_mem = 64MB # min 1MB -#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem -#logical_decoding_work_mem = 64MB # min 64kB -#max_stack_depth = 2MB # min 100kB -#shared_memory_type = mmap # the default is the first option - # supported by the operating system: - # mmap - # sysv - # windows - # (change requires restart) -dynamic_shared_memory_type = posix # the default is usually the first option - # supported by the operating system: - # posix - # sysv - # windows - # mmap - # (change requires restart) -#min_dynamic_shared_memory = 0MB # (change requires restart) - -# - Disk - - -#temp_file_limit = -1 # limits per-process temp file space - # in kilobytes, or -1 for no limit - -# - Kernel Resources - - -#max_files_per_process = 1000 # min 64 - # (change requires restart) - -# - Cost-Based Vacuum Delay - - -#vacuum_cost_delay = 0 # 0-100 milliseconds (0 disables) -#vacuum_cost_page_hit = 1 # 0-10000 credits -#vacuum_cost_page_miss = 2 # 0-10000 credits -#vacuum_cost_page_dirty = 20 # 0-10000 credits -#vacuum_cost_limit = 200 # 1-10000 credits - -# - Background Writer - - -#bgwriter_delay = 200ms # 10-10000ms between rounds -#bgwriter_lru_maxpages = 100 # max buffers written/round, 0 disables -#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round -#bgwriter_flush_after = 512kB # measured in pages, 0 disables - -# - Asynchronous Behavior - - -#backend_flush_after = 0 # measured in pages, 0 disables -#effective_io_concurrency = 1 # 1-1000; 0 disables prefetching -#maintenance_io_concurrency = 10 # 1-1000; 0 disables prefetching -#max_worker_processes = 8 # (change requires restart) -#max_parallel_workers_per_gather = 2 # limited by max_parallel_workers -#max_parallel_maintenance_workers = 2 # limited by max_parallel_workers -#max_parallel_workers = 8 # number of max_worker_processes that - # can be used in parallel 
operations -#parallel_leader_participation = on -#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate - # (change requires restart) - - -#------------------------------------------------------------------------------ -# WRITE-AHEAD LOG -#------------------------------------------------------------------------------ - -# - Settings - - -#wal_level = replica # minimal, replica, or logical - # (change requires restart) -#fsync = on # flush data to disk for crash safety - # (turning this off can cause - # unrecoverable data corruption) -#synchronous_commit = on # synchronization level; - # off, local, remote_write, remote_apply, or on -#wal_sync_method = fsync # the default is the first option - # supported by the operating system: - # open_datasync - # fdatasync (default on Linux and FreeBSD) - # fsync - # fsync_writethrough - # open_sync -#full_page_writes = on # recover from partial page writes -#wal_log_hints = off # also do full page writes of non-critical updates - # (change requires restart) -#wal_compression = off # enables compression of full-page writes; - # off, pglz, lz4, zstd, or on -#wal_init_zero = on # zero-fill new WAL files -#wal_recycle = on # recycle WAL files -#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers - # (change requires restart) -#wal_writer_delay = 200ms # 1-10000 milliseconds -#wal_writer_flush_after = 1MB # measured in pages, 0 disables -#wal_skip_threshold = 2MB - -#commit_delay = 0 # range 0-100000, in microseconds -#commit_siblings = 5 # range 1-1000 - -# - Checkpoints - - -#checkpoint_timeout = 5min # range 30s-1d -#checkpoint_completion_target = 0.9 # checkpoint target duration, 0.0 - 1.0 -#checkpoint_flush_after = 256kB # measured in pages, 0 disables -#checkpoint_warning = 30s # 0 disables -max_wal_size = 1GB -min_wal_size = 80MB - -# - Prefetching during recovery - - -#recovery_prefetch = try # prefetch pages referenced in the WAL? 
-#wal_decode_buffer_size = 512kB # lookahead window used for prefetching - # (change requires restart) - -# - Archiving - - -#archive_mode = off # enables archiving; off, on, or always - # (change requires restart) -#archive_library = '' # library to use to archive a logfile segment - # (empty string indicates archive_command should - # be used) -#archive_command = '' # command to use to archive a logfile segment - # placeholders: %p = path of file to archive - # %f = file name only - # e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f' -#archive_timeout = 0 # force a logfile segment switch after this - # number of seconds; 0 disables - -# - Archive Recovery - - -# These are only used in recovery mode. - -#restore_command = '' # command to use to restore an archived logfile segment - # placeholders: %p = path of file to restore - # %f = file name only - # e.g. 'cp /mnt/server/archivedir/%f %p' -#archive_cleanup_command = '' # command to execute at every restartpoint -#recovery_end_command = '' # command to execute at completion of recovery - -# - Recovery Target - - -# Set these only when performing a targeted recovery. 
- -#recovery_target = '' # 'immediate' to end recovery as soon as a - # consistent state is reached - # (change requires restart) -#recovery_target_name = '' # the named restore point to which recovery will proceed - # (change requires restart) -#recovery_target_time = '' # the time stamp up to which recovery will proceed - # (change requires restart) -#recovery_target_xid = '' # the transaction ID up to which recovery will proceed - # (change requires restart) -#recovery_target_lsn = '' # the WAL LSN up to which recovery will proceed - # (change requires restart) -#recovery_target_inclusive = on # Specifies whether to stop: - # just after the specified recovery target (on) - # just before the recovery target (off) - # (change requires restart) -#recovery_target_timeline = 'latest' # 'current', 'latest', or timeline ID - # (change requires restart) -#recovery_target_action = 'pause' # 'pause', 'promote', 'shutdown' - # (change requires restart) - - -#------------------------------------------------------------------------------ -# REPLICATION -#------------------------------------------------------------------------------ - -# - Sending Servers - - -# Set these on the primary and on any standby that will send replication data. - -#max_wal_senders = 10 # max number of walsender processes - # (change requires restart) -#max_replication_slots = 10 # max number of replication slots - # (change requires restart) -#wal_keep_size = 0 # in megabytes; 0 disables -#max_slot_wal_keep_size = -1 # in megabytes; -1 disables -#wal_sender_timeout = 60s # in milliseconds; 0 disables -#track_commit_timestamp = off # collect timestamp of transaction commit - # (change requires restart) - -# - Primary Server - - -# These settings are ignored on a standby server. 
- -#synchronous_standby_names = '' # standby servers that provide sync rep - # method to choose sync standbys, number of sync standbys, - # and comma-separated list of application_name - # from standby(s); '*' = all -#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed - -# - Standby Servers - - -# These settings are ignored on a primary server. - -#primary_conninfo = '' # connection string to sending server -#primary_slot_name = '' # replication slot on sending server -#promote_trigger_file = '' # file name whose presence ends recovery -#hot_standby = on # "off" disallows queries during recovery - # (change requires restart) -#max_standby_archive_delay = 30s # max delay before canceling queries - # when reading WAL from archive; - # -1 allows indefinite delay -#max_standby_streaming_delay = 30s # max delay before canceling queries - # when reading streaming WAL; - # -1 allows indefinite delay -#wal_receiver_create_temp_slot = off # create temp slot if primary_slot_name - # is not set -#wal_receiver_status_interval = 10s # send replies at least this often - # 0 disables -#hot_standby_feedback = off # send info from standby to prevent - # query conflicts -#wal_receiver_timeout = 60s # time that receiver waits for - # communication from primary - # in milliseconds; 0 disables -#wal_retrieve_retry_interval = 5s # time to wait before retrying to - # retrieve WAL after a failed attempt -#recovery_min_apply_delay = 0 # minimum delay for applying changes during recovery - -# - Subscribers - - -# These settings are ignored on a publisher. 
- -#max_logical_replication_workers = 4 # taken from max_worker_processes - # (change requires restart) -#max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers - - -#------------------------------------------------------------------------------ -# QUERY TUNING -#------------------------------------------------------------------------------ - -# - Planner Method Configuration - - -#enable_async_append = on -#enable_bitmapscan = on -#enable_gathermerge = on -#enable_hashagg = on -#enable_hashjoin = on -#enable_incremental_sort = on -#enable_indexscan = on -#enable_indexonlyscan = on -#enable_material = on -#enable_memoize = on -#enable_mergejoin = on -#enable_nestloop = on -#enable_parallel_append = on -#enable_parallel_hash = on -#enable_partition_pruning = on -#enable_partitionwise_join = off -#enable_partitionwise_aggregate = off -#enable_seqscan = on -#enable_sort = on -#enable_tidscan = on - -# - Planner Cost Constants - - -#seq_page_cost = 1.0 # measured on an arbitrary scale -#random_page_cost = 4.0 # same scale as above -#cpu_tuple_cost = 0.01 # same scale as above -#cpu_index_tuple_cost = 0.005 # same scale as above -#cpu_operator_cost = 0.0025 # same scale as above -#parallel_setup_cost = 1000.0 # same scale as above -#parallel_tuple_cost = 0.1 # same scale as above -#min_parallel_table_scan_size = 8MB -#min_parallel_index_scan_size = 512kB -#effective_cache_size = 4GB - -#jit_above_cost = 100000 # perform JIT compilation if available - # and query more expensive than this; - # -1 disables -#jit_inline_above_cost = 500000 # inline small functions if query is - # more expensive than this; -1 disables -#jit_optimize_above_cost = 500000 # use expensive JIT optimizations if - # query is more expensive than this; - # -1 disables - -# - Genetic Query Optimizer - - -#geqo = on -#geqo_threshold = 12 -#geqo_effort = 5 # range 1-10 -#geqo_pool_size = 0 # selects default based on effort -#geqo_generations = 0 # selects default based on 
effort -#geqo_selection_bias = 2.0 # range 1.5-2.0 -#geqo_seed = 0.0 # range 0.0-1.0 - -# - Other Planner Options - - -#default_statistics_target = 100 # range 1-10000 -#constraint_exclusion = partition # on, off, or partition -#cursor_tuple_fraction = 0.1 # range 0.0-1.0 -#from_collapse_limit = 8 -#jit = on # allow JIT compilation -#join_collapse_limit = 8 # 1 disables collapsing of explicit - # JOIN clauses -#plan_cache_mode = auto # auto, force_generic_plan or - # force_custom_plan -#recursive_worktable_factor = 10.0 # range 0.001-1000000 - - -#------------------------------------------------------------------------------ -# REPORTING AND LOGGING -#------------------------------------------------------------------------------ - -# - Where to Log - - -#log_destination = 'stderr' # Valid values are combinations of - # stderr, csvlog, jsonlog, syslog, and - # eventlog, depending on platform. - # csvlog and jsonlog require - # logging_collector to be on. - -# This is used when logging to stderr: -#logging_collector = off # Enable capturing of stderr, jsonlog, - # and csvlog into log files. Required - # to be on for csvlogs and jsonlogs. - # (change requires restart) - -# These are only used if logging_collector is on: -#log_directory = 'log' # directory where log files are written, - # can be absolute or relative to PGDATA -#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern, - # can include strftime() escapes -#log_file_mode = 0600 # creation mode for log files, - # begin with 0 to use octal notation -#log_rotation_age = 1d # Automatic rotation of logfiles will - # happen after that time. 0 disables. -#log_rotation_size = 10MB # Automatic rotation of logfiles will - # happen after that much log output. - # 0 disables. -#log_truncate_on_rotation = off # If on, an existing log file with the - # same name as the new log file will be - # truncated rather than appended to. 
- # But such truncation only occurs on - # time-driven rotation, not on restarts - # or size-driven rotation. Default is - # off, meaning append to existing files - # in all cases. - -# These are relevant when logging to syslog: -#syslog_facility = 'LOCAL0' -#syslog_ident = 'postgres' -#syslog_sequence_numbers = on -#syslog_split_messages = on - -# This is only relevant when logging to eventlog (Windows): -# (change requires restart) -#event_source = 'PostgreSQL' - -# - When to Log - - -#log_min_messages = warning # values in order of decreasing detail: - # debug5 - # debug4 - # debug3 - # debug2 - # debug1 - # info - # notice - # warning - # error - # log - # fatal - # panic - -#log_min_error_statement = error # values in order of decreasing detail: - # debug5 - # debug4 - # debug3 - # debug2 - # debug1 - # info - # notice - # warning - # error - # log - # fatal - # panic (effectively off) - -#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements - # and their durations, > 0 logs only - # statements running at least this number - # of milliseconds - -#log_min_duration_sample = -1 # -1 is disabled, 0 logs a sample of statements - # and their durations, > 0 logs only a sample of - # statements running at least this number - # of milliseconds; - # sample fraction is determined by log_statement_sample_rate - -#log_statement_sample_rate = 1.0 # fraction of logged statements exceeding - # log_min_duration_sample to be logged; - # 1.0 logs all such statements, 0.0 never logs - - -#log_transaction_sample_rate = 0.0 # fraction of transactions whose statements - # are logged regardless of their duration; 1.0 logs all - # statements from all transactions, 0.0 never logs - -#log_startup_progress_interval = 10s # Time between progress updates for - # long-running startup operations. - # 0 disables the feature, > 0 indicates - # the interval in milliseconds. 
- -# - What to Log - - -#debug_print_parse = off -#debug_print_rewritten = off -#debug_print_plan = off -#debug_pretty_print = on -#log_autovacuum_min_duration = 10min # log autovacuum activity; - # -1 disables, 0 logs all actions and - # their durations, > 0 logs only - # actions running at least this number - # of milliseconds. -#log_checkpoints = on -#log_connections = off -#log_disconnections = off -#log_duration = off -#log_error_verbosity = default # terse, default, or verbose messages -#log_hostname = off -log_line_prefix = '%m [%p] %q%u@%d ' # special values: - # %a = application name - # %u = user name - # %d = database name - # %r = remote host and port - # %h = remote host - # %b = backend type - # %p = process ID - # %P = process ID of parallel group leader - # %t = timestamp without milliseconds - # %m = timestamp with milliseconds - # %n = timestamp with milliseconds (as a Unix epoch) - # %Q = query ID (0 if none or not computed) - # %i = command tag - # %e = SQL state - # %c = session ID - # %l = session line number - # %s = session start timestamp - # %v = virtual transaction ID - # %x = transaction ID (0 if none) - # %q = stop here in non-session - # processes - # %% = '%' - # e.g. 
'<%u%%%d> ' -#log_lock_waits = off # log lock waits >= deadlock_timeout -#log_recovery_conflict_waits = off # log standby recovery conflict waits - # >= deadlock_timeout -#log_parameter_max_length = -1 # when logging statements, limit logged - # bind-parameter values to N bytes; - # -1 means print in full, 0 disables -#log_parameter_max_length_on_error = 0 # when logging an error, limit logged - # bind-parameter values to N bytes; - # -1 means print in full, 0 disables -#log_statement = 'none' # none, ddl, mod, all -#log_replication_commands = off -#log_temp_files = -1 # log temporary files equal or larger - # than the specified size in kilobytes; - # -1 disables, 0 logs all temp files -log_timezone = 'Etc/UTC' - - -#------------------------------------------------------------------------------ -# PROCESS TITLE -#------------------------------------------------------------------------------ - -cluster_name = '15/main' # added to process titles if nonempty - # (change requires restart) -#update_process_title = on - - -#------------------------------------------------------------------------------ -# STATISTICS -#------------------------------------------------------------------------------ - -# - Cumulative Query and Index Statistics - - -#track_activities = on -#track_activity_query_size = 1024 # (change requires restart) -#track_counts = on -#track_io_timing = off -#track_wal_io_timing = off -#track_functions = none # none, pl, all -#stats_fetch_consistency = cache - - -# - Monitoring - - -#compute_query_id = auto -#log_statement_stats = off -#log_parser_stats = off -#log_planner_stats = off -#log_executor_stats = off - - -#------------------------------------------------------------------------------ -# AUTOVACUUM -#------------------------------------------------------------------------------ - -#autovacuum = on # Enable autovacuum subprocess? 'on' - # requires track_counts to also be on. 
-#autovacuum_max_workers = 3 # max number of autovacuum subprocesses - # (change requires restart) -#autovacuum_naptime = 1min # time between autovacuum runs -#autovacuum_vacuum_threshold = 50 # min number of row updates before - # vacuum -#autovacuum_vacuum_insert_threshold = 1000 # min number of row inserts - # before vacuum; -1 disables insert - # vacuums -#autovacuum_analyze_threshold = 50 # min number of row updates before - # analyze -#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum -#autovacuum_vacuum_insert_scale_factor = 0.2 # fraction of inserts over table - # size before insert vacuum -#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze -#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum - # (change requires restart) -#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age - # before forced vacuum - # (change requires restart) -#autovacuum_vacuum_cost_delay = 2ms # default vacuum cost delay for - # autovacuum, in milliseconds; - # -1 means use vacuum_cost_delay -#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for - # autovacuum, -1 means use - # vacuum_cost_limit - - -#------------------------------------------------------------------------------ -# CLIENT CONNECTION DEFAULTS -#------------------------------------------------------------------------------ - -# - Statement Behavior - - -#client_min_messages = notice # values in order of decreasing detail: - # debug5 - # debug4 - # debug3 - # debug2 - # debug1 - # log - # notice - # warning - # error -#search_path = '"$user", public' # schema names -#row_security = on -#default_table_access_method = 'heap' -#default_tablespace = '' # a tablespace name, '' uses the default -#default_toast_compression = 'pglz' # 'pglz' or 'lz4' -#temp_tablespaces = '' # a list of tablespace names, '' uses - # only default tablespace -#check_function_bodies = on -#default_transaction_isolation = 'read committed' 
-#default_transaction_read_only = off -#default_transaction_deferrable = off -#session_replication_role = 'origin' -#statement_timeout = 0 # in milliseconds, 0 is disabled -#lock_timeout = 0 # in milliseconds, 0 is disabled -#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled -#idle_session_timeout = 0 # in milliseconds, 0 is disabled -#vacuum_freeze_table_age = 150000000 -#vacuum_freeze_min_age = 50000000 -#vacuum_failsafe_age = 1600000000 -#vacuum_multixact_freeze_table_age = 150000000 -#vacuum_multixact_freeze_min_age = 5000000 -#vacuum_multixact_failsafe_age = 1600000000 -#bytea_output = 'hex' # hex, escape -#xmlbinary = 'base64' -#xmloption = 'content' -#gin_pending_list_limit = 4MB - -# - Locale and Formatting - - -datestyle = 'iso, mdy' -#intervalstyle = 'postgres' -timezone = 'Etc/UTC' -#timezone_abbreviations = 'Default' # Select the set of available time zone - # abbreviations. Currently, there are - # Default - # Australia (historical usage) - # India - # You can create your own file in - # share/timezonesets/. -#extra_float_digits = 1 # min -15, max 3; any value >0 actually - # selects precise output mode -#client_encoding = sql_ascii # actually, defaults to database - # encoding - -# These settings are initialized by initdb, but they can be changed. 
-lc_messages = 'C.UTF-8' # locale for system error message - # strings -lc_monetary = 'C.UTF-8' # locale for monetary formatting -lc_numeric = 'C.UTF-8' # locale for number formatting -lc_time = 'C.UTF-8' # locale for time formatting - -# default configuration for text search -default_text_search_config = 'pg_catalog.english' - -# - Shared Library Preloading - - -#local_preload_libraries = '' -#session_preload_libraries = '' -#shared_preload_libraries = '' # (change requires restart) -#jit_provider = 'llvmjit' # JIT library to use - -# - Other Defaults - - -#dynamic_library_path = '$libdir' -#extension_destdir = '' # prepend path when loading extensions - # and shared objects (added by Debian) -#gin_fuzzy_search_limit = 0 - - -#------------------------------------------------------------------------------ -# LOCK MANAGEMENT -#------------------------------------------------------------------------------ - -#deadlock_timeout = 1s -#max_locks_per_transaction = 64 # min 10 - # (change requires restart) -#max_pred_locks_per_transaction = 64 # min 10 - # (change requires restart) -#max_pred_locks_per_relation = -2 # negative values mean - # (max_pred_locks_per_transaction - # / -max_pred_locks_per_relation) - 1 -#max_pred_locks_per_page = 2 # min 0 - - -#------------------------------------------------------------------------------ -# VERSION AND PLATFORM COMPATIBILITY -#------------------------------------------------------------------------------ - -# - Previous PostgreSQL Versions - - -#array_nulls = on -#backslash_quote = safe_encoding # on, off, or safe_encoding -#escape_string_warning = on -#lo_compat_privileges = off -#quote_all_identifiers = off -#standard_conforming_strings = on -#synchronize_seqscans = on - -# - Other Platforms and Clients - - -#transform_null_equals = off - - -#------------------------------------------------------------------------------ -# ERROR HANDLING -#------------------------------------------------------------------------------ - 
-#exit_on_error = off # terminate session on any error? -#restart_after_crash = on # reinitialize after backend crash? -#data_sync_retry = off # retry or panic on failure to fsync - # data? - # (change requires restart) -#recovery_init_sync_method = fsync # fsync, syncfs (Linux 5.8+) - - -#------------------------------------------------------------------------------ -# CONFIG FILE INCLUDES -#------------------------------------------------------------------------------ - -# These options allow settings to be loaded from files other than the -# default postgresql.conf. Note that these are directives, not variable -# assignments, so they can usefully be given more than once. - -# include_dir = 'conf.d' # include files ending in '.conf' from - # a directory, e.g., 'conf.d' -#include_if_exists = '...' # include file only if it exists -#include = '...' # include file - - -#------------------------------------------------------------------------------ -# CUSTOMIZED OPTIONS -#------------------------------------------------------------------------------ - -# Add settings for extensions here diff --git a/aio/supervisord-app b/aio/supervisord-app deleted file mode 100644 index e2cf1f04754..00000000000 --- a/aio/supervisord-app +++ /dev/null @@ -1,71 +0,0 @@ - -[program:web] -command=node /app/web/web/server.js -autostart=true -autorestart=true -stdout_logfile=/dev/stdout -stdout_logfile_maxbytes=0 -stderr_logfile=/dev/stdout -stderr_logfile_maxbytes=0 -environment=PORT=3001,HOSTNAME=0.0.0.0 - -[program:space] -command=node /app/space/space/server.js -autostart=true -autorestart=true -stdout_logfile=/dev/stdout -stdout_logfile_maxbytes=0 -stderr_logfile=/dev/stdout -stderr_logfile_maxbytes=0 -environment=PORT=3002,HOSTNAME=0.0.0.0 - -[program:admin] -command=node /app/admin/admin/server.js -autostart=true -autorestart=true -stdout_logfile=/dev/stdout -stdout_logfile_maxbytes=0 -stderr_logfile=/dev/stdout -stderr_logfile_maxbytes=0 -environment=PORT=3003,HOSTNAME=0.0.0.0 - 
-[program:migrator] -directory=/app/api -command=sh -c "./bin/docker-entrypoint-migrator.sh" -autostart=true -autorestart=false -stdout_logfile=/dev/stdout -stdout_logfile_maxbytes=0 -stderr_logfile=/dev/stdout -stderr_logfile_maxbytes=0 - -[program:api] -directory=/app/api -command=sh -c "./bin/docker-entrypoint-api.sh" -autostart=true -autorestart=true -stdout_logfile=/dev/stdout -stdout_logfile_maxbytes=0 -stderr_logfile=/dev/stdout -stderr_logfile_maxbytes=0 - -[program:worker] -directory=/app/api -command=sh -c "./bin/docker-entrypoint-worker.sh" -autostart=true -autorestart=true -stdout_logfile=/dev/stdout -stdout_logfile_maxbytes=0 -stderr_logfile=/dev/stdout -stderr_logfile_maxbytes=0 - -[program:beat] -directory=/app/api -command=sh -c "./bin/docker-entrypoint-beat.sh" -autostart=true -autorestart=true -stdout_logfile=/dev/stdout -stdout_logfile_maxbytes=0 -stderr_logfile=/dev/stdout -stderr_logfile_maxbytes=0 - diff --git a/aio/supervisord-full-base b/aio/supervisord-full-base deleted file mode 100644 index 0a6c27e13a1..00000000000 --- a/aio/supervisord-full-base +++ /dev/null @@ -1,38 +0,0 @@ -[supervisord] -user=root -nodaemon=true -stderr_logfile=/app/logs/error/supervisor.err.log -stdout_logfile=/app/logs/access/supervisor.log - -[program:redis] -directory=/app/data/redis -command=redis-server -autostart=true -autorestart=true -stderr_logfile=/app/logs/error/redis.err.log -stdout_logfile=/app/logs/access/redis.log - -[program:postgresql] -user=postgres -command=/usr/lib/postgresql/15/bin/postgres --config-file=/etc/postgresql/postgresql.conf -autostart=true -autorestart=true -stderr_logfile=/app/logs/error/postgresql.err.log -stdout_logfile=/app/logs/access/postgresql.log - -[program:minio] -directory=/app/data/minio -command=minio server /app/data/minio -autostart=true -autorestart=true -stderr_logfile=/app/logs/error/minio.err.log -stdout_logfile=/app/logs/access/minio.log - -[program:nginx] -directory=/app/data/nginx -command=/app/nginx-start.sh 
-autostart=true -autorestart=true -stderr_logfile=/app/logs/error/nginx.err.log -stdout_logfile=/app/logs/access/nginx.log - diff --git a/aio/supervisord-slim-base b/aio/supervisord-slim-base deleted file mode 100644 index 24509216e09..00000000000 --- a/aio/supervisord-slim-base +++ /dev/null @@ -1,14 +0,0 @@ -[supervisord] -user=root -nodaemon=true -stderr_logfile=/app/logs/error/supervisor.err.log -stdout_logfile=/app/logs/access/supervisor.log - -[program:nginx] -directory=/app/data/nginx -command=/app/nginx-start.sh -autostart=true -autorestart=true -stderr_logfile=/app/logs/error/nginx.err.log -stdout_logfile=/app/logs/access/nginx.log - diff --git a/apps/proxy/Caddyfile.ce b/apps/proxy/Caddyfile.ce new file mode 100644 index 00000000000..7f8fc79f758 --- /dev/null +++ b/apps/proxy/Caddyfile.ce @@ -0,0 +1,34 @@ +(plane_proxy) { + request_body { + max_size {$FILE_SIZE_LIMIT} + } + + reverse_proxy /spaces/* space:3000 + + reverse_proxy /god-mode/* admin:3000 + + reverse_proxy /live/* live:3000 + + reverse_proxy /api/* api:8000 + + reverse_proxy /auth/* api:8000 + + reverse_proxy /{$BUCKET_NAME}/* plane-minio:9000 + + reverse_proxy /* web:3000 +} + +{ + {$CERT_EMAIL} + acme_ca {$CERT_ACME_CA:https://acme-v02.api.letsencrypt.org/directory} + {$CERT_ACME_DNS} + servers { + max_header_size 25MB + client_ip_headers X-Forwarded-For X-Real-IP + trusted_proxies static {$TRUSTED_PROXIES:0.0.0.0/0} + } +} + +{$SITE_ADDRESS} { + import plane_proxy +} \ No newline at end of file diff --git a/apps/proxy/Dockerfile.ce b/apps/proxy/Dockerfile.ce new file mode 100644 index 00000000000..abec06b1c39 --- /dev/null +++ b/apps/proxy/Dockerfile.ce @@ -0,0 +1,14 @@ +FROM caddy:2.10.0-builder-alpine AS caddy-builder + +RUN xcaddy build \ + --with github.com/caddy-dns/cloudflare@v0.2.1 \ + --with github.com/caddy-dns/digitalocean@04bde2867106aa1b44c2f9da41a285fa02e629c5 \ + --with github.com/mholt/caddy-l4@4d3c80e89c5f80438a3e048a410d5543ff5fb9f4 + +FROM caddy:2.10.0-builder-alpine + 
+RUN apk add nss-tools bash curl + +COPY --from=caddy-builder /usr/bin/caddy /usr/bin/caddy + +COPY Caddyfile.ce /etc/caddy/Caddyfile \ No newline at end of file diff --git a/deployments/aio/community/Dockerfile b/deployments/aio/community/Dockerfile new file mode 100644 index 00000000000..52a21d07da5 --- /dev/null +++ b/deployments/aio/community/Dockerfile @@ -0,0 +1,66 @@ +ARG PLANE_VERSION=v0.27.1 +FROM --platform=$BUILDPLATFORM tonistiigi/binfmt AS binfmt + +# ************************************************** +# STAGE 0: Image Loading +# ************************************************** +FROM node:20-alpine AS node +FROM artifacts.plane.so/makeplane/plane-frontend:${PLANE_VERSION} AS web-img +FROM artifacts.plane.so/makeplane/plane-backend:${PLANE_VERSION} AS backend-img +FROM artifacts.plane.so/makeplane/plane-space:${PLANE_VERSION} AS space-img +FROM artifacts.plane.so/makeplane/plane-admin:${PLANE_VERSION} AS admin-img +FROM artifacts.plane.so/makeplane/plane-live:${PLANE_VERSION} AS live-img +FROM artifacts.plane.so/makeplane/plane-proxy:${PLANE_VERSION} AS proxy-img + +# ************************************************** +# STAGE 1: Runner +# ************************************************** +FROM python:3.12.5-alpine AS runner + +WORKDIR /app + +RUN apk add --no-cache \ + "libpq" \ + "libxslt" \ + "xmlsec" + + +COPY --from=node /usr/lib /usr/lib +COPY --from=node /usr/local/lib /usr/local/lib +COPY --from=node /usr/local/include /usr/local/include +COPY --from=node /usr/local/bin /usr/local/bin + +COPY --from=web-img /app /app/web +COPY --from=space-img /app /app/space +COPY --from=admin-img /app /app/admin +COPY --from=live-img /app /app/live + +RUN rm -rf /app/web/web/.next/cache && \ + rm -rf /app/space/space/.next/cache + +COPY --from=proxy-img /usr/bin/caddy /usr/bin/caddy +COPY dist/Caddyfile /app/proxy/Caddyfile + +COPY --from=backend-img /code /app/backend +COPY --from=backend-img /usr/local/lib/python3.12/site-packages/ 
/usr/local/lib/python3.12/site-packages/ +COPY --from=backend-img /usr/local/bin/ /usr/local/bin/ + +RUN apk add --no-cache nss-tools bash curl uuidgen ncdu vim + +RUN pip install supervisor +RUN mkdir -p /etc/supervisor/conf.d + +COPY start.sh /app/start.sh +COPY dist/plane.env /app/plane.env +COPY supervisor.conf /etc/supervisor/conf.d/supervisor.conf + +RUN mkdir -p /app/logs/access && \ + mkdir -p /app/logs/error && \ + mkdir -p /app/data && \ + chmod +x /app/start.sh + +VOLUME ['/app/data', '/app/logs'] + +EXPOSE 80 443 + +CMD ["/app/start.sh"] diff --git a/deployments/aio/community/README.md b/deployments/aio/community/README.md new file mode 100644 index 00000000000..96aab6737d4 --- /dev/null +++ b/deployments/aio/community/README.md @@ -0,0 +1,174 @@ +# Plane Community All-In-One (AIO) Docker Image + +The Plane Community All-In-One Docker image packages all Plane services into a single container for easy deployment and testing. This image includes web interface, API server, background workers, live server, and more. 
+ +## What's Included + +The AIO image contains the following services: + +- **Web App** (Port 3001): Main Plane web interface +- **Space** (Port 3002): Public project spaces +- **Admin** (Port 3003): Administrative interface +- **API Server** (Port 3004): Backend API +- **Live Server** (Port 3005): Real-time collaboration +- **Proxy** (Port 80, 443): Caddy reverse proxy +- **Worker & Beat**: Background task processing + +## Prerequisites + +### Required External Services + +The AIO image requires these external services to be running: + +- **PostgreSQL Database**: For data storage +- **Redis**: For caching and session management +- **RabbitMQ**: For message queuing +- **S3-Compatible Storage**: For file uploads (AWS S3 or MinIO) + +### Required Environment Variables + +You must provide these environment variables: + +#### Core Configuration + +- `DOMAIN_NAME`: Your domain name or IP address +- `DATABASE_URL`: PostgreSQL connection string +- `REDIS_URL`: Redis connection string +- `AMQP_URL`: RabbitMQ connection string + +#### Storage Configuration + +- `AWS_REGION`: AWS region (e.g., us-east-1) +- `AWS_ACCESS_KEY_ID`: S3 access key +- `AWS_SECRET_ACCESS_KEY`: S3 secret key +- `AWS_S3_BUCKET_NAME`: S3 bucket name +- `AWS_S3_ENDPOINT_URL`: S3 endpoint (optional, defaults to AWS) + +## Quick Start + +### Basic Usage + +```bash +docker run --name plane-aio --rm -it \ + -p 80:80 \ + -e DOMAIN_NAME=your-domain.com \ + -e DATABASE_URL=postgresql://user:pass@host:port/database \ + -e REDIS_URL=redis://host:port \ + -e AMQP_URL=amqp://user:pass@host:port/vhost \ + -e AWS_REGION=us-east-1 \ + -e AWS_ACCESS_KEY_ID=your-access-key \ + -e AWS_SECRET_ACCESS_KEY=your-secret-key \ + -e AWS_S3_BUCKET_NAME=your-bucket \ + artifacts.plane.so/makeplane/plane-aio-community:latest +``` + +### Example with IP Address + +```bash +MYIP=192.168.68.169 +docker run --name myaio --rm -it \ + -p 80:80 \ + -e DOMAIN_NAME=${MYIP} \ + -e DATABASE_URL=postgresql://plane:plane@${MYIP}:15432/plane \ 
+ -e REDIS_URL=redis://${MYIP}:16379 \ + -e AMQP_URL=amqp://plane:plane@${MYIP}:15673/plane \ + -e AWS_REGION=us-east-1 \ + -e AWS_ACCESS_KEY_ID=5MV45J9NF5TEFZWYCRAX \ + -e AWS_SECRET_ACCESS_KEY=7xMqAiAHsf2UUjMH+EwICXlyJL9TO30m8leEaDsL \ + -e AWS_S3_BUCKET_NAME=plane-app \ + -e AWS_S3_ENDPOINT_URL=http://${MYIP}:19000 \ + -e FILE_SIZE_LIMIT=10485760 \ + artifacts.plane.so/makeplane/plane-aio-community:latest +``` + +## Configuration Options + +### Optional Environment Variables + +#### Network & Protocol + +- `SITE_ADDRESS`: Server bind address (default: `:80`) + + +#### Security & Secrets + +- `SECRET_KEY`: Django secret key (default provided) +- `LIVE_SERVER_SECRET_KEY`: Live server secret (default provided) + +#### File Handling + +- `FILE_SIZE_LIMIT`: Maximum file upload size in bytes (default: `5242880` = 5MB) + +#### API Configuration + +- `API_KEY_RATE_LIMIT`: API key rate limit (default: `60/minute`) + +## Port Mapping + +The following ports are exposed: + +- `80`: Main web interface (HTTP) +- `443`: HTTPS (if SSL configured) + +## Volume Mounts + +### Recommended Persistent Volumes + +```bash +-v /path/to/logs:/app/logs \ +-v /path/to/data:/app/data +``` + +## Building the Image + +To build the AIO image yourself: + +```bash +cd deployments/aio/community +IMAGE_NAME=myplane-aio ./build.sh --release=v0.27.1 [--platform=linux/amd64] +``` + +Available build options: + +- `--release`: Plane version to build (required) +- `--image-name`: Custom image name (default: `plane-aio-community`) + +## Troubleshooting + +### Logs + +All service logs are available in `/app/logs/`: + +- Access logs: `/app/logs/access/` +- Error logs: `/app/logs/error/` + +### Health Checks + +The container runs multiple services managed by Supervisor. Check service status: + +```bash +docker exec -it <container-name> supervisorctl status +``` + +### Common Issues + +1. **Database Connection Failed**: Ensure PostgreSQL is accessible and credentials are correct +2. 
**Redis Connection Failed**: Verify Redis server is running and URL is correct +3. **File Upload Issues**: Check S3 credentials and bucket permissions + +### Environment Validation + +The container will validate required environment variables on startup and display helpful error messages if any are missing. + +## Production Considerations + +- Use proper SSL certificates for HTTPS +- Configure proper backup strategies for data +- Monitor resource usage and scale accordingly +- Use external load balancer for high availability +- Regularly update to latest versions +- Secure your environment variables and secrets + +## Support + +For issues and support, please refer to the official Plane documentation. diff --git a/deployments/aio/community/build.sh b/deployments/aio/community/build.sh new file mode 100755 index 00000000000..fa1f0b1ba88 --- /dev/null +++ b/deployments/aio/community/build.sh @@ -0,0 +1,155 @@ +#!/bin/bash + +set -e + +DIST_DIR=${DIST_DIR:-./dist} +CPU_ARCH=$(uname -m) +IMAGE_NAME=${IMAGE_NAME:-makeplane/plane-aio-community} + + +# loop through all flags and set the variables +for arg in "$@"; do + case $arg in + --release) + APP_RELEASE_VERSION="$2" + shift + shift + ;; + --release=*) + APP_RELEASE_VERSION="${arg#*=}" + shift + ;; + --image-name) + IMAGE_NAME="$2" + shift + shift + ;; + --image-name=*) + IMAGE_NAME="${arg#*=}" + shift + ;; + esac +done + + +if [ -z "$APP_RELEASE_VERSION" ]; then + echo "" + echo "Usage: " + echo " ./build.sh [flags]" + echo "" + echo "Flags:" + echo " --release= required (e.g. v0.27.1)" + echo "" + echo "Example: ./build.sh --release=v0.27.1 --platform=linux/amd64" + exit 1 +fi + +# Install yq if not present +if ! command -v yq &> /dev/null; then + echo "Installing yq..."
+ sudo wget -qO /usr/local/bin/yq https://github.com/mikefarah/yq/releases/latest/download/yq_linux_${CPU_ARCH} + sudo chmod +x /usr/local/bin/yq +fi + +cd $(dirname $0) + +string_replace(){ + local file="$1" + local search="$2" + local replace="$3" + + if [[ "$OSTYPE" == "darwin"* ]]; then + sed -i '' "s|$search|$replace|g" "$file" + else + sed -i "s|$search|$replace|g" "$file" + fi +} +remove_line(){ + local file="$1" + local line="$2" + + if [[ "$OSTYPE" == "darwin"* ]]; then + sed -i '' '/'$line'/d' "$file" + else + sed -i '/'$line'/d' "$file" + fi +} + +update_env_file(){ + local file="$1" + local key="$2" + local value="$3" + + # if key is in file, replace it + if [[ "$OSTYPE" == "darwin"* ]]; then + sed -i '' 's|^'$key'=.*|'$key'='$value'|' "$file" + else + sed -i 's|^'$key'=.*|'$key'='$value'|' "$file" + fi + + # if key not in file, add it + if ! grep -q "^$key=" "$file"; then + echo "$key=$value" >> "$file" + fi +} + +build_dist_files(){ + cp ./variables.env $DIST_DIR/plane.env + cp ../../../apps/proxy/Caddyfile.ce $DIST_DIR/Caddyfile + + echo "" >> $DIST_DIR/plane.env + echo "" >> $DIST_DIR/plane.env + + # update the plane.env file with the APP_RELEASE_VERSION + update_env_file $DIST_DIR/plane.env "APP_RELEASE_VERSION" "$APP_RELEASE_VERSION" + update_env_file $DIST_DIR/plane.env "APP_RELEASE" "$APP_RELEASE_VERSION" + update_env_file $DIST_DIR/plane.env "APP_VERSION" "$APP_RELEASE_VERSION" + + update_env_file $DIST_DIR/plane.env "API_BASE_URL" "http://localhost:3004" + update_env_file $DIST_DIR/plane.env "SITE_ADDRESS" ":80" + + # remove this line containing `plane-minio:9000` + remove_line $DIST_DIR/Caddyfile "plane-minio:9000" "" + + # in caddyfile, update `reverse_proxy /spaces/* space:3000` to `reverse_proxy /spaces/* space:3002` + string_replace $DIST_DIR/Caddyfile "web:3000" "localhost:3001" + string_replace $DIST_DIR/Caddyfile "space:3000" "localhost:3002" + string_replace $DIST_DIR/Caddyfile "admin:3000" "localhost:3003" + string_replace 
$DIST_DIR/Caddyfile "api:8000" "localhost:3004" + string_replace $DIST_DIR/Caddyfile "live:3000" "localhost:3005" + + + # print docker build command + echo "------------------------------------------------" + echo "Run the following command to build the image:" + echo "------------------------------------------------" + echo "" + echo "docker build -t $IMAGE_NAME \\" + echo " -f $(pwd)/Dockerfile \\" + echo " --build-arg PLANE_VERSION=$APP_RELEASE_VERSION \\" + echo " $(pwd)" + echo "" + echo "------------------------------------------------" +} + + +main(){ + # check if the dist directory exists + echo "" + if [ -d "$DIST_DIR" ]; then + echo "Cleaning existing dist directory..." + rm -rf $DIST_DIR + fi + echo "Creating dist directory..." + mkdir -p $DIST_DIR + echo "" + + build_dist_files + if [ $? -ne 0 ]; then + echo "Error: Failed to build docker image" + exit 1 + fi +} + +main "$@" + diff --git a/deployments/aio/community/start.sh b/deployments/aio/community/start.sh new file mode 100644 index 00000000000..6b8c81561d5 --- /dev/null +++ b/deployments/aio/community/start.sh @@ -0,0 +1,169 @@ +#!/bin/bash -e + +print_header(){ + clear + echo "------------------------------------------------" + echo "Plane Community (All-In-One)" + echo "------------------------------------------------" + echo "" + echo "You are required to pass below environment variables to the script" + echo " DOMAIN_NAME, DATABASE_URL, REDIS_URL, AMQP_URL" + echo " AWS_REGION, AWS_ACCESS_KEY_ID" + echo " AWS_SECRET_ACCESS_KEY, AWS_S3_BUCKET_NAME" + echo "" + echo "Other optional environment variables: " + echo " SITE_ADDRESS (default: ':80')" + echo " FILE_SIZE_LIMIT (default: 5242880)" + echo " APP_PROTOCOL (http or https)" + echo " SECRET_KEY (default: 60gp0byfz2dvffa45cxl20p1scy9xbpf6d8c5y0geejgkyp1b5)" + echo " LIVE_SERVER_SECRET_KEY (default: htbqvBJAgpm9bzvf3r4urJer0ENReatceh)" + echo "" + echo "" +} + +check_required_env(){ + echo "Checking required environment variables..." 
+ local keys=("DOMAIN_NAME" "DATABASE_URL" "REDIS_URL" "AMQP_URL" + "AWS_REGION" "AWS_ACCESS_KEY_ID" "AWS_SECRET_ACCESS_KEY" "AWS_S3_BUCKET_NAME") + + local missing_keys=() + # Check if the environment variable is set and not empty + for key in "${keys[@]}"; do + if [ -z "${!key}" ]; then + echo " ❌ '$key' is not set or is empty" + missing_keys+=("$key") + fi + done + + if [ ${#missing_keys[@]} -gt 0 ]; then + echo "" + exit 1 + fi + # add checkmark + echo "✅ Required environment variables are available" + echo "" +} + +update_env_value(){ + local key="$1" + local value="$2" + + # check if the file exists + if [ ! -f "plane.env" ]; then + echo "plane.env file not found" + exit 1 + fi + + # check if the key exists and add it if it doesn't + if ! grep -q "^$key=.*" plane.env; then + echo "${key}=${value}" >> plane.env + return 0 + fi + + # if key and value are not empty, update the value + if [ -n "$key" ] && [ -n "$value" ]; then + sed -i "s|^$key=.*|$key=$value|" plane.env + return 0 + fi + +} + +check_pre_requisites(){ + check_required_env + + # check if the file exists + if [ ! -f "plane.env" ]; then + echo "plane.env file not found" + exit 1 + fi + # add a new line to the end of the file + echo "" >> plane.env + echo "" >> plane.env + echo "✅ Pre-requisites checked" + echo "" + +} + +validate_domain_name() { + local domain="$1" + + # Check if it's an IP address first + if [[ "$domain" =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then + echo "IP" + return 0 + fi + + # FQDN validation regex + local fqdn_regex='^[a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(\.[a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*\.?$' + + if [[ "$domain" =~ $fqdn_regex ]]; then + # Additional checks + if [[ ${#domain} -le 253 ]] && [[ ! "$domain" =~ \.\. ]] && [[ ! "$domain" =~ ^- ]] && [[ ! "$domain" =~ -\. ]]; then + echo "FQDN" + return 0 + fi + fi + + echo "INVALID" + return 1 +} + +update_env_file(){ + echo "Updating environment file..." 
+ # check if DOMAIN_NAME is valid IP address + local domain_type=$(validate_domain_name "$DOMAIN_NAME") + if [ "$domain_type" == "INVALID" ]; then + echo "DOMAIN_NAME is not a valid FQDN or IP address" + exit 1 + fi + + local app_protocol=${APP_PROTOCOL:-http} + + update_env_value "APP_PROTOCOL" "$app_protocol" + update_env_value "DOMAIN_NAME" "$DOMAIN_NAME" + update_env_value "APP_DOMAIN" "$DOMAIN_NAME" + if [ -n "$SITE_ADDRESS" ]; then + update_env_value "SITE_ADDRESS" "$SITE_ADDRESS" + else + update_env_value "SITE_ADDRESS" ":80" + fi + update_env_value "WEB_URL" "$app_protocol://$DOMAIN_NAME" + update_env_value "CORS_ALLOWED_ORIGINS" "http://$DOMAIN_NAME,https://$DOMAIN_NAME" + + # update database url + update_env_value "DATABASE_URL" "$DATABASE_URL" + update_env_value "REDIS_URL" "$REDIS_URL" + update_env_value "AMQP_URL" "$AMQP_URL" + + # update aws credentials + update_env_value "AWS_REGION" "$AWS_REGION" + update_env_value "AWS_ACCESS_KEY_ID" "$AWS_ACCESS_KEY_ID" + update_env_value "AWS_SECRET_ACCESS_KEY" "$AWS_SECRET_ACCESS_KEY" + update_env_value "AWS_S3_BUCKET_NAME" "$AWS_S3_BUCKET_NAME" + update_env_value "AWS_S3_ENDPOINT_URL" "${AWS_S3_ENDPOINT_URL:-https://s3.${AWS_REGION}.amazonaws.com}" + update_env_value "BUCKET_NAME" "$AWS_S3_BUCKET_NAME" + update_env_value "USE_MINIO" "0" + + # Optional environment variables + update_env_value "SECRET_KEY" "${SECRET_KEY:-60gp0byfz2dvffa45cxl20p1scy9xbpf6d8c5y0geejgkyp1b5}" + update_env_value "FILE_SIZE_LIMIT" "${FILE_SIZE_LIMIT:-5242880}" + update_env_value "LIVE_SERVER_SECRET_KEY" "${LIVE_SERVER_SECRET_KEY:-htbqvBJAgpm9bzvf3r4urJer0ENReatceh}" + + update_env_value "API_KEY_RATE_LIMIT" "${API_KEY_RATE_LIMIT:-60/minute}" + + echo "✅ Environment file updated" + echo "" +} + +main(){ + print_header + check_pre_requisites + update_env_file + + # load plane.env as exported variables + export $(grep -v '^#' plane.env | xargs) + + /usr/local/bin/supervisord -c /etc/supervisor/conf.d/supervisor.conf +} + +main "$@" \ No 
newline at end of file diff --git a/deployments/aio/community/supervisor.conf b/deployments/aio/community/supervisor.conf new file mode 100644 index 00000000000..5a9f7ac3229 --- /dev/null +++ b/deployments/aio/community/supervisor.conf @@ -0,0 +1,115 @@ +[supervisord] +user=root +nodaemon=true +stderr_logfile=/app/logs/error/supervisor.err.log +stdout_logfile=/app/logs/access/supervisor.log + +[program:migrator] +directory=/app/backend +command=sh -c "./bin/docker-entrypoint-migrator.sh" +autostart=true +autorestart=unexpected +stdout_logfile=/app/logs/access/migrator.log +stderr_logfile=/app/logs/error/migrator.err.log +# stderr_logfile=/dev/stderr +stderr_logfile_maxbytes=0 +priority=10 + + +[program:web] +command=sh -c "node /app/web/web/server.js" +autostart=true +autorestart=true +stdout_logfile=/app/logs/access/web.log +stderr_logfile=/app/logs/error/web.err.log +# stderr_logfile=/dev/stderr +stderr_logfile_maxbytes=0 +environment=PORT=3001,HOSTNAME=0.0.0.0 +priority=15 + +[program:space] +command=sh -c "node /app/space/space/server.js" +autostart=true +autorestart=true +stdout_logfile=/app/logs/access/space.log +stderr_logfile=/app/logs/error/space.err.log +# stderr_logfile=/dev/stderr +stderr_logfile_maxbytes=0 +environment=PORT=3002,HOSTNAME=0.0.0.0 +priority=15 + +[program:admin] +command=sh -c "node /app/admin/admin/server.js" +autostart=true +autorestart=true +stdout_logfile=/app/logs/access/admin.log +stderr_logfile=/app/logs/error/admin.err.log +# stderr_logfile=/dev/stderr +stderr_logfile_maxbytes=0 +environment=PORT=3003,HOSTNAME=0.0.0.0 +priority=15 + +[program:api] +directory=/app/backend +command=sh -c "./bin/docker-entrypoint-api.sh" +autostart=true +autorestart=true +stdout_logfile=/app/logs/access/api.log +stdout_logfile_maxbytes=0 +stderr_logfile=/app/logs/error/api.err.log +# stderr_logfile=/dev/stderr +stderr_logfile_maxbytes=0 +environment=PORT=3004,HOSTNAME=0.0.0.0 +priority=15 + + +[program:worker] +directory=/app/backend +command=sh -c 
"./bin/docker-entrypoint-worker.sh" +autostart=true +autorestart=true +stdout_logfile=/app/logs/access/worker.log +stdout_logfile_maxbytes=0 +stderr_logfile=/app/logs/error/worker.err.log +# stderr_logfile=/dev/stderr +stderr_logfile_maxbytes=0 +priority=20 + +[program:beat] +directory=/app/backend +command=sh -c "./bin/docker-entrypoint-beat.sh" +autostart=true +autorestart=true +stdout_logfile=/app/logs/access/beat.log +stdout_logfile_maxbytes=0 +stderr_logfile=/app/logs/error/beat.err.log +# stderr_logfile=/dev/stderr +stderr_logfile_maxbytes=0 +priority=20 + + +[program:live] +directory=/app/live +command=sh -c "node live/dist/start.js live" +autostart=true +autorestart=true +stdout_logfile=/app/logs/access/live.log +stdout_logfile_maxbytes=0 +stderr_logfile=/app/logs/error/live.err.log +# stderr_logfile=/dev/stderr +stderr_logfile_maxbytes=0 +environment=PORT=3005,HOSTNAME=0.0.0.0 +priority=20 + + +[program:proxy] +directory=/app/proxy +command=sh -c "caddy run --config /app/proxy/Caddyfile" +autostart=true +autorestart=true +stdout_logfile=/app/logs/access/proxy.log +stdout_logfile_maxbytes=0 +stderr_logfile=/app/logs/error/proxy.err.log +# stderr_logfile=/dev/stderr +stderr_logfile_maxbytes=0 +priority=20 \ No newline at end of file diff --git a/deployments/aio/community/variables.env b/deployments/aio/community/variables.env new file mode 100644 index 00000000000..99d93e3fda2 --- /dev/null +++ b/deployments/aio/community/variables.env @@ -0,0 +1,53 @@ +APP_DOMAIN=localhost +APP_RELEASE=stable + +# If SSL Cert to be generated, set CERT_EMAIl="email " +CERT_EMAIL= +CERT_ACME_CA=https://acme-v02.api.letsencrypt.org/directory + +SITE_ADDRESS=:80 + +# For DNS Challenge based certificate generation, set the CERT_ACME_DNS, CERT_EMAIL +# CERT_ACME_DNS="acme_dns " +CERT_ACME_DNS= + +WEB_URL=http://localhost +DEBUG=0 +CORS_ALLOWED_ORIGINS=http://localhost +API_BASE_URL=http://localhost:3004 + +#DB SETTINGS +DATABASE_URL= + +# REDIS SETTINGS +REDIS_URL= + +# RabbitMQ 
Settings +AMQP_URL= + +# Secret Key +SECRET_KEY=60gp0byfz2dvffa45cxl20p1scy9xbpf6d8c5y0geejgkyp1b5 + +# DATA STORE SETTINGS +USE_MINIO=0 +AWS_REGION= +AWS_ACCESS_KEY_ID= +AWS_SECRET_ACCESS_KEY= +AWS_S3_ENDPOINT_URL=https://s3.amazonaws.com +AWS_S3_BUCKET_NAME= +BUCKET_NAME= +FILE_SIZE_LIMIT=5242880 + +# Gunicorn Workers +GUNICORN_WORKERS=1 + + + +# Force HTTPS for handling SSL Termination +MINIO_ENDPOINT_SSL=0 + +# API key rate limit +API_KEY_RATE_LIMIT=60/minute + +# Live Server Secret Key +LIVE_SERVER_SECRET_KEY=htbqvBJAgpm9bzvf3r4urJer0ENReatceh diff --git a/deploy/selfhost/README.md b/deployments/cli/community/README.md similarity index 100% rename from deploy/selfhost/README.md rename to deployments/cli/community/README.md diff --git a/deploy/selfhost/build.yml b/deployments/cli/community/build.yml similarity index 100% rename from deploy/selfhost/build.yml rename to deployments/cli/community/build.yml diff --git a/deploy/selfhost/docker-compose.yml b/deployments/cli/community/docker-compose.yml similarity index 91% rename from deploy/selfhost/docker-compose.yml rename to deployments/cli/community/docker-compose.yml index baca1c3caa9..41940ea8a71 100644 --- a/deploy/selfhost/docker-compose.yml +++ b/deployments/cli/community/docker-compose.yml @@ -24,9 +24,16 @@ x-aws-s3-env: &aws-s3-env AWS_S3_BUCKET_NAME: ${AWS_S3_BUCKET_NAME:-uploads} x-proxy-env: &proxy-env - NGINX_PORT: ${NGINX_PORT:-80} - BUCKET_NAME: ${AWS_S3_BUCKET_NAME:-uploads} + SSL: ${SSL:-false} + APP_DOMAIN: ${APP_DOMAIN:-localhost} FILE_SIZE_LIMIT: ${FILE_SIZE_LIMIT:-5242880} + CERT_EMAIL: ${CERT_EMAIL} + CERT_ACME_CA: ${CERT_ACME_CA} + CERT_ACME_DNS: ${CERT_ACME_DNS} + LISTEN_HTTP_PORT: ${LISTEN_PORT:-80} + LISTEN_HTTPS_PORT: ${LISTEN_SSL_PORT:-443} + BUCKET_NAME: ${AWS_S3_BUCKET_NAME:-uploads} + SITE_ADDRESS: ${SITE_ADDRESS:-:80} x-mq-env: &mq-env # RabbitMQ Settings RABBITMQ_HOST: ${RABBITMQ_HOST:-plane-mq} @@ -213,21 +220,30 @@ services: # Comment this if you already have a reverse proxy 
running proxy: image: artifacts.plane.so/makeplane/plane-proxy:${APP_RELEASE:-stable} - ports: - - target: 80 - published: ${NGINX_PORT:-80} - protocol: tcp - mode: host - environment: - <<: *proxy-env deploy: replicas: 1 restart_policy: condition: on-failure + environment: + <<: *proxy-env + ports: + - target: 80 + published: ${LISTEN_HTTP_PORT:-80} + protocol: tcp + mode: host + - target: 443 + published: ${LISTEN_HTTPS_PORT:-443} + protocol: tcp + mode: host + volumes: + - proxy_config:/config + - proxy_data:/data depends_on: - - web - - api - - space + - web + - api + - space + - admin + - live volumes: pgdata: @@ -238,3 +254,5 @@ volumes: logs_beat-worker: logs_migrator: rabbitmq_data: + proxy_config: + proxy_data: diff --git a/deploy/selfhost/images/download.png b/deployments/cli/community/images/download.png similarity index 100% rename from deploy/selfhost/images/download.png rename to deployments/cli/community/images/download.png diff --git a/deploy/selfhost/images/migrate-error.png b/deployments/cli/community/images/migrate-error.png similarity index 100% rename from deploy/selfhost/images/migrate-error.png rename to deployments/cli/community/images/migrate-error.png diff --git a/deploy/selfhost/images/restart.png b/deployments/cli/community/images/restart.png similarity index 100% rename from deploy/selfhost/images/restart.png rename to deployments/cli/community/images/restart.png diff --git a/deploy/selfhost/images/started.png b/deployments/cli/community/images/started.png similarity index 100% rename from deploy/selfhost/images/started.png rename to deployments/cli/community/images/started.png diff --git a/deploy/selfhost/images/stopped.png b/deployments/cli/community/images/stopped.png similarity index 100% rename from deploy/selfhost/images/stopped.png rename to deployments/cli/community/images/stopped.png diff --git a/deploy/selfhost/images/upgrade.png b/deployments/cli/community/images/upgrade.png similarity index 100% rename from 
deploy/selfhost/images/upgrade.png rename to deployments/cli/community/images/upgrade.png diff --git a/deploy/selfhost/install.sh b/deployments/cli/community/install.sh similarity index 99% rename from deploy/selfhost/install.sh rename to deployments/cli/community/install.sh index 9f0065f66c3..8b8a92ec122 100755 --- a/deploy/selfhost/install.sh +++ b/deployments/cli/community/install.sh @@ -9,7 +9,7 @@ export DOCKERHUB_USER=artifacts.plane.so/makeplane export PULL_POLICY=${PULL_POLICY:-if_not_present} export GH_REPO=makeplane/plane export RELEASE_DOWNLOAD_URL="https://github.com/$GH_REPO/releases/download" -export FALLBACK_DOWNLOAD_URL="https://raw.githubusercontent.com/$GH_REPO/$BRANCH/deploy/selfhost" +export FALLBACK_DOWNLOAD_URL="https://raw.githubusercontent.com/$GH_REPO/$BRANCH/deployments/cli/community" CPU_ARCH=$(uname -m) OS_NAME=$(uname) @@ -196,7 +196,7 @@ function buildYourOwnImage(){ REPO=https://github.com/$GH_REPO.git git clone "$REPO" "$PLANE_TEMP_CODE_DIR" --branch "$BRANCH" --single-branch --depth 1 - cp "$PLANE_TEMP_CODE_DIR/deploy/selfhost/build.yml" "$PLANE_TEMP_CODE_DIR/build.yml" + cp "$PLANE_TEMP_CODE_DIR/deployments/cli/community/build.yml" "$PLANE_TEMP_CODE_DIR/build.yml" cd "$PLANE_TEMP_CODE_DIR" || exit diff --git a/deploy/selfhost/migration-0.13-0.14.sh b/deployments/cli/community/migration-0.13-0.14.sh similarity index 100% rename from deploy/selfhost/migration-0.13-0.14.sh rename to deployments/cli/community/migration-0.13-0.14.sh diff --git a/deploy/selfhost/restore-airgapped.sh b/deployments/cli/community/restore-airgapped.sh similarity index 100% rename from deploy/selfhost/restore-airgapped.sh rename to deployments/cli/community/restore-airgapped.sh diff --git a/deploy/selfhost/restore.sh b/deployments/cli/community/restore.sh similarity index 100% rename from deploy/selfhost/restore.sh rename to deployments/cli/community/restore.sh diff --git a/deploy/selfhost/variables.env b/deployments/cli/community/variables.env similarity 
index 75% rename from deploy/selfhost/variables.env rename to deployments/cli/community/variables.env index 78031a4acd4..0edda2a898f 100644 --- a/deploy/selfhost/variables.env +++ b/deployments/cli/community/variables.env @@ -1,5 +1,6 @@ APP_DOMAIN=localhost APP_RELEASE=stable +SSL=false WEB_REPLICAS=1 SPACE_REPLICAS=1 @@ -9,7 +10,8 @@ WORKER_REPLICAS=1 BEAT_WORKER_REPLICAS=1 LIVE_REPLICAS=1 -NGINX_PORT=80 +LISTEN_HTTP_PORT=80 +LISTEN_HTTPS_PORT=443 WEB_URL=http://${APP_DOMAIN} DEBUG=0 CORS_ALLOWED_ORIGINS=http://${APP_DOMAIN} @@ -38,6 +40,19 @@ RABBITMQ_PASSWORD=plane RABBITMQ_VHOST=plane AMQP_URL= +# If an SSL certificate is to be generated, set CERT_EMAIL="<email>" +CERT_ACME_CA=https://acme-v02.api.letsencrypt.org/directory +TRUSTED_PROXIES=0.0.0.0/0 +SITE_ADDRESS=:80 +CERT_EMAIL= + + + +# For DNS Challenge based certificate generation, set CERT_ACME_DNS and CERT_EMAIL # CERT_ACME_DNS="acme_dns " +CERT_ACME_DNS= + + # Secret Key SECRET_KEY=60gp0byfz2dvffa45cxl20p1scy9xbpf6d8c5y0geejgkyp1b5 diff --git a/deploy/kubernetes/README.md b/deployments/kubernetes/community/README.md similarity index 75% rename from deploy/kubernetes/README.md rename to deployments/kubernetes/community/README.md index 905721813a8..c1000c7327c 100644 --- a/deploy/kubernetes/README.md +++ b/deployments/kubernetes/community/README.md @@ -1,5 +1,5 @@ -# Helm Chart +# Helm Chart: Plane Community Click on the below link to access the helm chart instructions.
-[![Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/makeplane)](https://artifacthub.io/packages/search?repo=makeplane) +[![Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/makeplane)](https://artifacthub.io/packages/helm/makeplane/plane-ce) diff --git a/deploy/selfhost/swarm.sh b/deployments/swarm/community/swarm.sh similarity index 99% rename from deploy/selfhost/swarm.sh rename to deployments/swarm/community/swarm.sh index c58f05e51ee..d496c25e6de 100755 --- a/deploy/selfhost/swarm.sh +++ b/deployments/swarm/community/swarm.sh @@ -9,7 +9,7 @@ export DOCKERHUB_USER=artifacts.plane.so/makeplane export GH_REPO=makeplane/plane export RELEASE_DOWNLOAD_URL="https://github.com/$GH_REPO/releases/download" -export FALLBACK_DOWNLOAD_URL="https://raw.githubusercontent.com/$GH_REPO/$BRANCH/deploy/selfhost" +export FALLBACK_DOWNLOAD_URL="https://raw.githubusercontent.com/$GH_REPO/$BRANCH/deployments/cli/community" OS_NAME=$(uname) @@ -150,8 +150,7 @@ function updateEnvFile() { function download() { cd $SCRIPT_DIR || exit 1 TS=$(date +%s) - if [ -f "$PLANE_INSTALL_DIR/docker-compose.yml" ] - then + if [ -f "$PLANE_INSTALL_DIR/docker-compose.yml" ]; then mv $PLANE_INSTALL_DIR/docker-compose.yml $PLANE_INSTALL_DIR/archive/$TS.docker-compose.yml fi From c9e9a81a06c860e09ea75d653d158e3b0ce84bdf Mon Sep 17 00:00:00 2001 From: Manish Gupta Date: Fri, 11 Jul 2025 16:01:01 +0530 Subject: [PATCH 2/3] chore: update Dockerfile and supervisor configuration - Changed `apk add` command in Dockerfile to use `--no-cache` for better image size management. - Updated `build.sh` to ensure proper directory navigation with quotes around `dirname "$0"`. - Modified `supervisor.conf` to set `stderr_logfile_maxbytes` to 50MB and added `stderr_logfile_backups` for better log management across multiple services. 
--- apps/proxy/Dockerfile.ce | 2 +- deployments/aio/community/build.sh | 2 +- deployments/aio/community/supervisor.conf | 27 +++++++++++++++-------- 3 files changed, 20 insertions(+), 11 deletions(-) diff --git a/apps/proxy/Dockerfile.ce b/apps/proxy/Dockerfile.ce index abec06b1c39..4d2f6dd0af7 100644 --- a/apps/proxy/Dockerfile.ce +++ b/apps/proxy/Dockerfile.ce @@ -7,7 +7,7 @@ RUN xcaddy build \ FROM caddy:2.10.0-builder-alpine -RUN apk add nss-tools bash curl +RUN apk add --no-cache nss-tools bash curl COPY --from=caddy-builder /usr/bin/caddy /usr/bin/caddy diff --git a/deployments/aio/community/build.sh b/deployments/aio/community/build.sh index fa1f0b1ba88..e69ac9a50b3 100755 --- a/deployments/aio/community/build.sh +++ b/deployments/aio/community/build.sh @@ -51,7 +51,7 @@ if ! command -v yq &> /dev/null; then sudo chmod +x /usr/local/bin/yq fi -cd $(dirname $0) +cd $(dirname "$0") string_replace(){ local file="$1" diff --git a/deployments/aio/community/supervisor.conf b/deployments/aio/community/supervisor.conf index 5a9f7ac3229..6a3b1b87d9e 100644 --- a/deployments/aio/community/supervisor.conf +++ b/deployments/aio/community/supervisor.conf @@ -12,7 +12,8 @@ autorestart=unexpected stdout_logfile=/app/logs/access/migrator.log stderr_logfile=/app/logs/error/migrator.err.log # stderr_logfile=/dev/stderr -stderr_logfile_maxbytes=0 +stderr_logfile_maxbytes=50MB +stderr_logfile_backups=5 priority=10 @@ -23,7 +24,8 @@ autorestart=true stdout_logfile=/app/logs/access/web.log stderr_logfile=/app/logs/error/web.err.log # stderr_logfile=/dev/stderr -stderr_logfile_maxbytes=0 +stderr_logfile_maxbytes=50MB +stderr_logfile_backups=5 environment=PORT=3001,HOSTNAME=0.0.0.0 priority=15 @@ -34,7 +36,8 @@ autorestart=true stdout_logfile=/app/logs/access/space.log stderr_logfile=/app/logs/error/space.err.log # stderr_logfile=/dev/stderr -stderr_logfile_maxbytes=0 +stderr_logfile_maxbytes=50MB +stderr_logfile_backups=5 environment=PORT=3002,HOSTNAME=0.0.0.0 priority=15 @@ -45,7 
+48,8 @@ autorestart=true stdout_logfile=/app/logs/access/admin.log stderr_logfile=/app/logs/error/admin.err.log # stderr_logfile=/dev/stderr -stderr_logfile_maxbytes=0 +stderr_logfile_maxbytes=50MB +stderr_logfile_backups=5 environment=PORT=3003,HOSTNAME=0.0.0.0 priority=15 @@ -58,7 +62,8 @@ stdout_logfile=/app/logs/access/api.log stdout_logfile_maxbytes=0 stderr_logfile=/app/logs/error/api.err.log # stderr_logfile=/dev/stderr -stderr_logfile_maxbytes=0 +stderr_logfile_maxbytes=50MB +stderr_logfile_backups=5 environment=PORT=3004,HOSTNAME=0.0.0.0 priority=15 @@ -72,7 +77,8 @@ stdout_logfile=/app/logs/access/worker.log stdout_logfile_maxbytes=0 stderr_logfile=/app/logs/error/worker.err.log # stderr_logfile=/dev/stderr -stderr_logfile_maxbytes=0 +stderr_logfile_maxbytes=50MB +stderr_logfile_backups=5 priority=20 [program:beat] @@ -84,7 +90,8 @@ stdout_logfile=/app/logs/access/beat.log stdout_logfile_maxbytes=0 stderr_logfile=/app/logs/error/beat.err.log # stderr_logfile=/dev/stderr -stderr_logfile_maxbytes=0 +stderr_logfile_maxbytes=50MB +stderr_logfile_backups=5 priority=20 @@ -97,7 +104,8 @@ stdout_logfile=/app/logs/access/live.log stdout_logfile_maxbytes=0 stderr_logfile=/app/logs/error/live.err.log # stderr_logfile=/dev/stderr -stderr_logfile_maxbytes=0 +stderr_logfile_maxbytes=50MB +stderr_logfile_backups=5 environment=PORT=3005,HOSTNAME=0.0.0.0 priority=20 @@ -111,5 +119,6 @@ stdout_logfile=/app/logs/access/proxy.log stdout_logfile_maxbytes=0 stderr_logfile=/app/logs/error/proxy.err.log # stderr_logfile=/dev/stderr -stderr_logfile_maxbytes=0 +stderr_logfile_maxbytes=50MB +stderr_logfile_backups=5 priority=20 \ No newline at end of file From 167dabf9e42293a2185076bad35121a33f0fd633 Mon Sep 17 00:00:00 2001 From: sriramveeraghanta Date: Mon, 14 Jul 2025 14:37:19 +0530 Subject: [PATCH 3/3] chore: consistent node and python version --- deployments/aio/community/Dockerfile | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git 
a/deployments/aio/community/Dockerfile b/deployments/aio/community/Dockerfile index 52a21d07da5..6c969405257 100644 --- a/deployments/aio/community/Dockerfile +++ b/deployments/aio/community/Dockerfile @@ -4,7 +4,8 @@ FROM --platform=$BUILDPLATFORM tonistiigi/binfmt AS binfmt # ************************************************** # STAGE 0: Image Loading # ************************************************** -FROM node:20-alpine AS node +FROM node:22-alpine AS node + FROM artifacts.plane.so/makeplane/plane-frontend:${PLANE_VERSION} AS web-img FROM artifacts.plane.so/makeplane/plane-backend:${PLANE_VERSION} AS backend-img FROM artifacts.plane.so/makeplane/plane-space:${PLANE_VERSION} AS space-img @@ -15,7 +16,7 @@ FROM artifacts.plane.so/makeplane/plane-proxy:${PLANE_VERSION} AS proxy-img # ************************************************** # STAGE 1: Runner # ************************************************** -FROM python:3.12.5-alpine AS runner +FROM python:3.12.10-alpine AS runner WORKDIR /app