From 8e5ac07a32142898961aefa51bb30234eb84d044 Mon Sep 17 00:00:00 2001 From: maxklema Date: Wed, 30 Jul 2025 12:16:08 -0400 Subject: [PATCH 1/8] LDAP configuration and prune scripts --- container creation/configureLDAP.sh | 34 +++++ container maintenance/start_services.sh | 15 +- intern-phxdc-pve1/prune_iptables.sh | 173 ++++++++++++++++++++++++ intern-phxdc-pve1/prune_temp_files.sh | 67 +++++++++ 4 files changed, 280 insertions(+), 9 deletions(-) create mode 100755 container creation/configureLDAP.sh create mode 100644 intern-phxdc-pve1/prune_iptables.sh create mode 100644 intern-phxdc-pve1/prune_temp_files.sh diff --git a/container creation/configureLDAP.sh b/container creation/configureLDAP.sh new file mode 100755 index 00000000..950601e2 --- /dev/null +++ b/container creation/configureLDAP.sh @@ -0,0 +1,34 @@ +#!/bin/bash +# Script to connect a container to the LDAP server via SSSD +# Last Modified by Maxwell Klema on July 29th, 2025 +# ----------------------------------------------------- + +# Curl Pown.sh script to install SSSD and configure LDAP +pct enter $CONTAINER_ID < /dev/null 2>&1 && \ +chmod +x pown.sh +EOF + +# Copy .env file to container +ENV_FILE="/var/lib/vz/snippets/.env" +pct enter $CONTAINER_ID < /root/.env +$(cat "$ENV_FILE") +EOT +EOF + +# Run the pown.sh script to configure LDAP +pct exec $CONTAINER_ID -- bash -c "cd /root && ./pown.sh" > /dev/null 2>&1 + +# remove ldap_tls_cert from /etc/sssd/sssd.conf +pct exec $CONTAINER_ID -- sed -i '/ldap_tls_cacert/d' /etc/sssd/sssd.conf > /dev/null 2>&1 + +# Add TLS_REQCERT to never in ROCKY + +if [ "${LINUX_DISTRO^^}" == "ROCKY" ]; then + pct exec $CONTAINER_ID -- bash -c "echo 'TLS_REQCERT never' >> /etc/openldap/ldap.conf" > /dev/null 2>&1 + pct exec $CONTAINER_ID -- bash -c "authselect select sssd --force" > /dev/null 2>&1 + pct exec $CONTAINER_ID -- bash -c "systemctl restart sssd" > /dev/null 2>&1 +fi diff --git a/container maintenance/start_services.sh b/container maintenance/start_services.sh 
index 165a96a0..5fa2f45c 100644 --- a/container maintenance/start_services.sh +++ b/container maintenance/start_services.sh @@ -25,17 +25,14 @@ UPDATE_CONTAINER="${17}" CONTAINER_NAME="${CONTAINER_NAME,,}" sleep 3 -pct stop $CONTAINER_ID > /dev/null 2>&1 - -echo "$START_COMMAND" -echo "$BUILD_COMMAND" -echo "$RUNTIME_LANGUAGE" - -sleep 10 - +if (( $CONTAINER_ID % 2 == 0 )); then + ssh root@10.15.0.5 "pct stop $CONTAINER_ID" > /dev/null 2>&1 +else + pct stop $CONTAINER_ID > /dev/null 2>&1 +fi # Create template if on default branch ==== -# source /var/lib/vz/snippets/helper-scripts/create-template.sh +source /var/lib/vz/snippets/helper-scripts/create-template.sh if (( $CONTAINER_ID % 2 == 0 )); then diff --git a/intern-phxdc-pve1/prune_iptables.sh b/intern-phxdc-pve1/prune_iptables.sh new file mode 100644 index 00000000..abc1aec2 --- /dev/null +++ b/intern-phxdc-pve1/prune_iptables.sh @@ -0,0 +1,173 @@ +#!/bin/bash + +# Script to prune iptables rules for containers that no longer exist +# Author: Carter Myers + +# Enable strict mode: +# -e: Exit immediately if a command exits with a non-zero status. +# -u: Treat unset variables as an error when substituting. +# -o pipefail: The return value of a pipeline is the status of the last command +# to exit with a non-zero status, or zero if all commands exit successfully. +set -euo pipefail + +# --- Configuration --- +REMOTE_HOST="intern-nginx" +REMOTE_FILE="/etc/nginx/port_map.json" +LOCAL_FILE="/tmp/port_map.json" +LOG_FILE="/var/log/prune_iptables.log" +PVE_NODES=("localhost" "10.15.0.5") + +# Function to log messages with a timestamp +log_message() { + echo "[$(date)] $1" >> "$LOG_FILE" +} + +# --- 1. Fetch port_map.json from remote host --- +log_message "Fetching port_map.json from $REMOTE_HOST..." +if ! scp "$REMOTE_HOST:$REMOTE_FILE" "$LOCAL_FILE" >/dev/null 2>&1; then + log_message "ERROR: Could not fetch $REMOTE_FILE from $REMOTE_HOST" + exit 1 +fi +log_message "Successfully fetched $REMOTE_FILE to $LOCAL_FILE." 
+ +# --- 2. Build list of existing hostnames --- +EXISTING_HOSTNAMES="" +for node in "${PVE_NODES[@]}"; do + log_message "Checking containers on $node..." + if [[ "$node" == "localhost" ]]; then + CTIDS=$(pct list | awk 'NR>1 {print $1}' || true) + log_message "DEBUG: Local CTIDs: [${CTIDS:-}]" + for id in $CTIDS; do + hn=$(pct config "$id" 2>/dev/null | grep -i '^hostname:' | awk '{print $2}' | tr -d '[:space:]' || true) + [[ -n "$hn" ]] && EXISTING_HOSTNAMES+="$hn"$'\n' + done + else + log_message "DEBUG: Checking remote node: $node" + CTIDS_CMD="pct list | awk 'NR>1 {print \$1}'" + CTIDS_OUTPUT=$(ssh "$node" "$CTIDS_CMD" 2>&1 || true) + if [[ "$CTIDS_OUTPUT" =~ "Permission denied" || "$CTIDS_OUTPUT" =~ "Connection refused" || "$CTIDS_OUTPUT" =~ "Host key verification failed" ]]; then + log_message "ERROR: SSH to $node failed: $CTIDS_OUTPUT" + continue + fi + log_message "DEBUG: CTIDs on $node: [${CTIDS_OUTPUT:-}]" + for id in $CTIDS_OUTPUT; do + HN_CMD="pct config $id 2>/dev/null | grep -i '^hostname:' | awk '{print \$2}'" + HN_OUTPUT=$(ssh "$node" "$HN_CMD" 2>&1 || true) + if [[ "$HN_OUTPUT" =~ "Permission denied" || "$HN_OUTPUT" =~ "No such file" ]]; then + log_message "ERROR: Failed to get hostname for $id on $node: $HN_OUTPUT" + continue + fi + hn=$(echo "$HN_OUTPUT" | tr -d '[:space:]') + [[ -n "$hn" ]] && EXISTING_HOSTNAMES+="$hn"$'\n' + done + fi +done + +# Remove any empty lines from EXISTING_HOSTNAMES +EXISTING_HOSTNAMES=$(echo "$EXISTING_HOSTNAMES" | sed '/^$/d') +log_message "Existing hostnames collected:" +log_message "$EXISTING_HOSTNAMES" + +# --- 3. Prune iptables and port_map.json --- +log_message "Pruning iptables and port_map.json..." 
+cp "$LOCAL_FILE" "$LOCAL_FILE.bak" +log_message "Created backup of $LOCAL_FILE at $LOCAL_FILE.bak" + +HOSTNAMES_IN_JSON=$(jq -r 'keys[]' "$LOCAL_FILE") +mapfile -t EXISTING_ARRAY <<< "$EXISTING_HOSTNAMES" + +# Helper function to check if a hostname exists in the collected list +hostname_exists() { + local h=$(echo "$1" | tr -d '[:space:]') + for existing in "${EXISTING_ARRAY[@]}"; do + if [[ "${h,,}" == "${existing,,}" ]]; then # Case-insensitive comparison + return 0 + fi + done + return 1 +} + +for hostname in $HOSTNAMES_IN_JSON; do + trimmed_hostname=$(echo "$hostname" | tr -d '[:space:]') + if hostname_exists "$trimmed_hostname"; then + log_message "Keeping entry: $trimmed_hostname" + else + ip=$(jq -r --arg h "$hostname" '.[$h].ip // "unknown"' "$LOCAL_FILE") + ports=$(jq -c --arg h "$hostname" '.[$h].ports // {}' "$LOCAL_FILE") + log_message "Stale entry detected: $hostname (IP: $ip, Ports: $ports) - removing..." + + # --- IPTABLES REMOVAL --- + # Capture rules into an array first to avoid subshell issues with 'while read' + mapfile -t RULES_TO_DELETE < <(sudo iptables -t nat -S | grep -w "$ip" || true) # Added sudo, || true to prevent pipefail if grep finds nothing + + if [[ ${#RULES_TO_DELETE[@]} -gt 0 ]]; then + log_message "Found ${#RULES_TO_DELETE[@]} iptables rules for $hostname. Attempting removal..." + for rule in "${RULES_TO_DELETE[@]}"; do + cleaned_rule=$(echo "$rule" | sed 's/^-A /-D /') + log_message "Attempting to remove iptables rule: sudo iptables -t nat $cleaned_rule" + if sudo iptables -t nat $cleaned_rule; then + log_message "Removed iptables rule: $cleaned_rule" + else + log_message "ERROR: Failed to remove iptables rule: $cleaned_rule (Exit status: $?)" + fi + done + else + log_message "No iptables rules found for $hostname to remove." + fi + + # --- JSON ENTRY REMOVAL --- + log_message "Attempting to remove $hostname from local port_map.json..." 
+ if jq "del(.\"$hostname\")" "$LOCAL_FILE" > "${LOCAL_FILE}.tmp"; then + if mv "${LOCAL_FILE}.tmp" "$LOCAL_FILE"; then + log_message "Successfully removed $hostname from local port_map.json." + else + log_message "ERROR: Failed to move temporary file to $LOCAL_FILE for $hostname." + exit 1 # Critical failure, exit + fi + else + log_message "ERROR: jq failed to delete $hostname from $LOCAL_FILE." + exit 1 # Critical failure, exit + fi + + # Confirm deletion from local file + if jq -e --arg h "$hostname" 'has($h)' "$LOCAL_FILE" >/dev/null; then + log_message "ERROR: $hostname still exists in local port_map.json after deletion attempt!" + else + log_message "Confirmed $hostname removed from local port_map.json." + fi + fi +done + +# --- 4. Upload and verify updated file on remote --- +log_message "Uploading updated port_map.json to $REMOTE_HOST..." +TEMP_REMOTE="/tmp/port_map.json" + +if scp "$LOCAL_FILE" "$REMOTE_HOST:$TEMP_REMOTE" >/dev/null 2>&1; then + log_message "Uploaded to $REMOTE_HOST:$TEMP_REMOTE" +else + log_message "ERROR: Failed to upload $TEMP_REMOTE to $REMOTE_HOST" + exit 1 +fi + +# Check if deleted hostnames still exist in uploaded file +log_message "Verifying remote file content..." +for hostname in $HOSTNAMES_IN_JSON; do + if ! hostname_exists "$hostname"; then # Only check for hostnames that *should* have been deleted + if ssh "$REMOTE_HOST" "grep -q '\"$hostname\"' $TEMP_REMOTE"; then + log_message "WARNING: $hostname still exists in uploaded $TEMP_REMOTE on $REMOTE_HOST!" + else + log_message "Verified $hostname was removed in uploaded file on $REMOTE_HOST." + fi + fi +done + +# Move uploaded file into place on the remote host +log_message "Moving uploaded file into final position on $REMOTE_HOST..." 
+if ssh "$REMOTE_HOST" "sudo cp $TEMP_REMOTE $REMOTE_FILE && sudo chown root:root $REMOTE_FILE && sudo chmod 644 $REMOTE_FILE && rm $TEMP_REMOTE"; then + log_message "Copied updated port_map.json to $REMOTE_FILE on $REMOTE_HOST" +else + log_message "ERROR: Failed to replace $REMOTE_FILE on $REMOTE_HOST" + exit 1 +fi + +log_message "Prune complete." \ No newline at end of file diff --git a/intern-phxdc-pve1/prune_temp_files.sh b/intern-phxdc-pve1/prune_temp_files.sh new file mode 100644 index 00000000..1b171fd1 --- /dev/null +++ b/intern-phxdc-pve1/prune_temp_files.sh @@ -0,0 +1,67 @@ +#!/bin/bash +# Script to prune all temporary files (env vars, protocols, services, and public keys) +# Last Updated July 28th 2025 Maxwell Klema + +LOG_FILE="/var/log/pruneTempFiles.log" + +writeLog() { + echo "[$(date +'%Y-%m-%d %H:%M:%S')]: $1" >> "$LOG_FILE" +} + +# Function to remove temporary environment variable Folders +removeTempEnvVars() { + TEMP_ENV_FOLDER="/var/lib/vz/snippets/container-env-vars" + while read -r line; do + if [[ "$line" == /var/lib/vz/snippets/container-env-vars/env_* ]]; then + rm -rf "$line" > /dev/null 2>&1 + writeLog "Removed temporary environment variable folder: $line" + fi + done < <(find "$TEMP_ENV_FOLDER" -maxdepth 1 -type d -name "env_*") +} + +# Function to remove temporary services file +removeTempServices() { + TEMP_SERVICES_FOLDER="/var/lib/vz/snippets/container-services" + while read -r line; do + if [[ "$line" == /var/lib/vz/snippets/container-services/services_* ]]; then + rm -f "$line" + writeLog "Removed temporary services file: $line" + fi + done < <(find "$TEMP_SERVICES_FOLDER" -maxdepth 1 -type f -name "services_*") +} + +# Function to remove temporary public key files +removeTempPublicKeys() { + TEMP_PUB_FOLDER="/var/lib/vz/snippets/container-public-keys" + while read -r line; do + if [[ "$line" == /var/lib/vz/snippets/container-public-keys/key_* ]]; then + rm -f "$line" + writeLog "Removed temporary public key file: $line" + fi + 
done < <(find "$TEMP_PUB_FOLDER" -maxdepth 1 -type f -name "key_*") +} + +# Function to remove temporary protocol files +removeTempProtocols() { + TEMP_PROTOCOL_FOLDER="/var/lib/vz/snippets/container-port-maps" + while read -r line; do + if [[ "$line" == /var/lib/vz/snippets/container-port-maps/protocol_list* ]]; then + rm -f "$line" + writeLog "Removed temporary protocol file: $line" + fi + done < <(find "$TEMP_PROTOCOL_FOLDER" -maxdepth 1 -type f -name "protocol_list*") +} + +# Main function to prune all temporary files +pruneTempFiles() { + writeLog "Starting to prune temporary files..." + removeTempEnvVars + removeTempServices + removeTempPublicKeys + removeTempProtocols + writeLog "Finished pruning temporary files." +} + +# Execute the main function +pruneTempFiles +exit 0 \ No newline at end of file From eb5ae365ebadde094fe7e2f95967b08a416f47b5 Mon Sep 17 00:00:00 2001 From: maxklema Date: Thu, 31 Jul 2025 20:47:09 -0400 Subject: [PATCH 2/8] proxmox deployment changes --- container creation/create-container.sh | 77 +++++++++++++++---------- container creation/deployOnStart.sh | 32 +++++----- container creation/setup-runner.sh | 9 +-- container maintenance/start_services.sh | 6 +- 4 files changed, 68 insertions(+), 56 deletions(-) diff --git a/container creation/create-container.sh b/container creation/create-container.sh index dd485893..39098df3 100644 --- a/container creation/create-container.sh +++ b/container creation/create-container.sh @@ -33,7 +33,8 @@ echoContainerDetails() { echo -e "šŸ“¦ ${BLUE}Container ID :${RESET} $CONTAINER_ID" echo -e "🌐 ${MAGENTA}Internal IP :${RESET} $CONTAINER_IP" echo -e "šŸ”— ${GREEN}Domain Name :${RESET} https://$CONTAINER_NAME.opensource.mieweb.org" - echo -e "šŸ› ļø ${BLUE}SSH Access :${RESET} ssh -p $SSH_PORT root@$CONTAINER_NAME.opensource.mieweb.org" + echo -e "šŸ› ļø ${BLUE}SSH Access :${RESET} ssh -p $SSH_PORT $PROXMOX_USERNAME@$CONTAINER_NAME.opensource.mieweb.org" + echo -e "šŸ”‘ ${BLUE}Container Password 
:${RESET} Your proxmox account password" echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" echo -e "${BOLD}${MAGENTA}NOTE: Additional background scripts are being ran in detached terminal sessions.${RESET}" echo -e "${BOLD}${MAGENTA}Wait up to two minutes for all processes to complete.${RESET}" @@ -48,28 +49,27 @@ trap cleanup SIGINT SIGTERM SIGHUP CONTAINER_NAME="${CONTAINER_NAME,,}" CONTAINER_NAME="$1" -CONTAINER_PASSWORD="$2" -GH_ACTION="$3" -HTTP_PORT="$4" -PROXMOX_USERNAME="$5" -PUB_FILE="$6" -PROTOCOL_FILE="$7" +GH_ACTION="$2" +HTTP_PORT="$3" +PROXMOX_USERNAME="$4" +USERNAME_ONLY="${PROXMOX_USERNAME%@*}" +PUB_FILE="$5" +PROTOCOL_FILE="$6" # Deployment ENVS -DEPLOY_ON_START="$8" -PROJECT_REPOSITORY="$9" -PROJECT_BRANCH="${10}" -PROJECT_ROOT="${11}" -INSTALL_COMMAND=$(echo "${12}" | base64 -d) -BUILD_COMMAND=$(echo "${13}" | base64 -d) -START_COMMAND=$(echo "${14}" | base64 -d) -RUNTIME_LANGUAGE=$(echo "${15}" | base64 -d) -ENV_BASE_FOLDER="${16}" -SERVICES_BASE_FILE="${17}" -LINUX_DISTRO="${18}" -MULTI_COMPONENTS="${19}" -ROOT_START_COMMAND="${20}" -GITHUB_PAT="${21}" +DEPLOY_ON_START="$7" +PROJECT_REPOSITORY="$8" +PROJECT_BRANCH="$9" +PROJECT_ROOT="${10}" +INSTALL_COMMAND=$(echo "${11}" | base64 -d) +BUILD_COMMAND=$(echo "${12}" | base64 -d) +START_COMMAND=$(echo "${13}" | base64 -d) +RUNTIME_LANGUAGE=$(echo "${14}" | base64 -d) +ENV_BASE_FOLDER="${15}" +SERVICES_BASE_FILE="${16}" +LINUX_DISTRO="${17}" +MULTI_COMPONENTS="${18}" +ROOT_START_COMMAND="${19}" # Pick the correct template to clone ===== @@ -109,19 +109,16 @@ if [ "${GH_ACTION^^}" != "Y" ]; then pct set $CONTAINER_ID \ --tags "$PROXMOX_USERNAME" \ --tags "$LINUX_DISTRO" \ + --tags "LDAP" \ --onboot 1 > /dev/null 2>&1 pct start $CONTAINER_ID > /dev/null 2>&1 pveum aclmod /vms/$CONTAINER_ID --user "$PROXMOX_USERNAME@pve" --role PVEVMUser > /dev/null 2>&1 - #pct delete $CONTAINER_ID # Get the Container IP Address and install some packages echo "ā³ Waiting for DHCP 
to allocate IP address to container..." sleep 5 - - # Set password inside the container - pct exec $CONTAINER_ID -- bash -c "echo 'root:$CONTAINER_PASSWORD' | chpasswd" > /dev/null 2>&1 else CONTAINER_ID=$( { pct list; ssh root@10.15.0.5 'pct list'; } | awk -v name="$CONTAINER_NAME" '$3 == name {print $1}') fi @@ -133,7 +130,23 @@ if [ -f "/var/lib/vz/snippets/container-public-keys/$PUB_FILE" ]; then rm -rf /var/lib/vz/snippets/container-public-keys/$PUB_FILE > /dev/null 2>&1 fi -CONTAINER_IP=$(pct exec $CONTAINER_ID -- hostname -I | awk '{print $1}') +CONTAINER_IP="" +attempts=0 +max_attempts=10 + +while [[ -z "$CONTAINER_IP" && $attempts -lt $max_attempts ]]; do + CONTAINER_IP=$(pct exec "$CONTAINER_ID" -- hostname -I | awk '{print $1}') + [[ -z "$CONTAINER_IP" ]] && sleep 2 && ((attempts++)) +done + +if [[ -z "$CONTAINER_IP" ]]; then + echo "āŒ Timed out waiting for container to get an IP address." + exit 1 +fi + +# Set up SSSD to communicate with LDAP server ==== +echo "ā³ Configuring LDAP connection via SSSD..." +source /var/lib/vz/snippets/helper-scripts/configureLDAP.sh # Attempt to Automatically Deploy Project Inside Container @@ -154,14 +167,15 @@ fi pct exec $CONTAINER_ID -- bash -c "cd /root && touch container-updates.log" # Run Contianer Provision Script to add container to port_map.json - +echo "ā³ Running Container Provision Script..." 
if [ -f "/var/lib/vz/snippets/container-port-maps/$PROTOCOL_FILE" ]; then - /var/lib/vz/snippets/register-container.sh $CONTAINER_ID $HTTP_PORT /var/lib/vz/snippets/container-port-maps/$PROTOCOL_FILE - rm -rf /var/lib/vz/snippets/container-port-maps/$PROTOCOL_FILE > /dev/null 2>&1 + /var/lib/vz/snippets/register-container.sh $CONTAINER_ID $HTTP_PORT /var/lib/vz/snippets/container-port-maps/$PROTOCOL_FILE "$USERNAME_ONLY" + rm -rf /var/lib/vz/snippets/container-port-maps/$PROTOCOL_FILE > /dev/null 2>&1 else - /var/lib/vz/snippets/register-container.sh $CONTAINER_ID $HTTP_PORT + /var/lib/vz/snippets/register-container.sh $CONTAINER_ID $HTTP_PORT "" "$PROXMOX_USERNAME" fi + SSH_PORT=$(iptables -t nat -S PREROUTING | grep "to-destination $CONTAINER_IP:22" | awk -F'--dport ' '{print $2}' | awk '{print $1}' | head -n 1 || true) # Output container details and start services if necessary ===== @@ -189,11 +203,10 @@ bash /var/lib/vz/snippets/start_services.sh "$RUNTIME_LANGUAGE_B64" "$GH_ACTION" "$PROJECT_BRANCH" -"$GITHUB_PAT" ) # Safely quote each argument for the shell QUOTED_CMD=$(printf ' %q' "${CMD[@]}") tmux new-session -d -s "$CONTAINER_NAME" "$QUOTED_CMD" -exit 0 \ No newline at end of file +exit 0 diff --git a/container creation/deployOnStart.sh b/container creation/deployOnStart.sh index ae1bb486..82d01a62 100755 --- a/container creation/deployOnStart.sh +++ b/container creation/deployOnStart.sh @@ -13,7 +13,9 @@ echo "Repo base name: $REPO_BASE_NAME" pct enter $CONTAINER_ID < /dev/null +git clone $PROJECT_REPOSITORY && \ +cd /root/$REPO_BASE_NAME && \ +git checkout $PROJECT_BRANCH > /dev/null else cd /root/$REPO_BASE_NAME && git fetch && git pull && \ git checkout $PROJECT_BRANCH @@ -26,19 +28,21 @@ pct exec $CONTAINER_ID -- bash -c "chmod 700 ~/.bashrc" # enable full R/W/X perm ENV_BASE_FOLDER="/var/lib/vz/snippets/container-env-vars/${ENV_BASE_FOLDER}" -if [ "${MULTI_COMPONENTS^^}" == "Y" ]; then - for FILE in $ENV_BASE_FOLDER/*; do - FILE_BASENAME=$(basename 
"$FILE") - FILE_NAME="${FILE_BASENAME%.*}" - ENV_ROUTE=$(echo "$FILE_NAME" | tr '_' '/') # acts as the route to the correct folder to place .env file in. - - ENV_VARS=$(cat $ENV_BASE_FOLDER/$FILE_BASENAME) - pct exec $CONTAINER_ID -- bash -c "cd /root/$REPO_BASE_NAME/$PROJECT_ROOT/$ENV_ROUTE && echo "$ENV_VARS" > .env" > /dev/null 2>&1 - done -else - ENV_FOLDER_BASE_NAME=$(basename "$ENV_BASE_FOLDER") - ENV_VARS=$(cat $ENV_BASE_FOLDER/$ENV_FOLDER_BASE_NAME.txt) - pct exec $CONTAINER_ID -- bash -c "cd /root/$REPO_BASE_NAME/$PROJECT_ROOT && echo "$ENV_VARS" > .env" > /dev/null 2>&1 +if [ ! -d "$ENV_BASE_FOLDER"]; then + if [ "${MULTI_COMPONENTS^^}" == "Y" ]; then + for FILE in $ENV_BASE_FOLDER/*; do + FILE_BASENAME=$(basename "$FILE") + FILE_NAME="${FILE_BASENAME%.*}" + ENV_ROUTE=$(echo "$FILE_NAME" | tr '_' '/') # acts as the route to the correct folder to place .env file in. + + ENV_VARS=$(cat $ENV_BASE_FOLDER/$FILE_BASENAME) + pct exec $CONTAINER_ID -- bash -c "cd /root/$REPO_BASE_NAME/$PROJECT_ROOT/$ENV_ROUTE && echo "$ENV_VARS" > .env" > /dev/null 2>&1 + done + else + ENV_FOLDER_BASE_NAME=$(basename "$ENV_BASE_FOLDER") + ENV_VARS=$(cat $ENV_BASE_FOLDER/$ENV_FOLDER_BASE_NAME.txt || true) + pct exec $CONTAINER_ID -- bash -c "cd /root/$REPO_BASE_NAME/$PROJECT_ROOT && echo "$ENV_VARS" > .env" > /dev/null 2>&1 + fi fi # Run Installation Commands ==== diff --git a/container creation/setup-runner.sh b/container creation/setup-runner.sh index 53bcf442..7b2a701e 100644 --- a/container creation/setup-runner.sh +++ b/container creation/setup-runner.sh @@ -30,7 +30,7 @@ TEMPLATE_NAME="template-$REPO_BASE_NAME-$REPO_BASE_NAME_WITH_OWNER" CTID_TEMPLATE=$( { pct list; ssh root@10.15.0.5 'pct list'; } | awk -v name="$TEMPLATE_NAME" '$3 == name {print $1}') case "${LINUX_DISTRIBUTION^^}" in - DEBIAN) PACKAGE_MANAGER="apt-get" ;; + "") PACKAGE_MANAGER="apt-get" ;; ROCKY) PACKAGE_MANAGER="dnf" ;; esac @@ -38,10 +38,10 @@ esac if [ -z "$CTID_TEMPLATE" ]; then case 
"${LINUX_DISTRIBUTION^^}" in - DEBIAN) CTID_TEMPLATE="160" ;; + "") CTID_TEMPLATE="160" ;; ROCKY) CTID_TEMPLATE="138" ;; esac -fi +fi REPO_BASE_NAME=$(basename -s .git "$PROJECT_REPOSITORY") REPO_BASE_NAME_WITH_OWNER=$(echo "$PROJECT_REPOSITORY" | cut -d'/' -f4) @@ -67,9 +67,6 @@ sleep 5 echo "ā³ DHCP Allocating IP Address..." CONTAINER_IP=$(pct exec $NEXT_ID -- hostname -I | awk '{print $1}') -# Set password inside the container -pct exec $NEXT_ID -- bash -c "echo 'root:$CONTAINER_PASSWORD' | chpasswd" > /dev/null 2>&1 - # Setting Up Github Runner ===== # Get Temporary Token diff --git a/container maintenance/start_services.sh b/container maintenance/start_services.sh index 5fa2f45c..02f2ad97 100644 --- a/container maintenance/start_services.sh +++ b/container maintenance/start_services.sh @@ -4,7 +4,6 @@ # Last Modified by Maxwell Klema on July 23rd, 2025 # ------------------------------------------------ -set -x CONTAINER_ID="$1" CONTAINER_NAME="$2" REPO_BASE_NAME="$3" @@ -20,12 +19,11 @@ BUILD_COMMAND=$(echo "${12}" | base64 -d) RUNTIME_LANGUAGE=$(echo "${13}" | base64 -d) GH_ACTION="${14}" PROJECT_BRANCH="${15}" -GITHUB_PAT="${16}" -UPDATE_CONTAINER="${17}" +UPDATE_CONTAINER="${16}" CONTAINER_NAME="${CONTAINER_NAME,,}" sleep 3 -if (( $CONTAINER_ID % 2 == 0 )); then +if (( $CONTAINER_ID % 2 == 0 )) && [ "$UPDATE_CONTAINER" == "true" ]; then ssh root@10.15.0.5 "pct stop $CONTAINER_ID" > /dev/null 2>&1 else pct stop $CONTAINER_ID > /dev/null 2>&1 From 9e371f385b7fd21d5138feb4f34a8bf476030a90 Mon Sep 17 00:00:00 2001 From: maxklema Date: Tue, 5 Aug 2025 19:31:54 +0000 Subject: [PATCH 3/8] updated container-creation scripts + re-organization --- .gitmodules | 3 + .../deployment-scripts}/gatherEnvVars.sh | 30 +- .../deployment-scripts}/gatherRuntimeLangs.sh | 25 +- .../deployment-scripts/gatherServices.sh | 158 ++++++++++ .../gatherSetupCommands.sh | 8 +- container creation/get-deployment-details.sh | 223 ++++++--------- .../get-lxc-container-details.sh | 269 
+++++++++--------- .../protocols/master_protocol_list.txt | 145 ++++++++++ ...rvice_map.json => service_map_debian.json} | 48 ++-- .../services/service_map_rocky.json | 99 +++++++ proxmox-launchpad | 1 + 11 files changed, 704 insertions(+), 305 deletions(-) create mode 100644 .gitmodules rename {deployment-scripts => container creation/deployment-scripts}/gatherEnvVars.sh (74%) rename {deployment-scripts => container creation/deployment-scripts}/gatherRuntimeLangs.sh (63%) mode change 100644 => 100755 create mode 100755 container creation/deployment-scripts/gatherServices.sh rename {deployment-scripts => container creation/deployment-scripts}/gatherSetupCommands.sh (83%) create mode 100644 container creation/protocols/master_protocol_list.txt rename container creation/services/{service_map.json => service_map_debian.json} (56%) create mode 100644 container creation/services/service_map_rocky.json create mode 160000 proxmox-launchpad diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 00000000..c8f2499b --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "proxmox-launchpad"] + path = proxmox-launchpad + url = https://github.com/maxklema/proxmox-launchpad.git diff --git a/deployment-scripts/gatherEnvVars.sh b/container creation/deployment-scripts/gatherEnvVars.sh similarity index 74% rename from deployment-scripts/gatherEnvVars.sh rename to container creation/deployment-scripts/gatherEnvVars.sh index cd9b9d5a..9453c065 100644 --- a/deployment-scripts/gatherEnvVars.sh +++ b/container creation/deployment-scripts/gatherEnvVars.sh @@ -8,7 +8,13 @@ gatherEnvVars(){ read -p "šŸ”‘ Enter Environment Variable Value → " ENV_VAR_VALUE while [ "$ENV_VAR_KEY" == "" ] || [ "$ENV_VAR_VALUE" == "" ]; do + if [ "${GH_ACTION^^}" == "Y" ]; then + outputError "Key and value cannot be empty. Please try again." + writeLog "Empty environment variable key or value entered (GH_ACTION mode)" + exit 15 + fi echo "āš ļø Key or value cannot be empty. Try again." 
+ writeLog "Empty environment variable key or value entered" read -p "šŸ”‘ Enter Environment Variable Key → " ENV_VAR_KEY read -p "šŸ”‘ Enter Environment Variable Value → " ENV_VAR_VALUE done @@ -28,9 +34,16 @@ fi while [ "${REQUIRE_ENV_VARS^^}" != "Y" ] && [ "${REQUIRE_ENV_VARS^^}" != "N" ] && [ "${REQUIRE_ENV_VARS^^}" != "" ]; do echo "āš ļø Invalid option. Please try again." + writeLog "Invalid environment variables requirement option entered: $REQUIRE_ENV_VARS" read -p "šŸ”‘ Does your application require environment variables? (y/n) → " REQUIRE_ENV_VARS done +if [ "${GH_ACTION^^}" == "Y" ]; then + if [ ! -z "$CONTAINER_ENV_VARS" ]; then + REQUIRE_ENV_VARS="Y" + fi +fi + if [ "${REQUIRE_ENV_VARS^^}" == "Y" ]; then # generate random temp .env folder to store all env files for different components RANDOM_NUM=$(shuf -i 100000-999999 -n 1) @@ -51,8 +64,14 @@ if [ "${REQUIRE_ENV_VARS^^}" == "Y" ]; then addComponent "$key" done else + if [ "${GH_ACTION^^}" == "Y" ]; then + outputError "Your \"CONTAINER_ENV_VARS\" is not valid JSON. Please re-format and try again." + writeLog "Invalid JSON in CONTAINER_ENV_VARS (GH_ACTION mode)" + exit 16 + fi echo "āš ļø Your \"CONTAINER_ENV_VARS\" is not valid JSON. Please re-format and try again." - exit 10 + writeLog "Invalid JSON in CONTAINER_ENV_VARS" + exit 16 fi else # No Environment Variables gatherComponentDir "Enter the path of your component to enter environment variables" @@ -71,12 +90,19 @@ if [ "${REQUIRE_ENV_VARS^^}" == "Y" ]; then ENV_FILE="env_$RANDOM_NUM.txt" ENV_FILE_PATH="/root/bin/env/$ENV_FOLDER/$ENV_FILE" touch "$ENV_FILE_PATH" + if [ ! -z "$CONTAINER_ENV_VARS" ]; then # Environment Variables if echo "$CONTAINER_ENV_VARS" | jq -e > /dev/null 2>&1; then #if exit status of jq is 0 (valid JSON) // success echo "$CONTAINER_ENV_VARS " | jq -r 'to_entries[] | "\(.key)=\(.value)"' > "$ENV_FILE_PATH" #k=v pairs else + if [ "${GH_ACTION^^}" == "Y" ]; then + outputError "Your \"CONTAINER_ENV_VARS\" is not valid JSON. 
Please re-format and try again." + writeLog "Invalid JSON in CONTAINER_ENV_VARS for single component (GH_ACTION mode)" + exit 16 + fi echo "āš ļø Your \"CONTAINER_ENV_VARS\" is not valid JSON. Please re-format and try again." - exit 10 + writeLog "Invalid JSON in CONTAINER_ENV_VARS for single component" + exit 16 fi else # No Environment Variables gatherEnvVars "$ENV_FILE_PATH" diff --git a/deployment-scripts/gatherRuntimeLangs.sh b/container creation/deployment-scripts/gatherRuntimeLangs.sh old mode 100644 new mode 100755 similarity index 63% rename from deployment-scripts/gatherRuntimeLangs.sh rename to container creation/deployment-scripts/gatherRuntimeLangs.sh index 1331610f..508eff5c --- a/deployment-scripts/gatherRuntimeLangs.sh +++ b/container creation/deployment-scripts/gatherRuntimeLangs.sh @@ -9,6 +9,12 @@ gatherRunTime() { while [ "${RUNTIME_LANGUAGE^^}" != "NODEJS" ] && [ "${RUNTIME_LANGUAGE^^}" != "PYTHON" ]; do echo "āš ļø Sorry, that runtime environment is not yet supported. Only \"nodejs\" and \"python\" are currently supported." + writeLog "Unsupported runtime environment entered: $RUNTIME_LANGUAGE for component: $COMPONENT_PATH" + if [ "${GH_ACTION^^}" == "Y" ]; then + outputError "āš ļø Sorry, that runtime environment is not yet supported. Only \"nodejs\" and \"python\" are currently supported." + writeLog "Unsupported runtime environment entered: $RUNTIME_LANGUAGE (GH_ACTION mode)" + exit 17 + fi read -p "šŸ–„ļø Enter the underlying runtime environment for \"$COMPONENT_PATH\" (e.g., 'nodejs', 'python') → " RUNTIME_LANGUAGE done } @@ -28,6 +34,7 @@ removeFromList() { UNIQUE_COMPONENTS_CLONE=("${UNIQUE_COMPONENTS[@]}") RUNTIME_LANGUAGE_DICT={} + if [ "${MULTI_COMPONENT^^}" == 'Y' ]; then if [ ! 
-z "$RUNTIME_LANGUAGE" ]; then # Environment Variable Passed if echo "$RUNTIME_LANGUAGE" | jq -e > /dev/null 2>&1; then # Valid JSON @@ -35,12 +42,24 @@ if [ "${MULTI_COMPONENT^^}" == 'Y' ]; then removeFromList "$key" done if [ ${#UNIQUE_COMPONENTS_CLONE[@]} -gt 0 ]; then #if there are still components in the list, then not all runtimes were provided, so exit on error + if [ "${GH_ACTION^^}" == "Y" ]; then + outputError "You did not provide runtime languages for these components: \"${UNIQUE_COMPONENTS_CLONE[@]}\"." + writeLog "Missing runtime languages for components: ${UNIQUE_COMPONENTS_CLONE[@]} (GH_ACTION mode)" + exit 18 + fi echo "āš ļø You did not provide runtime languages for these components: \"${UNIQUE_COMPONENTS_CLONE[@]}\"." - exit 11 + writeLog "Missing runtime languages for components: ${UNIQUE_COMPONENTS_CLONE[@]}" + exit 18 fi else + if [ "${GH_ACTION^^}" == "Y" ]; then + outputError "Your \"$RUNTIME_LANGUAGE\" is not valid JSON. Please re-format and try again." + writeLog "Invalid JSON in RUNTIME_LANGUAGE (GH_ACTION mode)" + exit 16 + fi echo "āš ļø Your \"$RUNTIME_LANGUAGE\" is not valid JSON. Please re-format and try again." - exit 10 + writeLog "Invalid JSON in RUNTIME_LANGUAGE" + exit 16 fi else # No Environment Variable Passed for CURRENT in "${UNIQUE_COMPONENTS[@]}"; do @@ -51,7 +70,7 @@ if [ "${MULTI_COMPONENT^^}" == 'Y' ]; then fi else if [ ! 
-z "$RUNTIME_LANGUAGE" ]; then - RUNTIME_LANGUAGE="true" + RT_ENV_VAR="true" fi gatherRunTime "$PROJECT_REPOSITORY" fi \ No newline at end of file diff --git a/container creation/deployment-scripts/gatherServices.sh b/container creation/deployment-scripts/gatherServices.sh new file mode 100755 index 00000000..1cc06c39 --- /dev/null +++ b/container creation/deployment-scripts/gatherServices.sh @@ -0,0 +1,158 @@ +SERVICE_MAP="/root/bin/services/service_map_$LINUX_DISTRIBUTION.json" +APPENDED_SERVICES=() + +# Helper function to check if a user has added the same service twice +serviceExists() { + SERVICE="$1" + for CURRENT in "${APPENDED_SERVICES[@]}"; do + if [ "${SERVICE,,}" == "${CURRENT,,}" ]; then + return 0 + fi + done + return 1 +} + +processService() { + local SERVICE="$1" + local MODE="$2" # "batch" or "single" + + SERVICE_IN_MAP=$(jq -r --arg key "${SERVICE,,}" '.[$key] // empty' "$SERVICE_MAP") + if serviceExists "$SERVICE"; then + if [ "$MODE" = "batch" ]; then + return 0 # skip to next in batch mode + else + echo "āš ļø You already added \"$SERVICE\" as a service. Please try again." + writeLog "Duplicate service attempted: $SERVICE" + return 0 + fi + elif [ "${SERVICE^^}" != "C" ] && [ "${SERVICE^^}" != "" ] && [ -n "$SERVICE_IN_MAP" ]; then + jq -r --arg key "${SERVICE,,}" '.[$key][]' "$SERVICE_MAP" >> "$TEMP_SERVICES_FILE_PATH" + echo "sudo systemctl daemon-reload" >> "$TEMP_SERVICES_FILE_PATH" + echo "āœ… ${SERVICE^^} added to your container." + APPENDED_SERVICES+=("${SERVICE^^}") + elif [ "${SERVICE^^}" == "C" ]; then + appendCustomService + elif [ "${SERVICE^^}" != "" ]; then + if [ "${GH_ACTION^^}" == "Y" ]; then + outputError "āš ļø Service \"$SERVICE\" does not exist." + writeLog "Invalid service entered: $SERVICE (GH_ACTION mode)" + exit 20 + fi + echo "āš ļø Service \"$SERVICE\" does not exist." 
+ writeLog "Invalid service entered: $SERVICE" + [ "$MODE" = "batch" ] && exit 20 + fi +} + +# Helper function to append a new service to a container +appendService() { + if [ ! -z "$SERVICES" ]; then + for SERVICE in $(echo "$SERVICES" | jq -r '.[]'); do + processService "$SERVICE" "batch" + done + else + read -p "āž”ļø Enter the name of a service to add to your container or type \"C\" to set up a custom service installation (Enter to exit) → " SERVICE + processService "$SERVICE" "single" + fi +} + +appendCustomService() { + # If there is an env variable for custom services, iterate through each command and append it to temporary services file + if [ ! -z "$CUSTOM_SERVICES" ]; then + echo "$CUSTOM_SERVICES" | jq -c -r '.[]' | while read -r CUSTOM_SERVICE; do + echo "$CUSTOM_SERVICE" | jq -c -r '.[]' | while read -r CUSTOM_SERVICE_COMMAND; do + if [ ! -z "$CUSTOM_SERVICE_COMMAND" ]; then + echo "$CUSTOM_SERVICE_COMMAND" >> "$TEMP_SERVICES_FILE_PATH" + else + if [ "${GH_ACTION^^}" == "Y" ]; then + outputError "āš ļø Custom Service Installation Command cannot be empty in \"$CUSTOM_SERVICE\"." + writeLog "Empty custom service command in: $CUSTOM_SERVICE (GH_ACTION mode)" + exit 21 + fi + echo "āš ļø Command cannot be empty." + writeLog "Empty custom service command in: $CUSTOM_SERVICE" + exit 21; + fi + done + done + echo "āœ… Custom Services appended." + else + echo "šŸ›Žļø Configuring Custom Service Installation. For each prompt, enter a command that is a part of the installation process for your service on Debian Bookworm. Do not forget to enable and start the service at the end. 
Once you have entered all of your commands, press enter to continue" + COMMAND_NUM=1 + read -p "āž”ļø Enter Command $COMMAND_NUM: " CUSTOM_COMMAND + + echo "$CUSTOM_COMMAND" >> "$TEMP_SERVICES_FILE_PATH" + + while [ "${CUSTOM_COMMAND^^}" != "" ]; do + ((COMMAND_NUM++)) + read -p "āž”ļø Enter Command $COMMAND_NUM: " CUSTOM_COMMAND + echo "$CUSTOM_COMMAND" >> "$TEMP_SERVICES_FILE_PATH" + done + fi +} + +# Helper function to see if a user wants to set up a custom service +setUpService() { + read -p "šŸ›Žļø Do you wish to set up a custom service installation? (y/n) " SETUP_CUSTOM_SERVICE_INSTALLATION + while [ "${SETUP_CUSTOM_SERVICE_INSTALLATION^^}" != "Y" ] && [ "${SETUP_CUSTOM_SERVICE_INSTALLATION^^}" != "N" ] && [ "${SETUP_CUSTOM_SERVICE_INSTALLATION^^}" != "" ]; do + if [ "${GH_ACTION^^}" == "Y" ]; then + outputError "āš ļø Invalid custom service installation option. Please try again." + writeLog "Invalid custom service installation option entered: $SETUP_CUSTOM_SERVICE_INSTALLATION (GH_ACTION mode)" + exit 22 + fi + echo "āš ļø Invalid option. Please try again." + writeLog "Invalid custom service installation option entered: $SETUP_CUSTOM_SERVICE_INSTALLATION" + read -p "šŸ›Žļø Do you wish to set up a custom service installation? (y/n) " SETUP_CUSTOM_SERVICE_INSTALLATION + done +} + +if [ -z "$REQUIRE_SERVICES" ]; then + read -p "šŸ›Žļø Does your application require special services (i.e. Docker, MongoDB, etc.) to run on the container? (y/n) → " REQUIRE_SERVICES +fi + +while [ "${REQUIRE_SERVICES^^}" != "Y" ] && [ "${REQUIRE_SERVICES^^}" != "N" ] && [ "${REQUIRE_SERVICES^^}" != "" ]; do + echo "āš ļø Invalid option. Please try again." + writeLog "Invalid service requirement option entered: $REQUIRE_SERVICES" + read -p "šŸ›Žļø Does your application require special services (i.e. Docker, MongoDB, etc.) to run on the container? (y/n) → " REQUIRE_SERVICES +done + +if [ "${GH_ACTION^^}" == "Y" ]; then + if [ ! -z "$SERVICES" ] || [ ! 
-z "$CUSTOM_SERVICES" ]; then + REQUIRE_SERVICES="Y" + fi +fi + +if [ "${REQUIRE_SERVICES^^}" == "Y" ]; then + + # Generate random (temporary) file to store install commands for needed services + RANDOM_NUM=$(shuf -i 100000-999999 -n 1) + SERVICES_FILE="services_$RANDOM_NUM.txt" + TEMP_SERVICES_FILE_PATH="/root/bin/services/$SERVICES_FILE" + touch "$TEMP_SERVICES_FILE_PATH" + + appendService + while [ "${SERVICE^^}" != "" ] || [ ! -z "$SERVICES" ]; do + if [ -z "$SERVICES" ]; then + appendService + else + if [ ! -z "$CUSTOM_SERVICES" ]; then # assumes both services and custom services passed as ENV vars + appendCustomService + else # custom services not passed as ENV var, so must prompt the user for their custom services + setUpService + while [ "${SETUP_CUSTOM_SERVICE_INSTALLATION^^}" == "Y" ]; do + appendCustomService + setUpService + done + fi + break + fi + done +fi + +# Used for updating container services in GH Actions + +UPDATING_CONTAINER="$1" +if [ "$UPDATING_CONTAINER" == "true" ]; then + cat "$TEMP_SERVICES_FILE_PATH" + rm -rf "$TEMP_SERVICES_FILE_PATH" +fi \ No newline at end of file diff --git a/deployment-scripts/gatherSetupCommands.sh b/container creation/deployment-scripts/gatherSetupCommands.sh similarity index 83% rename from deployment-scripts/gatherSetupCommands.sh rename to container creation/deployment-scripts/gatherSetupCommands.sh index 4aaf6478..6ff40ecb 100644 --- a/deployment-scripts/gatherSetupCommands.sh +++ b/container creation/deployment-scripts/gatherSetupCommands.sh @@ -1,6 +1,6 @@ #!/bin/bash # This function gathers start up commands, such as build, install, and start, for both single and multiple component applications -# Last Modified by Maxwell Klema on July 16th, 2025 +# Last Modified by Maxwell Klema on July 15th, 2025 # --------------------------------------------- gatherSetupCommands() { @@ -19,7 +19,13 @@ gatherSetupCommands() { addComponent "$key" done else + if [ "${GH_ACTION^^}" == "Y" ]; then + outputError "Your 
\"$TYPE_COMMAND\" is not valid JSON. Please re-format and try again." + writeLog "Invalid JSON in $TYPE_COMMAND (GH_ACTION mode)" + exit 10 + fi echo "āš ļø Your \"$TYPE_COMMAND\" is not valid JSON. Please re-format and try again." + writeLog "Invalid JSON in $TYPE_COMMAND" exit 10 fi else # No Environment Variable Passed diff --git a/container creation/get-deployment-details.sh b/container creation/get-deployment-details.sh index 5d1e9631..2f5f4215 100755 --- a/container creation/get-deployment-details.sh +++ b/container creation/get-deployment-details.sh @@ -1,6 +1,6 @@ #!/bin/bash # Helper script to gather project details for automatic deployment -# Modified July 17th, 2025 by Maxwell Klema +# Modified August 5th, 2025 by Maxwell Klema # ------------------------------------------ # Define color variables (works on both light and dark backgrounds) @@ -12,60 +12,94 @@ echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━ echo -e "${BOLD}${MAGENTA}🌐 Let's Get Your Project Automatically Deployed ${RESET}" echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" +writeLog "Starting deploy application script" + # Get and validate project repository ======== if [ -z "$PROJECT_REPOSITORY" ]; then read -p "šŸš€ Paste the link to your project repository → " PROJECT_REPOSITORY + writeLog "Prompted for project repository" fi CheckRepository() { PROJECT_REPOSITORY_SHORTENED=${PROJECT_REPOSITORY#*github.com/} PROJECT_REPOSITORY_SHORTENED=${PROJECT_REPOSITORY_SHORTENED%.git} - REPOSITORY_EXISTS=$(curl -s -o /dev/null -w "%{http_code}" https://github.com/$RROJECT_REPOSITORY) + REPOSITORY_EXISTS=$(curl -s -o /dev/null -w "%{http_code}" https://github.com/$PROJECT_REPOSITORY_SHORTENED) + writeLog "Checking repository existence for $PROJECT_REPOSITORY_SHORTENED" } CheckRepository while [ "$REPOSITORY_EXISTS" != "200" ]; do + if [ "${GH_ACTION^^}" == "Y" ]; then + outputError "Invalid Repository Link. Make sure your repository is private." 
+ writeLog "Invalid repository link entered: $PROJECT_REPOSITORY (GH_ACTION mode)" + exit 10 + fi echo "āš ļø The repository link you provided, \"$PROJECT_REPOSITORY\" was not valid." + writeLog "Invalid repository link entered: $PROJECT_REPOSITORY" read -p "šŸš€ Paste the link to your project repository → " PROJECT_REPOSITORY CheckRepository done +writeLog "Repository validated: $PROJECT_REPOSITORY" + # Get Repository Branch ======== if [ -z "$PROJECT_BRANCH" ]; then read -p "🪾 Enter the project branch to deploy from (leave blank for \"main\") → " PROJECT_BRANCH + writeLog "Prompted for project branch" fi if [ "$PROJECT_BRANCH" == "" ]; then PROJECT_BRANCH="main" + writeLog "Using default branch: main" fi -REPOSITORY_BRANCH_EXISTS=$(curl -s -o /dev/null -w "%{http_code}" $PROJECT_REPOSITORY/tree/$PROJECT_BRANCH) +REPOSITORY_BRANCH_EXISTS=$(curl -s -o /dev/null -w "%{http_code}" https://github.com/$PROJECT_REPOSITORY_SHORTENED/tree/$PROJECT_BRANCH) +writeLog "Checking branch existence for $PROJECT_BRANCH" + while [ "$REPOSITORY_BRANCH_EXISTS" != "200" ]; do + if [ "${GH_ACTION^^}" == "Y" ]; then + outputError "Invalid Branch. Make sure your branch exists on the repository." + writeLog "Invalid branch entered: $PROJECT_BRANCH (GH_ACTION mode)" + exit 11 + fi echo "āš ļø The branch you provided, \"$PROJECT_BRANCH\", does not exist on repository at \"$PROJECT_REPOSITORY\"." 
+ writeLog "Invalid branch entered: $PROJECT_BRANCH" read -p "🪾 Enter the project branch to deploy from (leave blank for \"main\") → " PROJECT_BRANCH - if [ "PROJECT_BRANCH" == "" ]; then - PROJECT_BRANCH="main" + if [ "$PROJECT_BRANCH" == "" ]; then + PROJECT_BRANCH="main" fi - REPOSITORY_BRANCH_EXISTS=$(curl -s -o /dev/null -w "%{http_code}" $PROJECT_REPOSITORY_SHORTENED/tree/$PROJECT_BRANCH) + REPOSITORY_BRANCH_EXISTS=$(curl -s -o /dev/null -w "%{http_code}" https://github.com/$PROJECT_REPOSITORY_SHORTENED/tree/$PROJECT_BRANCH) done +writeLog "Branch validated: $PROJECT_BRANCH" + # Get Project Root Directory ======== if [ -z "$PROJECT_ROOT" ]; then read -p "šŸ“ Enter the project root directory (relative to repository root directory, or leave blank for root directory) → " PROJECT_ROOT + writeLog "Prompted for project root directory" fi VALID_PROJECT_ROOT=$(node /root/bin/js/runner.js authenticateRepo "$PROJECT_REPOSITORY" "$PROJECT_BRANCH" "$PROJECT_ROOT") +writeLog "Validating project root directory: $PROJECT_ROOT" while [ "$VALID_PROJECT_ROOT" == "false" ]; do + if [ "${GH_ACTION^^}" == "Y" ]; then + outputError "Invalid Project Root Directory. Make sure your directory exists on the repository." + writeLog "Invalid project root directory entered: $PROJECT_ROOT (GH_ACTION mode)" + exit 12 + fi echo "āš ļø The root directory you provided, \"$PROJECT_ROOT\", does not exist on branch, \"$PROJECT_BRANCH\", on repository at \"$PROJECT_REPOSITORY\"." 
+ writeLog "Invalid project root directory entered: $PROJECT_ROOT" read -p "šŸ“ Enter the project root directory (relative to repository root directory, or leave blank for root directory) → " PROJECT_ROOT VALID_PROJECT_ROOT=$(node /root/bin/js/runner.js authenticateRepo "$PROJECT_REPOSITORY" "$PROJECT_BRANCH" "$PROJECT_ROOT") done +writeLog "Project root directory validated: $PROJECT_ROOT" + # Remove forward slash if [[ "$PROJECT_ROOT" == "/*" ]]; then PROJECT_ROOT="${PROJECT_ROOT:1}" @@ -75,13 +109,28 @@ fi if [ -z "$MULTI_COMPONENT" ]; then read -p "šŸ”— Does your app consist of multiple components that run independently, i.e. seperate frontend and backend (y/n) → " MULTI_COMPONENT + writeLog "Prompted for multi-component option" fi while [ "${MULTI_COMPONENT^^}" != "Y" ] && [ "${MULTI_COMPONENT^^}" != "N" ] && [ "${MULTI_COMPONENT^^}" != "" ]; do + if [ "${GH_ACTION^^}" == "Y" ]; then + outputError "Invalid option for MULTI_COMPONENT. It must be 'y' or 'n'. Please try again." + writeLog "Invalid multi-component option entered: $MULTI_COMPONENT (GH_ACTION mode)" + exit 13 + fi echo "āš ļø Invalid option. Please try again." + writeLog "Invalid multi-component option entered: $MULTI_COMPONENT" read -p "šŸ”— Does your app consist of multiple components that run independently, i.e. seperate frontend and backend (y/n) → " MULTI_COMPONENT done +if [ "${GH_ACTION^^}" == "Y" ]; then + if [ ! -z "$RUNTIME_LANGUAGE" ] && echo "$RUNTIME_LANGUAGE" | jq . 
>/dev/null 2>&1; then # If RUNTIME_LANGUAGE is set and is valid JSON + MULTI_COMPONENT="Y" + fi +fi + +writeLog "Multi-component option set to: $MULTI_COMPONENT" + # Gather Deployment Commands ======== # Helper functions to gather and validate component directory @@ -90,22 +139,35 @@ gatherComponentDir() { COMPONENT_PATH="$2" if [ -z "$COMPONENT_PATH" ]; then read -p "$1, relative to project root directory (To Continue, Press Enter) → " COMPONENT_PATH + writeLog "Prompted for component directory: $1" fi # Check that component path is valid VALID_COMPONENT_PATH=$(node /root/bin/js/runner.js authenticateRepo "$PROJECT_REPOSITORY" "$PROJECT_BRANCH" "$COMPONENT_PATH") + writeLog "Validating component path: $COMPONENT_PATH" + while [ "$VALID_COMPONENT_PATH" == "false" ] && [ "$COMPONENT_PATH" != "" ]; do + if [ "${GH_ACTION^^}" == "Y" ]; then + outputError "Invalid Component Path: \"$COMPONENT_PATH\". Make sure your path exists on the repository." + writeLog "Invalid component path entered: $COMPONENT_PATH (GH_ACTION mode)" + exit 14 + fi echo "āš ļø The component path you entered, \"$COMPONENT_PATH\", does not exist on branch, \"$PROJECT_BRANCH\", on repository at \"$PROJECT_REPOSITORY\"." 
+ writeLog "Invalid component path entered: $COMPONENT_PATH" if [ -z "$2" ]; then read -p "$1, relative to project root directory (To Continue, Press Enter) → " COMPONENT_PATH VALID_COMPONENT_PATH=$(node /root/bin/js/runner.js authenticateRepo "$PROJECT_REPOSITORY" "$PROJECT_BRANCH" "$COMPONENT_PATH") else - exit 9 + exit 14 fi done if [[ "$COMPONENT_PATH" == /* ]]; then COMPONENT_PATH="${COMPONENT_PATH:1}" # remove leading slash fi + + if [ "$COMPONENT_PATH" != "" ]; then + writeLog "Component path validated: $COMPONENT_PATH" + fi } UNIQUE_COMPONENTS=() @@ -119,151 +181,42 @@ addComponent() { fi done UNIQUE_COMPONENTS+=("$COMPONENT") + writeLog "Added component: $COMPONENT" } +writeLog "Sourcing setup commands script" source /root/bin/deployment-scripts/gatherSetupCommands.sh # Function to gather build, install, and start commands +writeLog "Sourcing environment variables script" source /root/bin/deployment-scripts/gatherEnvVars.sh # Gather Environment Variables + +writeLog "Gathering build commands" gatherSetupCommands "BUILD" "šŸ—ļø Enter the build command (leave blank if no build command) → " # Gather Build Command(s) -gatherSetupCommands "INSTALL" "šŸ“¦ Enter the install command (e.g., 'npm install') → " # Gather Install Command(s)echo "$INSTALL_COMMAND" -gatherSetupCommands "START" "🚦 Enter the start command (e.g., 'npm start', 'python app.py') → " # Gather Start Command(s) +writeLog "Gathering install commands" +gatherSetupCommands "INSTALL" "šŸ“¦ Enter the install command (e.g., 'npm install') → " # Gather Install Command(s) + +writeLog "Gathering start commands" +gatherSetupCommands "START" "🚦 Enter the start command (e.g., 'npm start', 'python app.py') → " # Gather Start Command(s) if [ "${MULTI_COMPONENT^^}" == "Y" ]; then if [ -z "$ROOT_START_COMMAND" ]; then read -p "šŸ“ If your container requires a start command at the root directory, i.e. 
Docker run, enter it here (leave blank for no command) → " ROOT_START_COMMAND + writeLog "Prompted for root start command" + fi + if [ "$ROOT_START_COMMAND" != "" ]; then + writeLog "Root start command set: $ROOT_START_COMMAND" fi fi # Get Runtime Language ======== +writeLog "Sourcing runtime languages script" source /root/bin/deployment-scripts/gatherRuntimeLangs.sh # Get Services ======== +writeLog "Sourcing services script" +source /root/bin/deployment-scripts/gatherServices.sh -SERVICE_MAP="/root/bin/services/service_map_$LINUX_DISTRIBUTION.json" -APPENDED_SERVICES=() - -# Helper function to check if a user has added the same service twice -serviceExists() { - SERVICE="$1" - for CURRENT in "${APPENDED_SERVICES[@]}"; do - if [ "${SERVICE,,}" == "${CURRENT,,}" ]; then - return 0 - fi - done - return 1 -} - -processService() { - local SERVICE="$1" - local MODE="$2" # "batch" or "single" - - SERVICE_IN_MAP=$(jq -r --arg key "${SERVICE,,}" '.[$key] // empty' "$SERVICE_MAP") - if serviceExists "$SERVICE"; then - if [ "$MODE" = "batch" ]; then - return 0 # skip to next in batch mode - else - echo "āš ļø You already added \"$SERVICE\" as a service. Please try again." - return 0 - fi - elif [ "${SERVICE^^}" != "C" ] && [ "${SERVICE^^}" != "" ] && [ -n "$SERVICE_IN_MAP" ]; then - jq -r --arg key "${SERVICE,,}" '.[$key][]' "$SERVICE_MAP" >> "$TEMP_SERVICES_FILE_PATH" - echo "sudo systemctl daemon-reload" >> "$TEMP_SERVICES_FILE_PATH" - echo "āœ… ${SERVICE^^} added to your container." - APPENDED_SERVICES+=("${SERVICE^^}") - elif [ "${SERVICE^^}" == "C" ]; then - appendCustomService - elif [ "${SERVICE^^}" != "" ]; then - echo "āš ļø Service \"$SERVICE\" does not exist." - [ "$MODE" = "batch" ] && exit 20 - fi -} - -# Helper function to append a new service to a container -appendService() { - if [ ! 
-z "$SERVICES" ]; then - for SERVICE in $(echo "$SERVICES" | jq -r '.[]'); do - processService "$SERVICE" "batch" - done - else - read -p "āž”ļø Enter the name of a service to add to your container or type \"C\" to set up a custom service installation (Enter to exit) → " SERVICE - processService "$SERVICE" "single" - fi -} - -appendCustomService() { - # If there is an env variable for custom services, iterate through each command and append it to temporary services file - if [ ! -z "$CUSTOM_SERVICES" ]; then - echo "$CUSTOM_SERVICES" | jq -c -r '.[]' | while read -r CUSTOM_SERVICE; do - echo "$CUSTOM_SERVICE" | jq -c -r '.[]' | while read -r CUSTOM_SERVICE_COMMAND; do - if [ ! -z "$CUSTOM_SERVICE_COMMAND" ]; then - echo "$CUSTOM_SERVICE_COMMAND" >> "$TEMP_SERVICES_FILE_PATH" - else - echo "āš ļø Command cannot be empty." - exit 21; - fi - done - done - echo "āœ… Custom Services appended." - else - echo "šŸ›Žļø Configuring Custom Service Installation. For each prompt, enter a command that is a part of the installation process for your service on Debian Bookworm. Do not forget to enable and start the service at the end. Once you have entered all of your commands, press enter to continue" - COMMAND_NUM=1 - read -p "āž”ļø Enter Command $COMMAND_NUM: " CUSTOM_COMMAND - - echo "$CUSTOM_COMMAND" >> "$TEMP_SERVICES_FILE_PATH" - - while [ "${CUSTOM_COMMAND^^}" != "" ]; do - ((COMMAND_NUM++)) - read -p "āž”ļø Enter Command $COMMAND_NUM: " CUSTOM_COMMAND - echo "$CUSTOM_COMMAND" >> "$TEMP_SERVICES_FILE_PATH" - done - fi -} - -# Helper function to see if a user wants to set up a custom service -setUpService() { - read -p "šŸ›Žļø Do you wish to set up a custom service installation? (y/n) " SETUP_CUSTOM_SERVICE_INSTALLATION - while [ "${REQUIRE_SERVICES^^}" != "Y" ] && [ "${REQUIRE_SERVICES^^}" != "N" ] && [ "${REQUIRE_SERVICES^^}" != "" ]; do - echo "āš ļø Invalid option. Please try again." - read -p "šŸ›Žļø Do you wish to set up a custom service installation? 
(y/n) " SETUP_CUSTOM_SERVICE_INSTALLATION - done -} - -if [ -z "$REQUIRE_SERVICES" ]; then - read -p "šŸ›Žļø Does your application require special services (i.e. Docker, MongoDB, etc.) to run on the container? (y/n) → " REQUIRE_SERVICES -fi - -while [ "${REQUIRE_SERVICES^^}" != "Y" ] && [ "${REQUIRE_SERVICES^^}" != "N" ] && [ "${REQUIRE_SERVICES^^}" != "" ]; do - echo "āš ļø Invalid option. Please try again." - read -p "šŸ›Žļø Does your application require special services (i.e. Docker, MongoDB, etc.) to run on the container? (y/n) → " REQUIRE_SERVICES -done - -if [ "${REQUIRE_SERVICES^^}" == "Y" ]; then - - # Generate random (temporary) file to store install commands for needed services - RANDOM_NUM=$(shuf -i 100000-999999 -n 1) - SERVICES_FILE="services_$RANDOM_NUM.txt" - TEMP_SERVICES_FILE_PATH="/root/bin/services/$SERVICES_FILE" - touch "$TEMP_SERVICES_FILE_PATH" - - appendService - while [ "${SERVICE^^}" != "" ] || [ ! -z "$SERVICES" ]; do - if [ -z "$SERVICES" ]; then - appendService - else - if [ ! 
-z "$CUSTOM_SERVICES" ]; then # assumes both services and custom services passed as ENV vars - appendCustomService - else # custom services not passed as ENV var, so must prompt the user for their custom services - setUpService - while [ "${SETUP_CUSTOM_SERVICE_INSTALLATION^^}" == "Y" ]; do - appendCustomService - setUpService - done - fi - break - fi - done -fi - -echo -e "\nāœ… Deployment Process Finished.\n" +writeLog "Deployment process finished successfully" +echo -e "\nāœ… Deployment Process Finished.\n" \ No newline at end of file diff --git a/container creation/get-lxc-container-details.sh b/container creation/get-lxc-container-details.sh index 3c746421..148900f2 100644 --- a/container creation/get-lxc-container-details.sh +++ b/container creation/get-lxc-container-details.sh @@ -1,8 +1,14 @@ #!/bin/bash # Main Container Creation Script -# Modified July 17th, 2025 by Maxwell Klema +# Modified July 28th, 2025 by Maxwell Klema # ------------------------------------------ +LOG_FILE="/var/log/create-container.log" + +writeLog() { + echo "[$(date +'%Y-%m-%d %H:%M:%S')]: $1" >> "$LOG_FILE" +} + # Define color variables (works on both light and dark backgrounds) RESET="\033[0m" BOLD="\033[1m" @@ -14,114 +20,79 @@ echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━ # Authenticate User (Only Valid Users can Create Containers) +outputError() { + echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" + echo -e "${BOLD}${MAGENTA}āŒ Script Failed. Exiting... 
${RESET}" + echo -e "$1" + echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" +} + +writeLog "Starting Container Creation Script" + if [ -z "$PROXMOX_USERNAME" ]; then - read -p "Enter Proxmox Username → " PROXMOX_USERNAME + read -p "Enter Proxmox Username → " PROXMOX_USERNAME fi if [ -z "$PROXMOX_PASSWORD" ]; then - read -sp "Enter Proxmox Password → " PROXMOX_PASSWORD - echo "" + read -sp "Enter Proxmox Password → " PROXMOX_PASSWORD + echo "" fi USER_AUTHENTICATED=$(node /root/bin/js/runner.js authenticateUser "$PROXMOX_USERNAME" "$PROXMOX_PASSWORD") -RETRIES=3 while [ $USER_AUTHENTICATED == 'false' ]; do - if [ $RETRIES -gt 0 ]; then - echo "āŒ Authentication Failed. Try Again" - read -p "Enter Proxmox Username → " PROXMOX_USERNAME - read -sp "Enter Proxmox Password → " PROXMOX_PASSWORD - echo "" - - USER_AUTHENTICATED=$(node /root/bin/js/runner.js authenticateUser "$PROXMOX_USERNAME" "$PROXMOX_PASSWORD") - RETRIES=$(($RETRIES-1)) - else - echo "Too many incorrect attempts. Exiting..." - exit 2 - fi + if [ "${GH_ACTION^^}" == "Y" ]; then + outputError "Invalid Proxmox Credentials." + writeLog "Invalid Proxmox credentials entered for user: $PROXMOX_USERNAME (GH_ACTION mode)" + exit 2 + fi + echo "āŒ Authentication Failed. 
Try Again" + writeLog "Invalid Proxmox credentials entered for user: $PROXMOX_USERNAME" + read -p "Enter Proxmox Username → " PROXMOX_USERNAME + read -sp "Enter Proxmox Password → " PROXMOX_PASSWORD + echo "" + + USER_AUTHENTICATED=$(node /root/bin/js/runner.js authenticateUser "$PROXMOX_USERNAME" "$PROXMOX_PASSWORD") done echo "šŸŽ‰ Your proxmox account, $PROXMOX_USERNAME@pve, has been authenticated" -# Gather Container Hostname (hostname.opensource.mieweb.org) +# Gather Container Hostname (hostname.opensource.mieweb.org) ===== if [ -z "$CONTAINER_NAME" ]; then - read -p "Enter Application Name (One-Word) → " CONTAINER_NAME + read -p "Enter Application Name (One-Word) → " CONTAINER_NAME fi CONTAINER_NAME="${CONTAINER_NAME,,}" #convert to lowercase HOST_NAME_EXISTS=$(ssh root@10.15.20.69 "node /etc/nginx/checkHostnameRunner.js checkHostnameExists ${CONTAINER_NAME}") -HOST_NAME_RETRIES=10 while [[ $HOST_NAME_EXISTS == 'true' ]] || ! [[ "$CONTAINER_NAME" =~ ^[A-Za-z0-9-]+$ ]]; do - if [ $HOST_NAME_RETRIES -gt 0 ]; then - echo "Sorry! Either that name has already been registered or your hostname is ill-formatted. Try another name" - read -p "Enter Application Name (One-Word) → " CONTAINER_NAME - HOST_NAME_EXISTS=$(ssh root@10.15.20.69 "node /etc/nginx/checkHostnameRunner.js checkHostnameExists ${CONTAINER_NAME}") - HOST_NAME_RETRIES=$(($HOST_NAME_RETRIES-1)) - CONTAINER_NAME="${CONTAINER_NAME,,}" - else - echo "Too many incorrect attempts. Exiting..." - exit 3 - fi + if [ "${GH_ACTION^^}" == "Y" ]; then + outputError "Invalid Container Hostname." + writeLog "Invalid container hostname entered: $CONTAINER_NAME (GH_ACTION mode)" + exit 3 + fi + echo "Sorry! Either that name has already been registered or your hostname is ill-formatted. 
Try another name" + writeLog "Invalid container hostname entered: $CONTAINER_NAME (already exists or ill-formatted)" + read -p "Enter Application Name (One-Word) → " CONTAINER_NAME + HOST_NAME_EXISTS=$(ssh root@10.15.20.69 "node /etc/nginx/checkHostnameRunner.js checkHostnameExists ${CONTAINER_NAME}") + CONTAINER_NAME="${CONTAINER_NAME,,}" done echo "āœ… $CONTAINER_NAME is available" -# Gather Container Password -PASSWORD_RETRIES=10 - -if [ -z "$CONTAINER_PASSWORD" ]; then - read -sp "Enter Container Password → " CONTAINER_PASSWORD - echo - read -sp "Confirm Container Password → " CONFIRM_PASSWORD - echo - - while [[ "$CONFIRM_PASSWORD" != "$CONTAINER_PASSWORD" || ${#CONTAINER_PASSWORD} -lt 8 ]]; do - if [ $PASSWORD_RETRIES -gt 0 ]; then - echo "Sorry, try again. Ensure passwords are at least 8 characters." - read -sp "Enter Container Password → " CONTAINER_PASSWORD - echo - read -sp "Confirm Container Password → " CONFIRM_PASSWORD - echo - PASSWORD_RETRIES=$(($PASSWORD_RETRIES-1)) - else - echo "Too many incorrect attempts. Exiting..." - exit 4 - fi - done -else - CONFIRM_PASSWORD="$CONTAINER_PASSWORD" - while [[ "$CONFIRM_PASSWORD" != "$CONTAINER_PASSWORD" || ${#CONTAINER_PASSWORD} -lt 8 ]]; do - if [ $PASSWORD_RETRIES -gt 0 ]; then - echo "Sorry, try again. Ensure passwords are at least 8 characters." - read -sp "Enter Container Password → " CONTAINER_PASSWORD - echo - read -sp "Confirm Container Password → " CONFIRM_PASSWORD - echo - PASSWORD_RETRIES=$(($PASSWORD_RETRIES-1)) - else - echo "Too many incorrect attempts. Exiting..." - exit 4 - fi - done -fi - # Choose Linux Distribution if [ -z "$LINUX_DISTRIBUTION" ]; then - echo "🐧 Available Linux Distributions:" - echo "1. Debian 12 (Bookworm)" - echo "2. Rocky 9 " - read -p "āž”ļø Choose a Linux Distribution (debian/rocky) → " LINUX_DISTRIBUTION + echo "🐧 Available Linux Distributions:" + echo "1. Debian 12 (Bookworm)" + echo "2. 
Rocky 9 " + read -p "āž”ļø Choose a Linux Distribution (debian/rocky) → " LINUX_DISTRIBUTION fi -while [ "${LINUX_DISTRIBUTION,,}" != "debian" ] && [ "${LINUX_DISTRIBUTION,,}" != "rocky" ]; do - echo "āŒ Please choose a valid Linux Distribution." - echo "1. Debian 12 (Bookworm)" - echo "2. Rocky 9 " - read -p "āž”ļø Choose a Linux Distribution (debian/rocky) → " LINUX_DISTRIBUTION -done +if [ "${LINUX_DISTRIBUTION,,}" != "debian" ] && [ "${LINUX_DISTRIBUTION,,}" != "rocky" ]; then + LINUX_DISTRIBUTION="debian" +fi LINUX_DISTRIBUTION=${LINUX_DISTRIBUTION,,} @@ -135,53 +106,54 @@ PUB_FILE="key_$RANDOM_NUM.pub" TEMP_PUB_FILE="/root/bin/ssh/temp_pubs/$PUB_FILE" # in case two users are running this script at the same time, they do not overwrite each other's temp files touch "$TEMP_PUB_FILE" DETECT_PUBLIC_KEY=$(sudo /root/bin/ssh/detectPublicKey.sh "$SSH_KEY_FP" "$TEMP_PUB_FILE") -KEY_RETRIES=10 if [ "$DETECT_PUBLIC_KEY" == "Public key found for create-container" ]; then - echo "šŸ” Public Key Found!" + echo "šŸ” Public Key Found!" else - echo "šŸ” Could not detect Public Key" - - if [ -z "$PUBLIC_KEY" ]; then - read -p "Enter Public Key (Allows Easy Access to Container) [OPTIONAL - LEAVE BLANK TO SKIP] → " PUBLIC_KEY - fi + echo "šŸ” Could not detect Public Key" - # Check if key is valid - - while [[ "$PUBLIC_KEY" != "" && $(echo "$PUBLIC_KEY" | ssh-keygen -l -f - 2>&1 | tr -d '\r') == "(stdin) is not a public key file." ]]; do - if [ $KEY_RETRIES -gt 0 ]; then - echo "āŒ \"$PUBLIC_KEY\" is not a valid key. Enter either a valid key or leave blank to skip." - read -p "Enter Public Key (Allows Easy Access to Container) [OPTIONAL - LEAVE BLANK TO SKIP] → " PUBLIC_KEY - KEY_RETRIES=$(($KEY_RETRIES-1)) - else - echo "Too many incorrect attempts. Exiting..." 
- exit 5 - fi - done + if [ -z "$PUBLIC_KEY" ]; then + read -p "Enter Public Key (Allows Easy Access to Container) [OPTIONAL - LEAVE BLANK TO SKIP] → " PUBLIC_KEY + fi - if [ "$PUBLIC_KEY" != "" ]; then - echo "$PUBLIC_KEY" > "$AUTHORIZED_KEYS" && systemctl restart ssh - echo "$PUBLIC_KEY" > "$TEMP_PUB_FILE" - sudo /root/bin/ssh/publicKeyAppendJumpHost.sh "$PUBLIC_KEY" - fi + # Check if key is valid + + while [[ "$PUBLIC_KEY" != "" && $(echo "$PUBLIC_KEY" | ssh-keygen -l -f - 2>&1 | tr -d '\r') == "(stdin) is not a public key file." ]]; do + if [ "${GH_ACTION^^}" == "Y" ]; then + outputError "Invalid Public Key" + writeLog "Invalid public key entered (GH_ACTION mode)" + exit 5 + fi + echo "āŒ \"$PUBLIC_KEY\" is not a valid key. Enter either a valid key or leave blank to skip." + writeLog "Invalid public key entered: $PUBLIC_KEY" + read -p "Enter Public Key (Allows Easy Access to Container) [OPTIONAL - LEAVE BLANK TO SKIP] → " PUBLIC_KEY + done + + if [ "$PUBLIC_KEY" != "" ]; then + echo "$PUBLIC_KEY" > "$AUTHORIZED_KEYS" && systemctl restart ssh + echo "$PUBLIC_KEY" > "$TEMP_PUB_FILE" + sudo /root/bin/ssh/publicKeyAppendJumpHost.sh "$PUBLIC_KEY" + fi fi # Get HTTP Port Container Listens On -HTTP_PORT_RETRIES=10 if [ -z "$HTTP_PORT" ]; then read -p "Enter HTTP Port for your container to listen on (80-60000) → " HTTP_PORT + if [ "${GH_ACTION^^}" == "Y" ]; then + HTTP_PORT="3000" # Default to 3000 if not set + fi fi while ! [[ "$HTTP_PORT" =~ ^[0-9]+$ ]] || [ "$HTTP_PORT" -lt 80 ] || [ "$HTTP_PORT" -gt 60000 ]; do - if [ $HTTP_PORT_RETRIES -gt 0 ]; then - echo "āŒ Invalid HTTP Port. It must be a number between 80 and 60,000." - read -p "Enter HTTP Port for your container to listen on (80-60000) → " HTTP_PORT - HTTP_PORT_RETRIES=$(($HTTP_PORT_RETRIES-1)) - else - echo "Too many incorrect attempts. Exiting..." - exit 6 - fi + if [ "${GH_ACTION^^}" == "Y" ]; then + outputError "Invalid HTTP Port. Must be between 80 and 60,000." 
+ writeLog "Invalid HTTP port entered: $HTTP_PORT (GH_ACTION mode)" + exit 6 + fi + echo "āŒ Invalid HTTP Port. It must be a number between 80 and 60,000." + writeLog "Invalid HTTP port entered: $HTTP_PORT" + read -p "Enter HTTP Port for your container to listen on (80-60000) → " HTTP_PORT done echo "āœ… HTTP Port is set to $HTTP_PORT" @@ -207,12 +179,13 @@ while [ "${USE_OTHER_PROTOCOLS^^}" != "Y" ] && [ "${USE_OTHER_PROTOCOLS^^}" != " read -p "Does your Container require any protocols other than SSH and HTTP? (y/n) → " USE_OTHER_PROTOCOLS done -RANDOM_NUM=$(shuf -i 100000-999999 -n 1) -PROTOCOL_BASE_FILE="protocol_list_$RANDOM_NUM.txt" -PROTOCOL_FILE="/root/bin/protocols/$PROTOCOL_BASE_FILE" -touch "$PROTOCOL_FILE" - if [ "${USE_OTHER_PROTOCOLS^^}" == "Y" ]; then + + RANDOM_NUM=$(shuf -i 100000-999999 -n 1) + PROTOCOL_BASE_FILE="protocol_list_$RANDOM_NUM.txt" + PROTOCOL_FILE="/root/bin/protocols/$PROTOCOL_BASE_FILE" + touch "$PROTOCOL_FILE" + LIST_PROTOCOLS=() read -p "Enter the protocol abbreviation (e.g, LDAP for Lightweight Directory Access Protocol). Type \"e\" to exit → " PROTOCOL_NAME while [ "${PROTOCOL_NAME^^}" != "E" ]; do @@ -244,7 +217,6 @@ if [ "${USE_OTHER_PROTOCOLS^^}" == "Y" ]; then done fi - # Attempt to deploy application on start. if [ -z "$DEPLOY_ON_START" ]; then @@ -256,6 +228,12 @@ while [ "${DEPLOY_ON_START^^}" != "Y" ] && [ "${DEPLOY_ON_START^^}" != "N" ] && read -p "šŸš€ Do you want to deploy your project automatically? (y/n) → " DEPLOY_ON_START done +if [ "${GH_ACTION^^}" == "Y" ]; then + if [ ! 
-z "${RUNTIME_LANGUAGE^^}" ]; then + DEPLOY_ON_START="Y" + fi +fi + if [ "${DEPLOY_ON_START^^}" == "Y" ]; then source /root/bin/deploy-application.sh fi @@ -291,27 +269,38 @@ echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━ echo -e "${BOLD}${MAGENTA}šŸš€ Starting Container Creation...${RESET}" echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" -ssh -t root@10.15.0.4 "bash -c '/var/lib/vz/snippets/clone-clxc.sh \ -'\''$CONTAINER_NAME'\'' \ -'\''$CONTAINER_PASSWORD'\'' \ -'\''$HTTP_PORT'\'' \ -'\''$PROXMOX_USERNAME'\'' \ -'\''$PUB_FILE'\'' \ -'\''$PROTOCOL_BASE_FILE'\'' \ -'\''$DEPLOY_ON_START'\'' \ -'\''$PROJECT_REPOSITORY'\'' \ -'\''$PROJECT_BRANCH'\'' \ -'\''$PROJECT_ROOT'\'' \ -'\''$INSTALL_COMMAND'\'' \ -'\''$BUILD_COMMAND'\'' \ -'\''$START_COMMAND'\'' \ -'\''$RUNTIME_LANGUAGE'\'' \ -'\''$ENV_FOLDER'\'' \ -'\''$SERVICES_FILE'\'' \ -'\''$LINUX_DISTRIBUTION'\'' \ -'\''$MULTI_COMPONENT'\'' -'\''$ROOT_START_COMMAND'\'' '" - +# Encode JSON variables +INSTALL_COMMAND_B64=$(echo -n "$INSTALL_COMMAND" | base64) +BUILD_COMMAND_B64=$(echo -n "$BUILD_COMMAND" | base64) +RUNTIME_LANGUAGE_B64=$(echo -n "$RUNTIME_LANGUAGE" | base64) +START_COMMAND_B64=$(echo -n "$START_COMMAND" | base64) + +REMOTE_CMD=( +/var/lib/vz/snippets/create-container.sh +"$CONTAINER_NAME" +"$GH_ACTION" +"$HTTP_PORT" +"$PROXMOX_USERNAME" +"$PUB_FILE" +"$PROTOCOL_BASE_FILE" +"$DEPLOY_ON_START" +"$PROJECT_REPOSITORY" +"$PROJECT_BRANCH" +"$PROJECT_ROOT" +"$INSTALL_COMMAND_B64" +"$BUILD_COMMAND_B64" +"$START_COMMAND_B64" +"$RUNTIME_LANGUAGE_B64" +"$ENV_FOLDER" +"$SERVICES_FILE" +"$LINUX_DISTRIBUTION" +"$MULTI_COMPONENT" +"$ROOT_START_COMMAND" +) + +QUOTED_REMOTE_CMD=$(printf ' %q' "${REMOTE_CMD[@]}") + +ssh -t root@10.15.0.4 "bash -c \"$QUOTED_REMOTE_CMD\"" rm -rf "$PROTOCOL_FILE" rm -rf "$TEMP_PUB_FILE" @@ -319,5 +308,5 @@ rm -rf "$TEMP_SERVICES_FILE_PATH" rm -rf "$ENV_FOLDER_PATH" unset CONFIRM_PASSWORD -unset CONTAINER_PASSWORD unset PUBLIC_KEY +unset PROXMOX_PASSWORD diff --git a/container 
creation/protocols/master_protocol_list.txt b/container creation/protocols/master_protocol_list.txt new file mode 100644 index 00000000..ba1d16e4 --- /dev/null +++ b/container creation/protocols/master_protocol_list.txt @@ -0,0 +1,145 @@ +TCPM 1 tcp +RJE 5 tcp +ECHO 7 tcp +DISCARD 9 tcp +DAYTIME 13 tcp +QOTD 17 tcp +MSP 18 tcp +CHARGEN 19 tcp +FTP 20 tcp +FTP 21 tcp +SSH 22 tcp +TELNET 23 tcp +SMTP 25 tcp +TIME 37 tcp +HNS 42 tcp +WHOIS 43 tcp +TACACS 49 tcp +DNS 53 tcp +BOOTPS 67 udp +BOOTPC 68 udp +TFTP 69 udp +GOPHER 70 tcp +FINGER 79 tcp +HTTP 80 tcp +KERBEROS 88 tcp +HNS 101 tcp +ISO-TSAP 102 tcp +POP2 109 tcp +POP3 110 tcp +RPC 111 tcp +AUTH 113 tcp +SFTP 115 tcp +UUCP-PATH 117 tcp +NNTP 119 tcp +NTP 123 udp +EPMAP 135 tcp +NETBIOS-NS 137 tcp +NETBIOS-DGM 138 udp +NETBIOS-SSN 139 tcp +IMAP 143 tcp +SQL-SRV 156 tcp +SNMP 161 udp +SNMPTRAP 162 udp +XDMCP 177 tcp +BGP 179 tcp +IRC 194 tcp +LDAP 389 tcp +NIP 396 tcp +HTTPS 443 tcp +SNPP 444 tcp +SMB 445 tcp +KPASSWD 464 tcp +SMTPS 465 tcp +ISAKMP 500 udp +EXEC 512 tcp +LOGIN 513 tcp +SYSLOG 514 udp +LPD 515 tcp +TALK 517 udp +NTALK 518 udp +RIP 520 udp +RIPNG 521 udp +RPC 530 tcp +UUCP 540 tcp +KLOGIN 543 tcp +KSHELL 544 tcp +DHCPV6-C 546 tcp +DHCPV6-S 547 tcp +AFP 548 tcp +RTSP 554 tcp +NNTPS 563 tcp +SUBMISSION 587 tcp +IPP 631 tcp +LDAPS 636 tcp +LDP 646 tcp +LINUX-HA 694 tcp +ISCSI 860 tcp +RSYNC 873 tcp +VMWARE 902 tcp +FTPS-DATA 989 tcp +FTPS 990 tcp +TELNETS 992 tcp +IMAPS 993 tcp +POP3S 995 tcp +SOCKS 1080 tcp +OPENVPN 1194 udp +OMGR 1311 tcp +MS-SQL-S 1433 tcp +MS-SQL-M 1434 udp +WINS 1512 tcp +ORACLE-SQL 1521 tcp +RADIUS 1645 tcp +RADIUS-ACCT 1646 tcp +L2TP 1701 udp +PPTP 1723 tcp +CISCO-ISL 1741 tcp +RADIUS 1812 udp +RADIUS-ACCT 1813 udp +NFS 2049 tcp +CPANEL 2082 tcp +CPANEL-SSL 2083 tcp +WHM 2086 tcp +WHM-SSL 2087 tcp +DA 2222 tcp +ORACLE-DB 2483 tcp +ORACLE-DBS 2484 tcp +XBOX 3074 tcp +HTTP-PROXY 3128 tcp +MYSQL 3306 tcp +RDP 3389 tcp +NDPS-PA 3396 tcp +SVN 3690 tcp +MSQL 4333 udp +METASPLOIT 4444 
tcp +EMULE 4662 tcp +EMULE 4672 udp +RADMIN 4899 tcp +UPNP 5000 tcp +YMSG 5050 tcp +SIP 5060 tcp +SIP-TLS 5061 tcp +AIM 5190 tcp +XMPP-CLIENT 5222 tcp +XMPP-CLIENTS 5223 tcp +XMPP-SERVER 5269 tcp +POSTGRES 5432 tcp +VNC 5500 tcp +VNC-HTTP 5800 tcp +VNC 5900 tcp +X11 6000 tcp +BNET 6112 tcp +GNUTELLA 6346 tcp +SANE 6566 tcp +IRC 6667 tcp +IRCS 6697 tcp +BT 6881 tcp +HTTP-ALT 8000 tcp +HTTP-ALT 8008 tcp +HTTP-ALT 8080 tcp +HTTPS-ALT 8443 tcp +PDL-DS 9100 tcp +BACNET 9101 tcp +WEBMIN 10000 udp +MONGO 27017 tcp +TRACEROUTE 33434 udp \ No newline at end of file diff --git a/container creation/services/service_map.json b/container creation/services/service_map_debian.json similarity index 56% rename from container creation/services/service_map.json rename to container creation/services/service_map_debian.json index 8f7618f4..2c99c524 100644 --- a/container creation/services/service_map.json +++ b/container creation/services/service_map_debian.json @@ -3,66 +3,66 @@ "curl https://install.meteor.com/ | sh" ], "mongodb": [ - "sudo apt-get update", - "sudo apt-get install -y gnupg curl", + "sudo apt update -y", + "sudo apt install -y gnupg curl", "curl -fsSL https://pgp.mongodb.com/server-7.0.asc | sudo gpg --dearmor -o /usr/share/keyrings/mongodb-server-7.0.gpg", "echo \"deb [ signed-by=/usr/share/keyrings/mongodb-server-7.0.gpg ] https://repo.mongodb.org/apt/debian bookworm/mongodb-org/7.0 main\" | sudo tee /etc/apt/sources.list.d/mongodb-org-7.0.list", - "sudo apt-get update", - "sudo apt-get install -y mongodb-org", + "sudo apt update -y", + "sudo apt install -y mongodb-org", "sudo systemctl enable mongod", "sudo systemctl start mongod" ], "redis": [ - "sudo apt-get update", - "sudo apt-get install -y redis-server", + "sudo apt update -y", + "sudo apt install -y redis-server", "sudo systemctl enable redis-server", "sudo systemctl start redis-server" ], "postgresql": [ - "sudo apt-get update", - "sudo apt-get install -y postgresql postgresql-contrib", + "sudo apt update 
-y", + "sudo apt install -y postgresql postgresql-contrib", "sudo systemctl enable postgresql", "sudo systemctl start postgresql" ], "apache": [ - "sudo apt-get update", - "sudo apt-get install -y apache2", + "sudo apt update -y", + "sudo apt install -y apache2", "sudo systemctl enable apache2", "sudo systemctl start apache2" ], "nginx": [ - "sudo apt-get update", - "sudo apt-get install -y nginx", + "sudo apt update -y", + "sudo apt install -y nginx", "sudo systemctl enable nginx", "sudo systemctl start nginx" ], "docker": [ - "sudo apt-get update", - "sudo apt-get install -y lsb-release", - "sudo apt-get install -y ca-certificates curl gnupg lsb-release", + "sudo apt update -y", + "sudo apt install -y lsb-release", + "sudo apt install -y ca-certificates curl gnupg lsb-release", "sudo install -m 0755 -d /etc/apt/keyrings", "curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg", - "echo \"deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian $(lsb_release -cs) stable\" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null", - "sudo apt-get update", - "sudo apt-get install -y docker-ce docker-ce-cli containerd.io", + "echo 'deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian bookworm stable' | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null", + "sudo apt update -y", + "sudo apt install -y docker-ce docker-ce-cli containerd.io", "sudo systemctl enable docker", "sudo systemctl start docker" ], "rabbitmq": [ - "sudo apt-get update", - "sudo apt-get install -y rabbitmq-server", + "sudo apt update -y", + "sudo apt install -y rabbitmq-server", "sudo systemctl enable rabbitmq-server", "sudo systemctl start rabbitmq-server" ], "memcached": [ - "sudo apt-get update", - "sudo apt-get install -y memcached", + "sudo apt update -y", + "sudo apt install -y memcached", "sudo systemctl enable memcached", 
"sudo systemctl start memcached" ], "mariadb": [ - "sudo apt-get update", - "sudo apt-get install -y mariadb-server", + "sudo apt update -y", + "sudo apt install -y mariadb-server", "sudo systemctl enable mariadb", "sudo systemctl start mariadb" ] diff --git a/container creation/services/service_map_rocky.json b/container creation/services/service_map_rocky.json new file mode 100644 index 00000000..1b661433 --- /dev/null +++ b/container creation/services/service_map_rocky.json @@ -0,0 +1,99 @@ +{ + "meteor": [ + "dnf install tar -y", + "curl https://install.meteor.com/ | sh" + ], + "mongodb": [ + "sudo dnf install -y epel-release", + "sudo dnf update -y", + "sudo dnf install -y gnupg curl", + "curl -fsSL https://pgp.mongodb.com/server-7.0.asc | sudo gpg --dearmor -o /etc/pki/rpm-gpg/RPM-GPG-KEY-mongodb", + "echo '[mongodb-org-7.0]' | sudo tee /etc/yum.repos.d/mongodb-org-7.0.repo", + "echo 'name=MongoDB Repository' >> /etc/yum.repos.d/mongodb-org-7.0.repo", + "echo 'baseurl=https://repo.mongodb.org/yum/redhat/9/mongodb-org/7.0/x86_64/' >> /etc/yum.repos.d/mongodb-org-7.0.repo", + "echo 'gpgcheck=1' >> /etc/yum.repos.d/mongodb-org-7.0.repo", + "echo 'enabled=1' >> /etc/yum.repos.d/mongodb-org-7.0.repo", + "echo 'gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-mongodb' >> /etc/yum.repos.d/mongodb-org-7.0.repo", + "sudo dnf install -y mongodb-org", + "sudo systemctl enable mongod", + "sudo systemctl start mongod" + ], + "redis": [ + "sudo dnf install -y epel-release", + "sudo dnf update -y", + "sudo dnf install -y redis", + "sudo systemctl enable redis", + "sudo systemctl start redis" + ], + "postgresql": [ + "sudo dnf install -y epel-release", + "sudo dnf update -y", + "sudo dnf install -y postgresql-server postgresql-contrib", + "sudo postgresql-setup --initdb", + "sudo systemctl enable postgresql", + "sudo systemctl start postgresql" + ], + "apache": [ + "sudo dnf install -y epel-release", + "sudo dnf update -y", + "sudo dnf install -y httpd", + "sudo systemctl enable 
httpd", + "sudo systemctl start httpd" + ], + "httpd": [ + "sudo dnf install -y epel-release", + "sudo dnf update -y", + "sudo dnf install -y httpd", + "sudo systemctl enable httpd", + "sudo systemctl start httpd" + ], + "nginx": [ + "sudo dnf install -y epel-release", + "sudo dnf update -y", + "sudo dnf install -y nginx", + "sudo systemctl enable nginx", + "sudo systemctl start nginx" + ], + "docker": [ + "sudo dnf update -y", + "sudo dnf install -y yum-utils device-mapper-persistent-data lvm2", + "sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo", + "sudo dnf install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin", + "sudo systemctl enable docker", + "sudo systemctl start docker" + ], + "rabbitmq": [ + "sudo dnf install -y epel-release", + "sudo dnf install -y erlang", + "sudo dnf update -y", + "rpm --import 'https://github.com/rabbitmq/signing-keys/releases/download/3.0/rabbitmq-release-signing-key.asc'", + "rpm --import 'https://github.com/rabbitmq/signing-keys/releases/download/3.0/cloudsmith.rabbitmq-server.9F4587F226208342.key'", + "echo '[rabbitmq-server]' | sudo tee /etc/yum.repos.d/rabbitmq_rabbitmq-server.repo", + "echo 'name=rabbitmq-server' | sudo tee -a /etc/yum.repos.d/rabbitmq_rabbitmq-server.repo", + "echo 'baseurl=https://packagecloud.io/rabbitmq/rabbitmq-server/el/9/$basearch' | sudo tee /etc/yum.repos.d/rabbitmq_rabbitmq-server.repo", + "echo 'repo_gpgcheck=1' | sudo tee -a /etc/yum.repos.d/rabbitmq_rabbitmq-server.repo", + "echo 'gpgcheck=1' | sudo tee -a /etc/yum.repos.d/rabbitmq_rabbitmq-server.repo", + "echo 'enabled=1' | sudo tee -a /etc/yum.repos.d/rabbitmq_rabbitmq-server.repo", + "echo 'gpgkey=https://packagecloud.io/rabbitmq/rabbitmq-server/gpgkey' | sudo tee -a /etc/yum.repos.d/rabbitmq_rabbitmq-server.repo", + "echo 'sslverify=1' | sudo tee -a /etc/yum.repos.d/rabbitmq_rabbitmq-server.repo", + "echo 'sslcacert=/etc/pki/tls/certs/ca-bundle.crt' | sudo tee -a 
/etc/yum.repos.d/rabbitmq_rabbitmq-server.repo", + "echo 'metadata_expire=300' | sudo tee -a /etc/yum.repos.d/rabbitmq_rabbitmq-server.repo", + "sudo dnf install -y rabbitmq-server", + "sudo systemctl enable rabbitmq-server", + "sudo systemctl start rabbitmq-server" + ], + "memcached": [ + "sudo dnf install -y epel-release", + "sudo dnf update -y", + "sudo dnf install -y memcached", + "sudo systemctl enable memcached", + "sudo systemctl start memcached" + ], + "mariadb": [ + "sudo dnf install -y epel-release", + "sudo dnf update -y", + "sudo dnf install -y mariadb-server", + "sudo systemctl enable mariadb", + "sudo systemctl start mariadb" + ] +} \ No newline at end of file diff --git a/proxmox-launchpad b/proxmox-launchpad new file mode 160000 index 00000000..038aff5a --- /dev/null +++ b/proxmox-launchpad @@ -0,0 +1 @@ +Subproject commit 038aff5ad0eacd9f77935ae8819ab59da13fc981 From 189f93809cd8e7184af77d32a530b1066fdfb12c Mon Sep 17 00:00:00 2001 From: maxklema Date: Tue, 5 Aug 2025 15:50:00 -0400 Subject: [PATCH 4/8] READMEs in each directory, re-organization, updated ci-cd files --- container creation/create-container.sh | 5 +- container creation/setup-runner.sh | 48 +++- .../check-container-exists.sh | 40 --- container maintenance/delete-container.sh | 29 --- .../helper-scripts/PVE_user_authentication.sh | 35 --- .../helper-scripts/create-template.sh | 56 ---- .../helper-scripts/delete-runner.sh | 44 ---- .../helper-scripts/repository_status.sh | 37 --- .../verify_container_ownership.sh | 37 --- container maintenance/start_services.sh | 144 ----------- container maintenance/update-container.sh | 242 ------------------ container registration/register-container.sh | 194 -------------- container registration/register_proxy_hook.sh | 17 -- intern-phxdc-pve1/prune_iptables.sh | 173 ------------- intern-phxdc-pve1/prune_temp_files.sh | 67 ----- jump server/extract-fingerprint.sh | 46 ---- nginx proxy/nginx.conf | 29 --- nginx proxy/port_map.js | 94 ------- nginx 
proxy/reverse_proxy.conf | 76 ------ proxmox-launchpad | 1 - 20 files changed, 38 insertions(+), 1376 deletions(-) delete mode 100644 container maintenance/check-container-exists.sh delete mode 100644 container maintenance/delete-container.sh delete mode 100644 container maintenance/helper-scripts/PVE_user_authentication.sh delete mode 100755 container maintenance/helper-scripts/create-template.sh delete mode 100644 container maintenance/helper-scripts/delete-runner.sh delete mode 100644 container maintenance/helper-scripts/repository_status.sh delete mode 100644 container maintenance/helper-scripts/verify_container_ownership.sh delete mode 100644 container maintenance/start_services.sh delete mode 100644 container maintenance/update-container.sh delete mode 100644 container registration/register-container.sh delete mode 100644 container registration/register_proxy_hook.sh delete mode 100644 intern-phxdc-pve1/prune_iptables.sh delete mode 100644 intern-phxdc-pve1/prune_temp_files.sh delete mode 100644 jump server/extract-fingerprint.sh delete mode 100644 nginx proxy/nginx.conf delete mode 100644 nginx proxy/port_map.js delete mode 100644 nginx proxy/reverse_proxy.conf delete mode 160000 proxmox-launchpad diff --git a/container creation/create-container.sh b/container creation/create-container.sh index 39098df3..17d2148b 100644 --- a/container creation/create-container.sh +++ b/container creation/create-container.sh @@ -1,6 +1,7 @@ #!/bin/bash # Script to create the pct container, run register container, and migrate container accordingly. 
-# Last Modified by July 23rd, 2025 by Maxwell Klema +# Last Modified by August 5th, 2025 by Maxwell Klema +# ----------------------------------------------------- BOLD='\033[1m' BLUE='\033[34m' @@ -34,7 +35,7 @@ echoContainerDetails() { echo -e "🌐 ${MAGENTA}Internal IP :${RESET} $CONTAINER_IP" echo -e "šŸ”— ${GREEN}Domain Name :${RESET} https://$CONTAINER_NAME.opensource.mieweb.org" echo -e "šŸ› ļø ${BLUE}SSH Access :${RESET} ssh -p $SSH_PORT $PROXMOX_USERNAME@$CONTAINER_NAME.opensource.mieweb.org" - echo -e "šŸ”‘ ${BLUE}Container Password :${RESET} Your proxmox account password" + echo -e "šŸ”‘ ${BLUE}Container Password :${RESET} Your proxmox account password" echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" echo -e "${BOLD}${MAGENTA}NOTE: Additional background scripts are being ran in detached terminal sessions.${RESET}" echo -e "${BOLD}${MAGENTA}Wait up to two minutes for all processes to complete.${RESET}" diff --git a/container creation/setup-runner.sh b/container creation/setup-runner.sh index 7b2a701e..382f4f87 100644 --- a/container creation/setup-runner.sh +++ b/container creation/setup-runner.sh @@ -1,8 +1,16 @@ #!/bin/bash # A script for cloning a Distro template, installing, and starting a runner on it. -# Last Modified by Maxwell Klema on July 20th, 2025 +# Last Modified by Maxwell Klema on August 5th, 2025 # ------------------------------------------------ +outputError() { + echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" + echo -e "${BOLD}${MAGENTA}āŒ Script Failed. Exiting... ${RESET}" + echo -e "$2" + echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" + exit $1 +} + BOLD='\033[1m' RESET='\033[0m' @@ -15,10 +23,8 @@ echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━ source /var/lib/vz/snippets/helper-scripts/PVE_user_authentication.sh #Authenticate User source /var/lib/vz/snippets/helper-scripts/verify_container_ownership.sh #Ensure container does not exist. -CONTAINER_EXISTS=$? 
- -if [ "$CONTAINER_EXISTS" != 1 ]; then - exit $CONTAINER_EXISTS; # Container is not free to user, either someone else owns it or the user owns it. +if [ ! -z "$CONTAINER_OWNERSHIP" ]; then + outputError 1 "You already own a container with name \"$CONTAINER_NAME\". Please delete it before creating a new one." fi # Cloning Container Template and Setting it up ===== @@ -43,6 +49,10 @@ if [ -z "$CTID_TEMPLATE" ]; then esac fi +if [ "${LINUX_DISTRIBUTION^^}" != "ROCKY" ]; then + LINUX_DISTRIBUTION="DEBIAN" +fi + REPO_BASE_NAME=$(basename -s .git "$PROJECT_REPOSITORY") REPO_BASE_NAME_WITH_OWNER=$(echo "$PROJECT_REPOSITORY" | cut -d'/' -f4) @@ -58,7 +68,10 @@ pct clone $CTID_TEMPLATE $NEXT_ID \ echo "ā³ Setting Container Properties..." pct set $NEXT_ID \ --tags "$PROXMOX_USERNAME" \ - --onboot 1 > /dev/null 2>&1 + --tags "$LINUX_DISTRIBUTION" \ + --onboot 1 \ + --cores 4 \ + --memory 4096 > /dev/null 2>&1 pct start $NEXT_ID > /dev/null 2>&1 pveum aclmod /vms/$NEXT_ID --user "$PROXMOX_USERNAME@pve" --role PVEVMUser > /dev/null 2>&1 @@ -71,16 +84,25 @@ CONTAINER_IP=$(pct exec $NEXT_ID -- hostname -I | awk '{print $1}') # Get Temporary Token echo "šŸŖ™ Getting Authentication Token..." -AUTH_TOKEN_RESPONSE=$(curl --location --request POST https://api.github.com/repos/$REPO_BASE_NAME_WITH_OWNER/$REPO_BASE_NAME/actions/runners/registration-token --header "Authorization: token $GITHUB_PAT") -TOKEN=$(echo "$AUTH_TOKEN_RESPONSE" | jq -r '.token') +AUTH_TOKEN_RESPONSE=$(curl --location --request POST https://api.github.com/repos/$REPO_BASE_NAME_WITH_OWNER/$REPO_BASE_NAME/actions/runners/registration-token --header "Authorization: token $GITHUB_PAT" --write-out "HTTPSTATUS:%{http_code}" --silent) + +HTTP_STATUS=$(echo "$AUTH_TOKEN_RESPONSE" | grep -o "HTTPSTATUS:[0-9]*" | cut -d: -f2) +AUTH_TOKEN_BODY=$(echo "$AUTH_TOKEN_RESPONSE" | sed 's/HTTPSTATUS:[0-9]*$//') + +if [ "$HTTP_STATUS" != "201" ]; then + outputError 1 "Failed to get GitHub authentication token. 
HTTP Status: $HTTP_STATUS\nResponse: $AUTH_TOKEN_BODY" +fi + +TOKEN=$(echo "$AUTH_TOKEN_BODY" | jq -r '.token') -pct enter $NEXT_ID < /dev/null -rm -rf /root/container-updates.log | true && \ +pct enter $NEXT_ID < /dev/null 2>&1 +rm -rf /root/container-updates.log || true && \ cd /actions-runner && export RUNNER_ALLOW_RUNASROOT=1 && \ -export runProcess=\$(ps aux | grep run.sh | awk '{print \$2}' | head -n 1) && kill -9 \$runProcess || true && \ -rm -rf .runner .credentials && rm -rf _work/* /var/log/runner/* && \ +runProcess=\$(ps aux | grep "[r]un.sh" | awk '{print \$2}' | head -n 1) && \ +if [ ! -z "\$runProcess" ]; then kill -9 \$runProcess || true; fi && \ +rm -rf .runner .credentials && rm -rf _work/* /var/log/runner/* 2>/dev/null || true && \ export RUNNER_ALLOW_RUNASROOT=1 && \ -./config.sh --url $PROJECT_REPOSITORY --token $TOKEN --labels $CONTAINER_NAME --name $CONTAINER_NAME +./config.sh --url $PROJECT_REPOSITORY --token $TOKEN --labels $CONTAINER_NAME --name $CONTAINER_NAME --unattended EOF # Generate RSA Keys ===== diff --git a/container maintenance/check-container-exists.sh b/container maintenance/check-container-exists.sh deleted file mode 100644 index c2aded15..00000000 --- a/container maintenance/check-container-exists.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash -# Script to check if a container exists -# Last Modified by Maxwell Klema on July 13th, 2025 -# ----------------------------------------------------- - -RESET="\033[0m" -BOLD="\033[1m" -MAGENTA='\033[35m' - -echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" -echo -e "${BOLD}${MAGENTA}šŸ”Ž Check Container Exists ${RESET}" -echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" - -set +e -TYPE_RUNNER="true" -source /var/lib/vz/snippets/helper-scripts/PVE_user_authentication.sh -source /var/lib/vz/snippets/helper-scripts/verify_container_ownership.sh - -STATUS=$? 
- -if [ "$STATUS" != 0 ]; then - exit 1; -fi - -REPO_BASE_NAME=$(basename -s .git "$PROJECT_REPOSITORY") - -# Check if repository folder is present. -if [ "$PVE1" == "true" ]; then - if pct exec $CONTAINER_ID -- test -f /root/container-updates.log; then - exit 2; # Update Repository - else - exit 0; # Clone Repository - fi -else - if ssh 10.15.0.5 "pct exec $CONTAINER_ID -- test -f /root/container-updates.log"; then - exit 2; # Update Repository - else - exit 0; # Clone Repository - fi -fi \ No newline at end of file diff --git a/container maintenance/delete-container.sh b/container maintenance/delete-container.sh deleted file mode 100644 index c3538ce3..00000000 --- a/container maintenance/delete-container.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash -# Script to delete a container permanently -# Last Modified by Maxwell Klema on July 13th, 2025 -# ----------------------------------------------------- - -RESET="\033[0m" -BOLD="\033[1m" -MAGENTA='\033[35m' - -echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" -echo -e "${BOLD}${MAGENTA}šŸ—‘ļø Delete Container ${RESET}" -echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" - -CMD=( -bash /var/lib/vz/snippets/helper-scripts/delete-runner.sh -"$PROJECT_REPOSITORY" -"$GITHUB_PAT" -"$PROXMOX_USERNAME" -"$PROXMOX_PASSWORD" -"$CONTAINER_NAME" -) - -# Safely quote each argument for the shell -QUOTED_CMD=$(printf ' %q' "${CMD[@]}") - -tmux new-session -d -s delete-runner "$QUOTED_CMD" - -echo "āœ… Container with name \"$CONTAINER_NAME\" will been permanently deleted." 
-exit 0 # Container Deleted Successfully \ No newline at end of file diff --git a/container maintenance/helper-scripts/PVE_user_authentication.sh b/container maintenance/helper-scripts/PVE_user_authentication.sh deleted file mode 100644 index c751aadc..00000000 --- a/container maintenance/helper-scripts/PVE_user_authentication.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash -# Script that checks if a user is authenticated in Proxmox PVE Realm @ opensource.mieweb.org -# Last Modified by Maxwell Klema on July 13th, 2025 -# ----------------------------------------------------- - -# Authenticate User (Only Valid Users can Create Containers) - -if [ -z "$PROXMOX_USERNAME" ]; then - read -p "Enter Proxmox Username → " PROXMOX_USERNAME -fi - -if [ -z "$PROXMOX_PASSWORD" ]; then - read -sp "Enter Proxmox Password → " PROXMOX_PASSWORD - echo "" -fi - -USER_AUTHENTICATED=$(ssh root@10.15.234.122 "node /root/bin/js/runner.js authenticateUser \"$PROXMOX_USERNAME\" \"$PROXMOX_PASSWORD\"") -RETRIES=3 - -while [ $USER_AUTHENTICATED == 'false' ]; do - if [ $RETRIES -gt 0 ]; then - echo "āŒ Authentication Failed. Try Again" - read -p "Enter Proxmox Username → " PROXMOX_USERNAME - read -sp "Enter Proxmox Password → " PROXMOX_PASSWORD - echo "" - - USER_AUTHENTICATED=$(ssh root@10.15.234.122 "node /root/bin/js/runner.js authenticateUser \"$PROXMOX_USERNAME\" \"$PROXMOX_PASSWORD\"") - RETRIES=$(($RETRIES-1)) - else - echo "Too many incorrect attempts. Exiting..." - exit 2 - fi -done - -echo "šŸŽ‰ Your proxmox account, $PROXMOX_USERNAME@pve, has been authenticated" \ No newline at end of file diff --git a/container maintenance/helper-scripts/create-template.sh b/container maintenance/helper-scripts/create-template.sh deleted file mode 100755 index 2f80b706..00000000 --- a/container maintenance/helper-scripts/create-template.sh +++ /dev/null @@ -1,56 +0,0 @@ -#!/bin/bash -# Creates a template of a LXC container -# Last modified by Maxwell Klema on July 23rd, 2025. 
-# -------------------------------------------------- - -if [ "${DEPLOY_ON_START^^}" != "Y" ] || [ "${GH_ACTION^^}" != "Y" ]; then - return 0 -fi - -DEFAULT_BRANCH=$(curl -s https://api.github.com/repos/$REPO_BASE_NAME_WITH_OWNER/$REPO_BASE_NAME | jq -r '.default_branch') - -if [ "$DEFAULT_BRANCH" != "$PROJECT_BRANCH" ]; then - return 0 -fi - -echo "šŸ“ Creating Container Template..." - -# Check if template already exists, and if it does, destroy it ===== - -TEMPLATE_NAME="template-$REPO_BASE_NAME-$REPO_BASE_NAME_WITH_OWNER" -TEMPLATE_CONTAINER_ID=$( { pct list; ssh root@10.15.0.5 'pct list'; } | awk -v name="$TEMPLATE_NAME" '$3 == name {print $1}') - -if [ ! -z "$TEMPLATE_CONTAINER_ID" ]; then - pct destroy $TEMPLATE_CONTAINER_ID | true -fi - -# Clone LXC container and convert it into a template ===== - -NEXT_ID=$(pvesh get /cluster/nextid) - -if (( $CONTAINER_ID % 2 == 101 )); then - ssh root@10.15.0.5 " - pct clone $CONTAINER_ID $NEXT_ID \ - --hostname "$TEMPLATE_NAME" \ - --full true - pct migrate $NEXT_ID intern-phxdc-pve1 --target-storage containers-pve1 - " > /dev/null 2>&1 -else - pct clone $CONTAINER_ID $NEXT_ID \ - --hostname "$TEMPLATE_NAME" \ - --full true -fi - -AUTH_TOKEN_RESPONSE=$(curl --location --request POST https://api.github.com/repos/$REPO_BASE_NAME_WITH_OWNER/$REPO_BASE_NAME/actions/runners/registration-token --header "Authorization: token $GITHUB_PAT") -TOKEN=$(echo "$AUTH_TOKEN_RESPONSE" | jq -r '.token') - -# Remove rsa keys ==== -pct start $NEXT_ID -pct enter $NEXT_ID < /dev/null 2>&1 - else - ssh root@10.15.0.5 "pct destroy $CONTAINER_ID" > /dev/null 2>&1 - fi -else - if pct status "$CONTAINER_ID" | grep -q "status: running"; then - pct stop "$CONTAINER_ID" && pct destroy "$CONTAINER_ID" > /dev/null 2>&1 - else - pct destroy "$CONTAINER_ID" > /dev/null 2>&1 - fi -fi - -source /usr/local/bin/prune_iptables.sh - -REPO_BASE_NAME=$(basename -s .git "$PROJECT_REPOSITORY") -REPO_BASE_NAME_WITH_OWNER=$(echo "$PROJECT_REPOSITORY" | cut -d'/' 
-f4) - -RUNNERS=$(curl --location https://api.github.com/repos/$REPO_BASE_NAME_WITH_OWNER/$REPO_BASE_NAME/actions/runners --header "Authorization: token $GITHUB_PAT") - -while read -r RUNNER; do - RUNNER_NAME=$(echo "$RUNNER" | jq -r '.name') - if [ "$RUNNER_NAME" == "$CONTAINER_NAME" ]; then - RUNNER_ID=$(echo "$RUNNER" | jq -r '.id') - curl --location --request DELETE "https://api.github.com/repos/$REPO_BASE_NAME_WITH_OWNER/$REPO_BASE_NAME/actions/runners/$RUNNER_ID" \ - --header "Authorization: token $GITHUB_PAT" - fi -done < <(echo "$RUNNERS" | jq -c '.runners[]') \ No newline at end of file diff --git a/container maintenance/helper-scripts/repository_status.sh b/container maintenance/helper-scripts/repository_status.sh deleted file mode 100644 index 5ec73b76..00000000 --- a/container maintenance/helper-scripts/repository_status.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/bash -# Helper script to determine if container needs to clone repository or simply update it -# Last Modified by Maxwell Klema on July 21st, 2025 -# ------------------------------------------------- - -set +e -TYPE_RUNNER="true" -source /var/lib/vz/snippets/helper-scripts/PVE_user_authentication.sh -source /var/lib/vz/snippets/helper-scripts/verify_container_ownership.sh - -STATUS=$? - -if [ "$STATUS" != 0 ]; then - exit 1; -fi - -REPO_BASE_NAME=$(basename -s .git "$PROJECT_REPOSITORY") - -# Check if repository folder is present. 
- -if [ "$PVE1" == "true" ]; then - if pct exec $CONTAINER_ID -- test -d /root/$REPO_BASE_NAME; then - echo "Update" - exit 2; # Update Repository - else - echo "Clone" - exit 0; # Clone Repository - fi -else - if ssh 10.15.0.5 "pct exec $CONTAINER_ID -- test -d /root/$REPO_BASE_NAME"; then - echo "Update" - exit 2; # Update Repository - else - echo "Clone" - exit 0; # Clone Repository - fi -fi \ No newline at end of file diff --git a/container maintenance/helper-scripts/verify_container_ownership.sh b/container maintenance/helper-scripts/verify_container_ownership.sh deleted file mode 100644 index 53b9e1cd..00000000 --- a/container maintenance/helper-scripts/verify_container_ownership.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/bash -# Script to verify container ownership based on name and CTID -# Last Modified by Maxwell Klema on July 13th, 2025 -# ----------------------------------------------------- - -CONTAINER_NAME="${CONTAINER_NAME,,}" - -if [ -z "$CONTAINER_NAME" ]; then - read -p "Enter Container Name → " CONTAINER_NAME -fi - -CONTAINER_ID=$( { pct list; ssh root@10.15.0.5 'pct list'; } | awk -v name="$CONTAINER_NAME" '$3 == name {print $1}') - -if [ -z "$CONTAINER_ID" ]; then - echo "āœ… Container with name \"$CONTAINER_NAME\" is available for use." 
- return 1 -fi - -if [ "$TYPE_RUNNER" != "true" ]; then - if (( $CONTAINER_ID % 2 == 0 )); then - CONTAINER_OWNERSHIP=$(ssh root@10.15.0.5 "pct config \"$CONTAINER_ID\" | grep "tags" | grep \"$PROXMOX_USERNAME\"") - else - CONTAINER_OWNERSHIP=$(pct config "$CONTAINER_ID" | grep "tags" | grep -x "tags: $PROXMOX_USERNAME") - fi -else - CONTAINER_OWNERSHIP=$(ssh root@10.15.0.5 "pct config \"$CONTAINER_ID\" | grep "tags" | grep \"$PROXMOX_USERNAME\"") - PVE1="false" - if [ -z "$CONTAINER_OWNERSHIP" ]; then - CONTAINER_OWNERSHIP=$(pct config "$CONTAINER_ID" | grep "tags" | grep -x "tags: $PROXMOX_USERNAME") - PVE1="true" - fi -fi - -if [ -z "$CONTAINER_OWNERSHIP" ]; then - echo "āŒ You do not own the container with name \"$CONTAINER_NAME\"." - return 2 -fi diff --git a/container maintenance/start_services.sh b/container maintenance/start_services.sh deleted file mode 100644 index 02f2ad97..00000000 --- a/container maintenance/start_services.sh +++ /dev/null @@ -1,144 +0,0 @@ -#!/bin/bash -# Script ran by a virtual terminal session to start services and migrate a container -# Script is only ran on GH action workflows when runner disconnects -# Last Modified by Maxwell Klema on July 23rd, 2025 -# ------------------------------------------------ - -CONTAINER_ID="$1" -CONTAINER_NAME="$2" -REPO_BASE_NAME="$3" -REPO_BASE_NAME_WITH_OWNER="$4" -SSH_PORT="$5" -CONTAINER_IP="$6" -PROJECT_ROOT="$7" -ROOT_START_COMMAND="$8" -DEPLOY_ON_START="$9" -MULTI_COMPONENT="${10}" -START_COMMAND=$(echo "${11}" | base64 -d) -BUILD_COMMAND=$(echo "${12}" | base64 -d) -RUNTIME_LANGUAGE=$(echo "${13}" | base64 -d) -GH_ACTION="${14}" -PROJECT_BRANCH="${15}" -UPDATE_CONTAINER="${16}" -CONTAINER_NAME="${CONTAINER_NAME,,}" - -sleep 3 -if (( $CONTAINER_ID % 2 == 0 )) && [ "$UPDATE_CONTAINER" == "true" ]; then - ssh root@10.15.0.5 "pct stop $CONTAINER_ID" > /dev/null 2>&1 -else - pct stop $CONTAINER_ID > /dev/null 2>&1 -fi - -# Create template if on default branch ==== -source 
/var/lib/vz/snippets/helper-scripts/create-template.sh - -if (( $CONTAINER_ID % 2 == 0 )); then - - if [ "$UPDATE_CONTAINER" != "true" ]; then - pct migrate $CONTAINER_ID intern-phxdc-pve2 --target-storage containers-pve2 --online > /dev/null 2>&1 - sleep 40 # wait for migration to finish (fix this later) - fi - - ssh root@10.15.0.5 "pct start $CONTAINER_ID" - ssh root@10.15.0.5 "pct exec $CONTAINER_ID -- bash -c 'chmod 700 ~/.bashrc'" # enable full R/W/X permissions - ssh root@10.15.0.5 "pct set $CONTAINER_ID --memory 4096 --swap 0 --cores 4" - - if [ "${GH_ACTION^^}" == "Y" ]; then - ssh root@10.15.0.5 "pct exec $CONTAINER_ID -- systemctl start github-runner" - fi - - startProject() { - - RUNTIME="$1" - BUILD_CMD="$2" - START_CMD="$3" - COMP_DIR="$4" - - if [ "${RUNTIME^^}" == "NODEJS" ]; then - if [ "$BUILD_CMD" == "" ]; then - ssh root@10.15.0.5 "pct exec $CONTAINER_ID -- bash -c 'export PATH=\$PATH:/usr/local/bin && pm2 start bash -- -c \"cd /root/$REPO_BASE_NAME/$PROJECT_ROOT/$COMP_DIR && $START_CMD\"'" > /dev/null 2>&1 - else - ssh root@10.15.0.5 "pct exec $CONTAINER_ID -- bash -c 'export PATH=\$PATH:/usr/local/bin && $BUILD_CMD && pm2 start bash -- -c \"cd /root/$REPO_BASE_NAME/$PROJECT_ROOT/$COMP_DIR && $START_CMD\"'" > /dev/null 2>&1 - fi - elif [ "${RUNTIME^^}" == "PYTHON" ]; then - if [ "$BUILD_CMD" == "" ]; then - ssh root@10.15.0.5 "pct exec $CONTAINER_ID -- script -q -c \"tmux new-session -d 'cd /root/$REPO_BASE_NAME/$PROJECT_ROOT/$COMP_DIR && source venv/bin/activate && $START_CMD'\"" > /dev/null 2>&1 - else - ssh root@10.15.0.5 "pct exec $CONTAINER_ID -- script -q -c \"tmux new-session -d 'cd /root/$REPO_BASE_NAME/$PROJECT_ROOT/$COMP_DIR && source venv/bin/activate $BUILD_CMD && $START_CMD'\"" > /dev/null 2>&1 - fi - fi - - } - - if [ "${DEPLOY_ON_START^^}" == "Y" ]; then - if [ "${MULTI_COMPONENT^^}" == "Y" ]; then - for COMPONENT in $(echo "$START_COMMAND" | jq -r 'keys[]'); do - START=$(echo "$START_COMMAND" | jq -r --arg k "$COMPONENT" '.[$k]') 
- RUNTIME=$(echo "$RUNTIME_LANGUAGE" | jq -r --arg k "$COMPONENT" '.[$k]') - BUILD=$(echo "$BUILD_COMMAND" | jq -r --arg k "$COMPONENT" '.[$k]') - if [ "$BUILD" == "null" ]; then - BUILD="" - fi - startProject "$RUNTIME" "$BUILD" "$START" "$COMPONENT" - done - if [ ! -z "$ROOT_START_COMMAND" ]; then - ssh root@10.15.0.5 "pct exec $CONTAINER_ID -- bash -c 'cd /root/$REPO_BASE_NAME/$PROJECT_ROOT && $ROOT_START_COMMAND'" > /dev/null 2>&1 - fi - else - startProject "$RUNTIME_LANGUAGE" "$BUILD_COMMAND" "$START_COMMAND" "." - fi - fi - ssh root@10.15.0.5 "pct set $CONTAINER_ID --memory 2048 --swap 0 --cores 2" - -# PVE 1 -else - pct start $CONTAINER_ID | true - if [ "${GH_ACTION^^}" == "Y" ]; then - pct exec $CONTAINER_ID -- bash -c "systemctl start github-runner" - fi - - startComponent() { - - RUNTIME="$1" - BUILD_CMD="$2" - START_CMD="$3" - COMP_DIR="$4" - - if [ "${RUNTIME^^}" == "NODEJS" ]; then - if [ "$BUILD_CMD" == "" ]; then - pct exec $CONTAINER_ID -- bash -c "export PATH=\$PATH:/usr/local/bin && pm2 start bash -- -c 'cd /root/$REPO_BASE_NAME/$PROJECT_ROOT/$COMP_DIR && $START_CMD'" > /dev/null 2>&1 - else - pct enter $CONTAINER_ID < /dev/null -export PATH=\$PATH:/usr/local/bin && \ -$BUILD_CMD || true && pm2 start bash -- -c 'cd /root/$REPO_BASE_NAME/$PROJECT_ROOT/$COMP_DIR && $START_CMD' -EOF - fi - elif [ "${RUNTIME^^}" == "PYTHON" ]; then - if [ "$BUILD_CMD" == "" ]; then - pct exec $CONTAINER_ID -- script -q -c "tmux new-session -d 'cd /root/$REPO_BASE_NAME/$PROJECT_ROOT/$COMP_DIR && source venv/bin/activate && $START_CMD'" > /dev/null 2>&1 - else - pct exec $CONTAINER_ID -- script -q -c "tmux new-session -d 'cd /root/$REPO_BASE_NAME/$PROJECT_ROOT/$COMP_DIR && source venv/bin/activate $BUILD_CMD && $START_CMD'" > /dev/null 2>&1 - fi - fi - } - - pct set $CONTAINER_ID --memory 4096 --swap 0 --cores 4 > /dev/null #temporarily bump up container resources for computation hungry processes (e.g. 
meteor) - if [ "${MULTI_COMPONENT^^}" == "Y" ]; then - for COMPONENT in $(echo "$START_COMMAND" | jq -r 'keys[]'); do - START=$(echo "$START_COMMAND" | jq -r --arg k "$COMPONENT" '.[$k]') - RUNTIME=$(echo "$RUNTIME_LANGUAGE" | jq -r --arg k "$COMPONENT" '.[$k]') - BUILD=$(echo "$BUILD_COMMAND" | jq -r --arg k "$COMPONENT" '.[$k]') - if [ "$BUILD" == "null" ]; then - BUILD="" - fi - - startComponent "$RUNTIME" "$BUILD" "$START" "$COMPONENT" - done - if [ ! -z "$ROOT_START_COMMAND" ]; then - pct exec $CONTAINER_ID -- bash -c "cd /root/$REPO_BASE_NAME/$PROJECT_ROOT && $ROOT_START_COMMAND" > /dev/null 2>&1 - fi - else - startComponent "$RUNTIME_LANGUAGE" "$BUILD_COMMAND" "$START_COMMAND" "." - fi - pct set $CONTAINER_ID --memory 2048 --swap 0 --cores 2 > /dev/null -fi \ No newline at end of file diff --git a/container maintenance/update-container.sh b/container maintenance/update-container.sh deleted file mode 100644 index 7fc9342a..00000000 --- a/container maintenance/update-container.sh +++ /dev/null @@ -1,242 +0,0 @@ -#!/bin/bash -# Script to automatically fetch new contents from a branch, push them to container, and restart intern -# Last Modified on July 17th, 2025 by Maxwell Klema -# ---------------------------------------- - -RESET="\033[0m" -BOLD="\033[1m" -MAGENTA='\033[35m' - -echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" -echo -e "${BOLD}${MAGENTA}šŸ”„ Update Container Contents ${RESET}" -echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" - -if [ "${DEPLOY_ON_START^^}" != "Y" ]; then - echo "Skipping container update because there is nothing to update." 
- exit 0 -fi - -source /var/lib/vz/snippets/helper-scripts/PVE_user_authentication.sh -source /var/lib/vz/snippets/helper-scripts/verify_container_ownership.sh - -# Get Project Details - -CONTAINER_NAME="${CONTAINER_NAME,,}" - -if [ -z "$PROJECT_REPOSITORY" ]; then - read -p "šŸš€ Paste the link to your project repository → " PROJECT_REPOSITORY -fi - -CheckRepository() { - PROJECT_REPOSITORY_SHORTENED=${PROJECT_REPOSITORY#*github.com/} - PROJECT_REPOSITORY_SHORTENED=${PROJECT_REPOSITORY_SHORTENED%.git} - REPOSITORY_EXISTS=$(curl -s -o /dev/null -w "%{http_code}" https://github.com/$RROJECT_REPOSITORY) -} - - -CheckRepository - -while [ "$REPOSITORY_EXISTS" != "200" ]; do - echo "āš ļø The repository link you provided, \"$PROJECT_REPOSITORY\" was not valid." - read -p "šŸš€ Paste the link to your project repository → " PROJECT_REPOSITORY - CheckRepository -done - -echo "āœ… The repository link you provided, \"$PROJECT_REPOSITORY\", was valid." - -# Get Project Branch - -if [ "$PROJECT_BRANCH" == "" ]; then - PROJECT_BRANCH="main" -fi - -REPOSITORY_BRANCH_EXISTS=$(curl -s -o /dev/null -w "%{http_code}" https://api.github.com/repos/$PROJECT_REPOSITORY_SHORTENED/branches/$PROJECT_BRANCH) - -REPOSITORY_BRANCH_EXISTS=$(curl -s -o /dev/null -w "%{http_code}" $PROJECT_REPOSITORY/tree/$PROJECT_BRANCH) -while [ "$REPOSITORY_BRANCH_EXISTS" != "200" ]; do - echo "āš ļø The branch you provided, \"$PROJECT_BRANCH\", does not exist on repository at \"$PROJECT_REPOSITORY\"." - read -p "🪾 Enter the project branch to deploy from (leave blank for \"main\") → " PROJECT_BRANCH - if [ "PROJECT_BRANCH" == "" ]; then - PROJECT_BRANCH="main" - fi - REPOSITORY_BRANCH_EXISTS=$(curl -s -o /dev/null -w "%{http_code}" $PROJECT_REPOSITORY_SHORTENED/tree/$PROJECT_BRANCH) -done - - -# # Get Project Root Directroy - -if [ "$PROJECT_ROOT" == "." 
] || [ "$PROJECT_ROOT" == "" ]; then - PROJECT_ROOT="/" -fi - -VALID_PROJECT_ROOT=$(ssh root@10.15.234.122 "node /root/bin/js/runner.js authenticateRepo \"$PROJECT_REPOSITORY\" \"$PROJECT_BRANCH\" \"$PROJECT_ROOT\"") - -while [ "$VALID_PROJECT_ROOT" == "false" ]; do - echo "āš ļø The root directory you provided, \"$PROJECT_ROOT\", does not exist on branch, \"$PROJECT_BRANCH\", on repository at \"$PROJECT_REPOSITORY\"." - read -p "šŸ“ Enter the project root directory (relative to repository root directory, or leave blank for root directory) → " PROJECT_ROOT - VALID_PROJECT_ROOT=$(ssh root@10.15.234.122 "node /root/bin/js/runner.js authenticateRepo \"$PROJECT_REPOSITORY\" \"$PROJECT_BRANCH\" \"$PROJECT_ROOT\"") -done - -REPO_BASE_NAME=$(basename -s .git "$PROJECT_REPOSITORY") -REPO_BASE_NAME_WITH_OWNER=$(echo "$PROJECT_REPOSITORY" | cut -d'/' -f4) - -if [ "$PROJECT_ROOT" == "" ] || [ "$PROJECT_ROOT" == "/" ]; then - PROJECT_ROOT="." -fi - -# Install Services ==== - -echo "šŸ›Žļø Installing Services..." 
- -# SERVICE_COMMANDS=$(ssh -o SendEnv="LINUX_DISTRIBUTION SERVICES CUSTOM_SERVICES REQUIRE_SERVICES" \ -# root@10.15.234.122 \ -# "/root/bin/deployment-scripts/gatherServices.sh true") - -# echo "$SERVICE_COMMANDS" | while read -r line; do -# pct exec $CONTAINER_ID -- bash -c "$line | true" > /dev/null 2>&1 -# done - -# Clone repository if needed ==== - -if (( "$CONTAINER_ID" % 2 == 0 )); then - ssh root@10.15.0.5 " - pct enter $CONTAINER_ID < /dev/null -fi -EOF - " -else - pct enter $CONTAINER_ID < /dev/null -fi -EOF -fi - -# Update Container with New Contents from repository ===== - -startComponentPVE1() { - - RUNTIME="$1" - BUILD_CMD="$2" - START_CMD="$3" - COMP_DIR="$4" - INSTALL_CMD="$5" - - if [ "${RUNTIME^^}" == "NODEJS" ]; then - pct set $CONTAINER_ID --memory 4096 --swap 0 --cores 4 - pct exec $CONTAINER_ID -- bash -c "cd /root/$REPO_BASE_NAME/$PROJECT_ROOT/ && git fetch origin && git reset --hard origin/$PROJECT_BRANCH && git pull" > /dev/null 2>&1 - pct exec $CONTAINER_ID -- bash -c "cd /root/$REPO_BASE_NAME/$PROJECT_ROOT/$COMP_DIR && $INSTALL_CMD && $BUILD_CMD" > /dev/null 2>&1 - pct set $CONTAINER_ID --memory 2048 --swap 0 --cores 2 - elif [ "${RUNTIME^^}" == "PYTHON" ]; then - pct set $CONTAINER_ID --memory 4096 --swap 0 --cores 4 - pct exec $CONTAINER_ID -- bash -c "cd /root/$REPO_BASE_NAME/$PROJECT_ROOT/ && git fetch origin && git reset --hard origin/$PROJECT_BRANCH && git pull" > /dev/null 2>&1 - pct exec $CONTAINER_ID -- bash -c "cd /root/$REPO_BASE_NAME/$PROJECT_ROOT/$COMP_DIR && source venv/bin/activate && $INSTALL_CMD && $BUILD_CMD" > /dev/null 2>&1 - pct set $CONTAINER_ID --memory 2048 --swap 0 --cores 2 - fi -} - -startComponentPVE2() { - - RUNTIME="$1" - BUILD_CMD="$2" - START_CMD="$3" - COMP_DIR="$4" - INSTALL_CMD="$5" - - if [ "${RUNTIME^^}" == "NODEJS" ]; then - ssh root@10.15.0.5 " - pct set $CONTAINER_ID --memory 4096 --swap 0 --cores 4 && - pct exec $CONTAINER_ID -- bash -c 'cd /root/$REPO_BASE_NAME/$PROJECT_ROOT/ && git fetch origin 
&& git reset --hard origin/$PROJECT_BRANCH && git pull' > /dev/null 2>&1 - pct exec $CONTAINER_ID -- bash -c 'cd /root/$REPO_BASE_NAME/$PROJECT_ROOT/$COMP_DIR && $INSTALL_CMD' && '$BUILD_CMD' > /dev/null 2>&1 - pct set $CONTAINER_ID --memory 2048 --swap 0 --cores 2 - " - elif [ "${RUNTIME^^}" == "PYTHON" ]; then - ssh root@10.15.0.5 " - pct set $CONTAINER_ID --memory 4096 --swap 0 --cores 4 && - pct exec $CONTAINER_ID -- bash -c 'cd /root/$REPO_BASE_NAME/$PROJECT_ROOT && git fetch origin && git reset --hard origin/$PROJECT_BRANCH && git pull' > /dev/null 2>&1 - pct exec $CONTAINER_ID -- bash -c 'cd /root/$REPO_BASE_NAME/$PROJECT_ROOT/$COMP_DIR && source venv/bin/activate && $INSTALL_CMD' && '$BUILD_CMD' > /dev/null 2>&1 - pct set $CONTAINER_ID --memory 2048 --swap 0 --cores 2 - " - fi -} - - -if [ "${MULTI_COMPONENT^^}" == "Y" ]; then - for COMPONENT in $(echo "$START_COMMAND" | jq -r 'keys[]'); do - START=$(echo "$START_COMMAND" | jq -r --arg k "$COMPONENT" '.[$k]') - RUNTIME=$(echo "$RUNTIME_LANGUAGE" | jq -r --arg k "$COMPONENT" '.[$k]') - BUILD=$(echo "$BUILD_COMMAND" | jq -r --arg k "$COMPONENT" '.[$k]') - INSTALL=$(echo "$INSTALL_COMMAND" | jq -r --arg k "$COMPONENT" '.[$k]') - if [ "$BUILD" == "null" ]; then - BUILD="" - fi - - if (( "$CONTAINER_ID" % 2 == 0 )); then - startComponentPVE2 "$RUNTIME" "$BUILD" "$START" "$COMPONENT" "$INSTALL" - else - startComponentPVE1 "$RUNTIME" "$BUILD" "$START" "$COMPONENT" "$INSTALL" - fi - done - if [ ! -z "$ROOT_START_COMMAND" ]; then - if (( $CONTAINER_ID % 2 == 0 )); then - ssh root@10.15.0.5 "pct exec $CONTAINER_ID -- bash -c 'cd /root/$REPO_BASE_NAME/$PROJECT_ROOT && $ROOT_START_COMMAND'" - else - pct exec $CONTAINER_ID -- bash -c "cd /root/$REPO_BASE_NAME/$PROJECT_ROOT && $ROOT_START_COMMAND" - fi - fi - # startComponent "$RUNTIME_LANGUAGE" "$BUILD_COMMAND" "$START_COMMAND" "." -else - if (( $CONTAINER_ID % 2 == 0 )); then - startComponentPVE2 "$RUNTIME_LANGUAGE" "$BUILD_COMMAND" "$START_COMMAND" "." 
"$INSTALL_COMMAND" - else - startComponentPVE1 "$RUNTIME_LANGUAGE" "$BUILD_COMMAND" "$START_COMMAND" "." "$INSTALL_COMMAND" - fi -fi - -# Update Log File - -if (( "$CONTAINER_ID" % 2 == 0 )); then - ssh root@10.15.0.5 "pct exec $CONTAINER_ID -- bash -c 'echo \"[$(date)]\" >> /root/container-updates.log'" -else - pct exec $CONTAINER_ID -- bash -c "echo \"[$(date)]\" >> /root/container-updates.log" -fi - -# Create new template if on default branch ===== - -UPDATE_CONTAINER="true" -BUILD_COMMAND_B64=$(echo -n "$BUILD_COMMAND" | base64) -RUNTIME_LANGUAGE_B64=$(echo -n "$RUNTIME_LANGUAGE" | base64) -START_COMMAND_B64=$(echo -n "$START_COMMAND" | base64) - -CMD=( -bash /var/lib/vz/snippets/start_services.sh -"$CONTAINER_ID" -"$CONTAINER_NAME" -"$REPO_BASE_NAME" -"$REPO_BASE_NAME_WITH_OWNER" -"$SSH_PORT" -"$CONTAINER_IP" -"$PROJECT_ROOT" -"$ROOT_START_COMMAND" -"$DEPLOY_ON_START" -"$MULTI_COMPONENT" -"$START_COMMAND_B64" -"$BUILD_COMMAND_B64" -"$RUNTIME_LANGUAGE_B64" -"$GH_ACTION" -"$PROJECT_BRANCH" -"$GITHUB_PAT" -"$UPDATE_CONTAINER" -) - -# Safely quote each argument for the shell -QUOTED_CMD=$(printf ' %q' "${CMD[@]}") - -tmux new-session -d -s "$CONTAINER_NAME" "$QUOTED_CMD" -echo "āœ… Container $CONTAINER_ID has been updated with new contents from branch \"$PROJECT_BRANCH\" on repository \"$PROJECT_REPOSITORY\"." -exit 0 - diff --git a/container registration/register-container.sh b/container registration/register-container.sh deleted file mode 100644 index 83a3cdd4..00000000 --- a/container registration/register-container.sh +++ /dev/null @@ -1,194 +0,0 @@ -#!/bin/bash -# var/lib/vz/snippets/register-container.sh -# Script to register a container's IP and ports in the NGINX port map JSON file. 
-# Last Modified June 27 2025 by Maxwell Klema - -set -euo pipefail - -if [[ -z "${1-}" || -z "${2-}" ]]; then - echo "Usage: $0 " - exit 1 -fi - -CTID="$1" -http_port="$2" -ADDITIONAL_PROTOCOLS="${3-}" #set to empty string if not passed - -# Redirect stdout and stderr to a log file -LOGFILE="/var/log/pve-hook-$CTID.log" -exec > >(tee -a "$LOGFILE") 2>&1 - -echo "---- Hookscript started at $(date) ----" -echo "ā³ Waiting for container to boot and get DHCP lease..." -#sleep 10 - -# Extract IP -container_ip="" -attempts=0 -max_attempts=5 - -while [[ -z "$container_ip" && $attempts -lt $max_attempts ]]; do - container_ip=$(pct exec "$CTID" -- ip -4 addr show eth0 | awk '/inet / {print $2}' | cut -d'/' -f1) - [[ -z "$container_ip" ]] && sleep 2 && ((attempts++)) -done - -if [[ -z "$container_ip" ]]; then - echo "āŒ Failed to obtain IP for container $CTID" - exit 1 -fi - -hostname=$(pct exec "$CTID" -- hostname) - -# Check if this container already has a SSH port assigned in PREROUTING - -existing_ssh_port=$(iptables -t nat -S PREROUTING | grep "to-destination $container_ip:22" | awk -F'--dport ' '{print $2}' | awk '{print $1}' | head -n 1 || true) - -if [[ -n "$existing_ssh_port" ]]; then - echo "ā„¹ļø Container already has SSH port $existing_ssh_port" - ssh_port="$existing_ssh_port" -else - # Get used SSH ports - used_ssh_ports=$(iptables -t nat -S PREROUTING | awk -F'--dport ' '/--dport / {print $2}' | awk '/22$/' | awk '{print $1}') - ssh_port=$(comm -23 <(seq 2222 2999 | sort) <(echo "$used_ssh_ports" | sort) | head -n 1) - - if [[ -z "$ssh_port" ]]; then - echo "āŒ No available SSH ports found" - exit 2 - fi - - # Add PREROUTING rule - iptables -t nat -A PREROUTING -i vmbr0 -p tcp --dport "$ssh_port" -j DNAT --to-destination "$container_ip:22" - - # Add POSTROUTING rule - iptables -t nat -A POSTROUTING -o vmbr0 -p tcp -d "$container_ip" --dport 22 -j MASQUERADE -fi - -# Take input file of protocols, check if the container already has a port assigned for those 
protocols in PREROUTING - -# Store all protocols and ports to write to JSON list later. - -if [ ! -z "$ADDITIONAL_PROTOCOLS" ]; then - - list_all_protocols=() - list_all_ports=() - - while read line; do - - protocol=$(echo "$line" | awk '{print $1}') - underlying_protocol=$(echo "$line" | awk '{print $2}') - default_port_number=$(echo "$line" | awk '{print $3}') - - protocol_port="" - existing_port=$(iptables -t nat -S PREROUTING | grep "to-destination $container_ip:$default_port_number" | awk -F'--dport ' '{print $2}' | awk '{print $1}' | head -n 1 || true) - - if [[ -n "$existing_port" ]]; then - # Port already exists, so just assign it to protocol_port - echo "ā„¹ļø This Container already has a $protocol port at $existing_port" - protocol_port="$existing_port" - else - used_protocol_ports=$(iptables -t nat -S PREROUTING | awk -F'--dport ' '/--dport / {print $2}' | awk '{print $1}') - protocol_port=$(comm -23 <(seq 10001 29999 | sort) <(echo "$used_protocol_ports" | sort) | head -n 1 || true) - - if [[ -z "protocol_port" ]]; then - echo "āŒ No available $protocol ports found" - exit 2 - fi - - # Add PREROUTING rule - iptables -t nat -A PREROUTING -i vmbr0 -p "$underlying_protocol" --dport "$protocol_port" -j DNAT --to-destination "$container_ip:$default_port_number" - - # Add POSTROUTING rule - iptables -t nat -A POSTROUTING -o vmbr0 -p "$underlying_protocol" -d "$container_ip" --dport "$default_port_number" -j MASQUERADE - - fi - - list_all_protocols+=("$protocol") - list_all_ports+=("$protocol_port") - - done < <(tac "$ADDITIONAL_PROTOCOLS") - - # Space Seperate Lists - - ss_protocols="$(IFS=, ; echo "${list_all_protocols[*]}")" - ss_ports="$(IFS=, ; echo "${list_all_ports[*]}")" - - #Update NGINX port map JSON on the remote host safely using a heredoc and positional parameters - -ssh root@10.15.20.69 bash -s -- "$hostname" "$container_ip" "$ssh_port" "$http_port" "$ss_protocols" "$ss_ports" <<'EOF' -set -euo pipefail - -hostname="$1" -container_ip="$2" 
-ssh_port="$3" -http_port="$4" -protos_json=$(echo "$5" | tr ',' '\n' | jq -R . | jq -s .) -ports_json=$(echo "$6" | tr ',' '\n' | jq -R . | jq -s 'map(tonumber)') - -jq --arg hn "$hostname" \ - --arg ip "$container_ip" \ - --argjson ssh "$ssh_port" \ - --argjson http "$http_port" \ - --argjson protos "$protos_json" \ - --argjson ports_list "$ports_json" \ - '. + {($hn): {ip: $ip, ports: ( reduce range(0; $protos | length) as $i ( {ssh: $ssh, http: $http}; . + { ($protos[$i]): $ports_list[$i]}))}}' /etc/nginx/port_map.json > /tmp/port_map.json.new - -mv -f /tmp/port_map.json.new /etc/nginx/port_map.json -nginx -s reload -EOF - -else - -# Update NGINX port map JSON on the remote host safely using a heredoc and positional parameters - -ssh root@10.15.20.69 bash -s -- "$hostname" "$container_ip" "$ssh_port" "$http_port" <<'EOF' -set -euo pipefail - -hostname="$1" -container_ip="$2" -ssh_port="$3" -http_port="$4" - -jq --arg hn "$hostname" \ - --arg ip "$container_ip" \ - --argjson http "$http_port" \ - --argjson ssh "$ssh_port" \ - '. 
+ {($hn): {ip: $ip, ports: {ssh: $ssh, http: $http}}}' /etc/nginx/port_map.json > /tmp/port_map.json.new - -mv -f /tmp/port_map.json.new /etc/nginx/port_map.json -nginx -s reload -EOF - -fi - -# Results - -# Define high-contrast colors -BOLD='\033[1m' -BLUE='\033[34m' -MAGENTA='\033[35m' -GREEN='\033[32m' -CYAN='\033[36m' -RESET='\033[0m' - -# Top border and title -echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" -echo -e "${BOLD}šŸ”” ${MAGENTA}COPY THESE PORTS DOWN${RESET} — ${CYAN}For External Access${RESET}" -echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" -echo -e "šŸ“Œ ${BLUE}Note:${RESET} Your container listens on default ports internally," -echo -e " but EXTERNAL traffic must use the ports listed below:" -echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" - -# Port info -echo -e "āœ… ${GREEN}Hostname Registration:${RESET} $hostname → $container_ip" -echo -e "šŸ” ${MAGENTA}SSH Port :${RESET} $ssh_port" -echo -e "🌐 ${BLUE}HTTP Port :${RESET} $http_port" - -# Additional protocols (if any) -if [ ! -z "$ADDITIONAL_PROTOCOLS" ]; then - for i in "${!list_all_protocols[@]}"; do - echo -e "šŸ“” ${CYAN}${list_all_protocols[$i]} Port :${RESET} ${list_all_ports[$i]}" - done -fi - -# Bottom border -echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" \ No newline at end of file diff --git a/container registration/register_proxy_hook.sh b/container registration/register_proxy_hook.sh deleted file mode 100644 index a4ade333..00000000 --- a/container registration/register_proxy_hook.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -# /var/lib/vz/snippets/register_proxy_hook.sh - -echo "DEBUG: Hook script /var/lib/vz/snippets/register_proxy_hook.sh started. 
Event: $2, CTID: $1" >> /tmp/hook_debug.log - -# Hook script for container events -case "$2" in - post-start) - echo "DEBUG: Calling register-container.sh for CTID: $1" >> /tmp/hook_debug.log - /var/lib/vz/snippets/register-container.sh "$1" >> /tmp/hook_debug.log 2>&1 - echo "DEBUG: register-container.sh finished." >> /tmp/hook_debug.log - ;; - *) - echo "DEBUG: Unhandled hook event: $2 for CTID: $1" >> /tmp/hook_debug.log - ;; -esac -echo "DEBUG: Hook script /var/lib/vz/snippets/register_proxy_hook.sh finished." >> /tmp/hook_debug.log \ No newline at end of file diff --git a/intern-phxdc-pve1/prune_iptables.sh b/intern-phxdc-pve1/prune_iptables.sh deleted file mode 100644 index abc1aec2..00000000 --- a/intern-phxdc-pve1/prune_iptables.sh +++ /dev/null @@ -1,173 +0,0 @@ -#!/bin/bash - -# Script to prune iptables rules for containers that no longer exist -# Author: Carter Myers - -# Enable strict mode: -# -e: Exit immediately if a command exits with a non-zero status. -# -u: Treat unset variables as an error when substituting. -# -o pipefail: The return value of a pipeline is the status of the last command -# to exit with a non-zero status, or zero if all commands exit successfully. -set -euo pipefail - -# --- Configuration --- -REMOTE_HOST="intern-nginx" -REMOTE_FILE="/etc/nginx/port_map.json" -LOCAL_FILE="/tmp/port_map.json" -LOG_FILE="/var/log/prune_iptables.log" -PVE_NODES=("localhost" "10.15.0.5") - -# Function to log messages with a timestamp -log_message() { - echo "[$(date)] $1" >> "$LOG_FILE" -} - -# --- 1. Fetch port_map.json from remote host --- -log_message "Fetching port_map.json from $REMOTE_HOST..." -if ! scp "$REMOTE_HOST:$REMOTE_FILE" "$LOCAL_FILE" >/dev/null 2>&1; then - log_message "ERROR: Could not fetch $REMOTE_FILE from $REMOTE_HOST" - exit 1 -fi -log_message "Successfully fetched $REMOTE_FILE to $LOCAL_FILE." - -# --- 2. 
Build list of existing hostnames --- -EXISTING_HOSTNAMES="" -for node in "${PVE_NODES[@]}"; do - log_message "Checking containers on $node..." - if [[ "$node" == "localhost" ]]; then - CTIDS=$(pct list | awk 'NR>1 {print $1}' || true) - log_message "DEBUG: Local CTIDs: [${CTIDS:-}]" - for id in $CTIDS; do - hn=$(pct config "$id" 2>/dev/null | grep -i '^hostname:' | awk '{print $2}' | tr -d '[:space:]' || true) - [[ -n "$hn" ]] && EXISTING_HOSTNAMES+="$hn"$'\n' - done - else - log_message "DEBUG: Checking remote node: $node" - CTIDS_CMD="pct list | awk 'NR>1 {print \$1}'" - CTIDS_OUTPUT=$(ssh "$node" "$CTIDS_CMD" 2>&1 || true) - if [[ "$CTIDS_OUTPUT" =~ "Permission denied" || "$CTIDS_OUTPUT" =~ "Connection refused" || "$CTIDS_OUTPUT" =~ "Host key verification failed" ]]; then - log_message "ERROR: SSH to $node failed: $CTIDS_OUTPUT" - continue - fi - log_message "DEBUG: CTIDs on $node: [${CTIDS_OUTPUT:-}]" - for id in $CTIDS_OUTPUT; do - HN_CMD="pct config $id 2>/dev/null | grep -i '^hostname:' | awk '{print \$2}'" - HN_OUTPUT=$(ssh "$node" "$HN_CMD" 2>&1 || true) - if [[ "$HN_OUTPUT" =~ "Permission denied" || "$HN_OUTPUT" =~ "No such file" ]]; then - log_message "ERROR: Failed to get hostname for $id on $node: $HN_OUTPUT" - continue - fi - hn=$(echo "$HN_OUTPUT" | tr -d '[:space:]') - [[ -n "$hn" ]] && EXISTING_HOSTNAMES+="$hn"$'\n' - done - fi -done - -# Remove any empty lines from EXISTING_HOSTNAMES -EXISTING_HOSTNAMES=$(echo "$EXISTING_HOSTNAMES" | sed '/^$/d') -log_message "Existing hostnames collected:" -log_message "$EXISTING_HOSTNAMES" - -# --- 3. Prune iptables and port_map.json --- -log_message "Pruning iptables and port_map.json..." 
-cp "$LOCAL_FILE" "$LOCAL_FILE.bak" -log_message "Created backup of $LOCAL_FILE at $LOCAL_FILE.bak" - -HOSTNAMES_IN_JSON=$(jq -r 'keys[]' "$LOCAL_FILE") -mapfile -t EXISTING_ARRAY <<< "$EXISTING_HOSTNAMES" - -# Helper function to check if a hostname exists in the collected list -hostname_exists() { - local h=$(echo "$1" | tr -d '[:space:]') - for existing in "${EXISTING_ARRAY[@]}"; do - if [[ "${h,,}" == "${existing,,}" ]]; then # Case-insensitive comparison - return 0 - fi - done - return 1 -} - -for hostname in $HOSTNAMES_IN_JSON; do - trimmed_hostname=$(echo "$hostname" | tr -d '[:space:]') - if hostname_exists "$trimmed_hostname"; then - log_message "Keeping entry: $trimmed_hostname" - else - ip=$(jq -r --arg h "$hostname" '.[$h].ip // "unknown"' "$LOCAL_FILE") - ports=$(jq -c --arg h "$hostname" '.[$h].ports // {}' "$LOCAL_FILE") - log_message "Stale entry detected: $hostname (IP: $ip, Ports: $ports) - removing..." - - # --- IPTABLES REMOVAL --- - # Capture rules into an array first to avoid subshell issues with 'while read' - mapfile -t RULES_TO_DELETE < <(sudo iptables -t nat -S | grep -w "$ip" || true) # Added sudo, || true to prevent pipefail if grep finds nothing - - if [[ ${#RULES_TO_DELETE[@]} -gt 0 ]]; then - log_message "Found ${#RULES_TO_DELETE[@]} iptables rules for $hostname. Attempting removal..." - for rule in "${RULES_TO_DELETE[@]}"; do - cleaned_rule=$(echo "$rule" | sed 's/^-A /-D /') - log_message "Attempting to remove iptables rule: sudo iptables -t nat $cleaned_rule" - if sudo iptables -t nat $cleaned_rule; then - log_message "Removed iptables rule: $cleaned_rule" - else - log_message "ERROR: Failed to remove iptables rule: $cleaned_rule (Exit status: $?)" - fi - done - else - log_message "No iptables rules found for $hostname to remove." - fi - - # --- JSON ENTRY REMOVAL --- - log_message "Attempting to remove $hostname from local port_map.json..." 
- if jq "del(.\"$hostname\")" "$LOCAL_FILE" > "${LOCAL_FILE}.tmp"; then - if mv "${LOCAL_FILE}.tmp" "$LOCAL_FILE"; then - log_message "Successfully removed $hostname from local port_map.json." - else - log_message "ERROR: Failed to move temporary file to $LOCAL_FILE for $hostname." - exit 1 # Critical failure, exit - fi - else - log_message "ERROR: jq failed to delete $hostname from $LOCAL_FILE." - exit 1 # Critical failure, exit - fi - - # Confirm deletion from local file - if jq -e --arg h "$hostname" 'has($h)' "$LOCAL_FILE" >/dev/null; then - log_message "ERROR: $hostname still exists in local port_map.json after deletion attempt!" - else - log_message "Confirmed $hostname removed from local port_map.json." - fi - fi -done - -# --- 4. Upload and verify updated file on remote --- -log_message "Uploading updated port_map.json to $REMOTE_HOST..." -TEMP_REMOTE="/tmp/port_map.json" - -if scp "$LOCAL_FILE" "$REMOTE_HOST:$TEMP_REMOTE" >/dev/null 2>&1; then - log_message "Uploaded to $REMOTE_HOST:$TEMP_REMOTE" -else - log_message "ERROR: Failed to upload $TEMP_REMOTE to $REMOTE_HOST" - exit 1 -fi - -# Check if deleted hostnames still exist in uploaded file -log_message "Verifying remote file content..." -for hostname in $HOSTNAMES_IN_JSON; do - if ! hostname_exists "$hostname"; then # Only check for hostnames that *should* have been deleted - if ssh "$REMOTE_HOST" "grep -q '\"$hostname\"' $TEMP_REMOTE"; then - log_message "WARNING: $hostname still exists in uploaded $TEMP_REMOTE on $REMOTE_HOST!" - else - log_message "Verified $hostname was removed in uploaded file on $REMOTE_HOST." - fi - fi -done - -# Move uploaded file into place on the remote host -log_message "Moving uploaded file into final position on $REMOTE_HOST..." 
-if ssh "$REMOTE_HOST" "sudo cp $TEMP_REMOTE $REMOTE_FILE && sudo chown root:root $REMOTE_FILE && sudo chmod 644 $REMOTE_FILE && rm $TEMP_REMOTE"; then - log_message "Copied updated port_map.json to $REMOTE_FILE on $REMOTE_HOST" -else - log_message "ERROR: Failed to replace $REMOTE_FILE on $REMOTE_HOST" - exit 1 -fi - -log_message "Prune complete." \ No newline at end of file diff --git a/intern-phxdc-pve1/prune_temp_files.sh b/intern-phxdc-pve1/prune_temp_files.sh deleted file mode 100644 index 1b171fd1..00000000 --- a/intern-phxdc-pve1/prune_temp_files.sh +++ /dev/null @@ -1,67 +0,0 @@ -#!/bin/bash -# Script to prune all temporary files (env vars, protocols, services, and public keys) -# Last Updated July 28th 2025 Maxwell Klema - -LOG_FILE="/var/log/pruneTempFiles.log" - -writeLog() { - echo "[$(date +'%Y-%m-%d %H:%M:%S')]: $1" >> "$LOG_FILE" -} - -# Function to remove temporary environment variable Folders -removeTempEnvVars() { - TEMP_ENV_FOLDER="/var/lib/vz/snippets/container-env-vars" - while read -r line; do - if [[ "$line" == /var/lib/vz/snippets/container-env-vars/env_* ]]; then - rm -rf "$line" > /dev/null 2>&1 - writeLog "Removed temporary environment variable folder: $line" - fi - done < <(find "$TEMP_ENV_FOLDER" -maxdepth 1 -type d -name "env_*") -} - -# Function to remove temporary services file -removeTempServices() { - TEMP_SERVICES_FOLDER="/var/lib/vz/snippets/container-services" - while read -r line; do - if [[ "$line" == /var/lib/vz/snippets/container-services/services_* ]]; then - rm -f "$line" - writeLog "Removed temporary services file: $line" - fi - done < <(find "$TEMP_SERVICES_FOLDER" -maxdepth 1 -type f -name "services_*") -} - -# Function to remove temporary public key files -removeTempPublicKeys() { - TEMP_PUB_FOLDER="/var/lib/vz/snippets/container-public-keys" - while read -r line; do - if [[ "$line" == /var/lib/vz/snippets/container-public-keys/key_* ]]; then - rm -f "$line" - writeLog "Removed temporary public key file: $line" - fi - 
done < <(find "$TEMP_PUB_FOLDER" -maxdepth 1 -type f -name "key_*") -} - -# Function to remove temporary protocol files -removeTempProtocols() { - TEMP_PROTOCOL_FOLDER="/var/lib/vz/snippets/container-port-maps" - while read -r line; do - if [[ "$line" == /var/lib/vz/snippets/container-port-maps/protocol_list* ]]; then - rm -f "$line" - writeLog "Removed temporary protocol file: $line" - fi - done < <(find "$TEMP_PROTOCOL_FOLDER" -maxdepth 1 -type f -name "protocol_list*") -} - -# Main function to prune all temporary files -pruneTempFiles() { - writeLog "Starting to prune temporary files..." - removeTempEnvVars - removeTempServices - removeTempPublicKeys - removeTempProtocols - writeLog "Finished pruning temporary files." -} - -# Execute the main function -pruneTempFiles -exit 0 \ No newline at end of file diff --git a/jump server/extract-fingerprint.sh b/jump server/extract-fingerprint.sh deleted file mode 100644 index dac307b8..00000000 --- a/jump server/extract-fingerprint.sh +++ /dev/null @@ -1,46 +0,0 @@ -#!/bin/bash -# A script to collect the client's SSH fingerprint and pass that to the content creation container -# Last Modified June 24th, 2025 by Maxwell Klema -# --------------------- - -CURRENT_TIME=$(date +"%B %d %T") - -USER="create-container" -SSH_CLIENT_IP=$(echo $SSH_CLIENT | awk '{print $1}') -RECENT_LOG=$(journalctl _COMM=sshd | grep "Accepted publickey for $USER from $SSH_CLIENT_IP" | tail -1) -LOGGED_TIME=$(echo $RECENT_LOG | awk '{print $3}') - -#check most recent logged time and current time are only max 2 seconds off since multiple users may log in from same IP over time - -epoch1=$(date -d "today $CURRENT_TIME" +%s) -epoch2=$(date -d "today $LOGGED_TIME" +%s) -diff=$((epoch1 - epoch2)) - -KEY_FINGERPRINT="" - -if [ "$diff" -ge 0 ] && [ "$diff" -le 2 ]; then - KEY_FINGERPRINT=$(echo $RECENT_LOG | grep -o 'SHA256[^ ]*') -fi - -# Export environment variables -export PUBLIC_KEY="$PUBLIC_KEY" -export PROXMOX_USERNAME="$PROXMOX_USERNAME" -export 
PROXMOX_PASSWORD="$PROXMOX_PASSWORD" -export CONTAINER_NAME="$CONTAINER_NAME" -export CONTAINER_PASSWORD="$CONTAINER_PASSWORD" -export HTTP_PORT="$HTTP_PORT" -export PROJECT_REPOSITORY="$PROJECT_REPOSITORY" -export PROJECT_BRANCH="$PROJECT_BRANCH" -export PROJECT_ROOT="$PROJECT_ROOT" -export REQUIRE_ENV_VARS="$REQUIRE_ENV_VARS" -export CONTAINER_ENV_VARS="$CONTAINER_ENV_VARS" -export INSTALL_COMMAND="$INSTALL_COMMAND" -export BUILD_COMMAND="$BUILD_COMMAND" -export START_COMMAND="$START_COMMAND" -export RUNTIME_LANGUAGE="$RUNTIME_LANGUAGE" -export SERVICES="$SERVICES" -export REQUIRE_SERVICES="$REQUIRE_SERVICES" -export CUSTOM_SERVICES="$CUSTOM_SERVICES" - -# SSH with all SendEnv flags -ssh -o "SendEnv=PUBLIC_KEY PROXMOX_USERNAME PROXMOX_PASSWORD CONTAINER_NAME CONTAINER_PASSWORD HTTP_PORT PROJECT_REPOSITORY PROJECT_BRANCH PROJECT_ROOT REQUIRE_ENV_VARS CONTAINER_ENV_VARS INSTALL_COMMAND BUILD_COMMAND START_COMMAND RUNTIME_LANGUAGE SERVICES REQUIRE_SERVICES CUSTOM_SERVICES" -A create-container@10.15.234.122 diff --git a/nginx proxy/nginx.conf b/nginx proxy/nginx.conf deleted file mode 100644 index 8b53f751..00000000 --- a/nginx proxy/nginx.conf +++ /dev/null @@ -1,29 +0,0 @@ -# /etc/nginx/nginx.conf -user nginx; -worker_processes auto; - -error_log /var/log/nginx/error.log notice; -pid /run/nginx.pid; - -load_module modules/ngx_http_js_module.so; -load_module modules/ngx_stream_js_module.so; - -events { - worker_connections 1024; -} - -http { - include /etc/nginx/mime.types; - default_type application/octet-stream; - - log_format backend '$remote_addr - $remote_user [$time_local] "$request" ' - 'status=$status backend_ip=$backend_ip backend_port=80 ' - '"$http_referer" "$http_user_agent" "$http_x_forwarded_for"'; - - access_log /var/log/nginx/access.log backend; - - sendfile on; - keepalive_timeout 65; - - include /etc/nginx/conf.d/*.conf; -} diff --git a/nginx proxy/port_map.js b/nginx proxy/port_map.js deleted file mode 100644 index 0843a810..00000000 --- a/nginx 
proxy/port_map.js +++ /dev/null @@ -1,94 +0,0 @@ -// /etc/nginx/port_map.js -// This is a reverse proxy configuration for Nginx that uses JavaScript to dynamically -// map subdomains to specific IP addresses based on a JSON file. -// Code is based off of bluehive-testflight's port_map.js -// Last updated: 06-08-2025 Carter Myers \\ 06-25-2025 Maxwell Klema - -var fs = require('fs'); -var filePath = "/etc/nginx/port_map.json"; // Make sure Nginx has read access -var cachedMapping = null; - -function loadMapping() { - try { - var content = fs.readFileSync(filePath); - cachedMapping = JSON.parse(content); - return true; - } catch (e) { - // Optionally log error - return false; - } -} - -function extractSubdomain(r) { - var host = r.variables.host; - var match = host.match(/^([^.]+)\.opensource\.mieweb\.(com|org)$/); - if (!match) { - r.error("Invalid hostname format: " + host); - return null; - } - return match[1]; -} - -function httpLookup(r) { - if (cachedMapping === null && !loadMapping()) { - r.error("Failed to load port mapping file."); - r.return(500); - return; - } - - var subdomain = extractSubdomain(r); - if (!subdomain) { - r.return(500); - return; - } - - var entry = cachedMapping[subdomain]; - if (!entry) { - if (!loadMapping()) { - r.error("Reload failed."); - r.return(500); - return; - } - entry = cachedMapping[subdomain]; - if (!entry) { - r.error("No entry found for subdomain: " + subdomain); - r.return(500); - return; - } - } - - return entry.ports.http.toString(); // Always return string -} - -function ipLookup(r) { - if (cachedMapping === null && !loadMapping()) { - r.error("Failed to load port mapping file."); - r.return(500); - return; - } - - var subdomain = extractSubdomain(r); - if (!subdomain) { - r.return(500); - return; - } - - var entry = cachedMapping[subdomain]; - if (!entry) { - if (!loadMapping()) { - r.error("Reload failed."); - r.return(500); - return; - } - entry = cachedMapping[subdomain]; - if (!entry) { - r.error("No entry found 
for subdomain: " + subdomain); - r.return(500); - return; - } - } - - return entry.ip; -} - -export default { httpLookup, ipLookup }; \ No newline at end of file diff --git a/nginx proxy/reverse_proxy.conf b/nginx proxy/reverse_proxy.conf deleted file mode 100644 index 596949d3..00000000 --- a/nginx proxy/reverse_proxy.conf +++ /dev/null @@ -1,76 +0,0 @@ -js_import port_module from /etc/nginx/port_map.js; -js_set $backend_ip port_module.ipLookup; -js_set $http_port port_module.httpLookup; - -# Define a custom log format -log_format proxy_log '$remote_addr - $host [$time_local] ' - '"$request" $status $body_bytes_sent ' - '"$http_referer" "$http_user_agent" ' - 'to $backend_ip:80'; - -# Enable access and error logs -access_log /var/log/nginx/reverse_proxy_access.log proxy_log; -error_log /var/log/nginx/reverse_proxy_error.log info; - -# HTTPS, uncomment when nginx gets private key, will not work w/o it -server { - listen 443 ssl; - server_name .opensource.mieweb.org; - - ssl_certificate /root/.acme.sh/opensource.mieweb.org/fullchain.cer; - ssl_certificate_key /root/.acme.sh/opensource.mieweb.org/opensource.mieweb.org.key; - - location / { - if ($backend_ip = "") { - return 404 "Backend IP not found."; - } - - if ($http_port = "") { - return 404 "http port not found."; - } - - proxy_pass http://$backend_ip:$http_port; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_http_version 1.1; # Use HTTP/1.1 for WebSocket support - proxy_set_header Upgrade $http_upgrade; # Upgrade header for WebSocket support - proxy_set_header Connection "upgrade"; # Connection header for WebSocket support - - # Disable response buffering (important for SSE) - proxy_buffering off; - proxy_cache off; - chunked_transfer_encoding off; - proxy_read_timeout 300s; - - } -} - -server { - listen 80; - server_name .opensource.mieweb.com; - - location / { - if 
($backend_ip = "") { - return 404 "Backend IP not found."; - } - - if ($http_port = "") { - return 404 "http port not found."; - } - - proxy_pass http://$backend_ip:$http_port; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; - - # Disable response buffering (important for SSE) - proxy_buffering off; - proxy_cache off; - chunked_transfer_encoding off; - proxy_read_timeout 300s; - - } -} \ No newline at end of file diff --git a/proxmox-launchpad b/proxmox-launchpad deleted file mode 160000 index 038aff5a..00000000 --- a/proxmox-launchpad +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 038aff5ad0eacd9f77935ae8819ab59da13fc981 From 51fa0af202f317d7c9c8e0a3f31842a7ce6c783f Mon Sep 17 00:00:00 2001 From: maxklema Date: Tue, 5 Aug 2025 15:52:00 -0400 Subject: [PATCH 5/8] READMEs in each directory, re-organization, updated ci-cd files --- ci-cd automation/README.md | 1 + ci-cd automation/check-container-exists.sh | 46 +++ ci-cd automation/delete-container.sh | 29 ++ .../helper-scripts/PVE_user_authentication.sh | 23 ++ .../helper-scripts/create-template.sh | 56 ++++ .../helper-scripts/delete-runner.sh | 46 +++ .../helper-scripts/repository_status.sh | 37 +++ .../verify_container_ownership.sh | 37 +++ ci-cd automation/update-container.sh | 266 ++++++++++++++++++ container creation/README.md | 1 + container creation/register-container.sh | 194 +++++++++++++ container creation/start_services.sh | 135 +++++++++ dnsmasq service/README.md | 1 + gateway/README.md | 1 + gateway/extract-fingerprint.sh | 46 +++ gateway/prune_iptables.sh | 173 ++++++++++++ gateway/prune_temp_files.sh | 67 +++++ nginx reverse proxy/README.md | 1 + nginx reverse proxy/nginx.conf | 29 ++ nginx reverse proxy/port_map.js | 94 +++++++ nginx reverse proxy/reverse_proxy.conf | 76 +++++ 21 files changed, 1359 insertions(+) create mode 100644 ci-cd automation/README.md create 
mode 100644 ci-cd automation/check-container-exists.sh create mode 100644 ci-cd automation/delete-container.sh create mode 100755 ci-cd automation/helper-scripts/PVE_user_authentication.sh create mode 100755 ci-cd automation/helper-scripts/create-template.sh create mode 100755 ci-cd automation/helper-scripts/delete-runner.sh create mode 100755 ci-cd automation/helper-scripts/repository_status.sh create mode 100755 ci-cd automation/helper-scripts/verify_container_ownership.sh create mode 100644 ci-cd automation/update-container.sh create mode 100644 container creation/README.md create mode 100644 container creation/register-container.sh create mode 100644 container creation/start_services.sh create mode 100644 dnsmasq service/README.md create mode 100644 gateway/README.md create mode 100644 gateway/extract-fingerprint.sh create mode 100644 gateway/prune_iptables.sh create mode 100644 gateway/prune_temp_files.sh create mode 100644 nginx reverse proxy/README.md create mode 100644 nginx reverse proxy/nginx.conf create mode 100644 nginx reverse proxy/port_map.js create mode 100644 nginx reverse proxy/reverse_proxy.conf diff --git a/ci-cd automation/README.md b/ci-cd automation/README.md new file mode 100644 index 00000000..b8b97510 --- /dev/null +++ b/ci-cd automation/README.md @@ -0,0 +1 @@ +# CI/CD Automation \ No newline at end of file diff --git a/ci-cd automation/check-container-exists.sh b/ci-cd automation/check-container-exists.sh new file mode 100644 index 00000000..77c8fd8f --- /dev/null +++ b/ci-cd automation/check-container-exists.sh @@ -0,0 +1,46 @@ +#!/bin/bash +# Script to check if a container exists, and if so, whether it needs to be updated or cloned. +# Last Modified by Maxwell Klema on July 13th, 2025 +# ----------------------------------------------------- + +outputError() { + echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" + echo -e "${BOLD}${MAGENTA}āŒ Script Failed. Exiting... 
${RESET}" + echo -e "$2" + echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" + exit $1 +} + +RESET="\033[0m" +BOLD="\033[1m" +MAGENTA='\033[35m' + +echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" +echo -e "${BOLD}${MAGENTA}šŸ”Ž Check Container Exists ${RESET}" +echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" + +TYPE_RUNNER="true" +source /var/lib/vz/snippets/helper-scripts/PVE_user_authentication.sh +source /var/lib/vz/snippets/helper-scripts/verify_container_ownership.sh + +STATUS=$? +if [ "$STATUS" != 0 ]; then + exit 1; +fi + +REPO_BASE_NAME=$(basename -s .git "$PROJECT_REPOSITORY") + +# Check if repository folder is present. +if [ "$PVE1" == "true" ]; then + if pct exec $CONTAINER_ID -- test -f /root/container-updates.log; then + exit 2; # Update Repository + else + exit 0; # Clone Repository + fi +else + if ssh 10.15.0.5 "pct exec $CONTAINER_ID -- test -f /root/container-updates.log"; then + exit 2; # Update Repository + else + exit 0; # Clone Repository + fi +fi \ No newline at end of file diff --git a/ci-cd automation/delete-container.sh b/ci-cd automation/delete-container.sh new file mode 100644 index 00000000..d46ce23b --- /dev/null +++ b/ci-cd automation/delete-container.sh @@ -0,0 +1,29 @@ +#!/bin/bash +# Script to delete a container permanently +# Last Modified by Maxwell Klema on July 13th, 2025 +# ----------------------------------------------------- + +RESET="\033[0m" +BOLD="\033[1m" +MAGENTA='\033[35m' + +echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" +echo -e "${BOLD}${MAGENTA}šŸ—‘ļø Delete Container ${RESET}" +echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" + +CMD=( +bash /var/lib/vz/snippets/helper-scripts/delete-runner.sh +"$PROJECT_REPOSITORY" +"$GITHUB_PAT" +"$PROXMOX_USERNAME" +"$PROXMOX_PASSWORD" +"$CONTAINER_NAME" +) + +# Safely quote each argument for the shell +QUOTED_CMD=$(printf ' %q' "${CMD[@]}") + +tmux new-session -d -s 
delete-runner "$QUOTED_CMD" + +echo "āœ… Container with name \"$CONTAINER_NAME\" will be permanently deleted." +exit 0 # Container Deleted Successfully \ No newline at end of file diff --git a/ci-cd automation/helper-scripts/PVE_user_authentication.sh b/ci-cd automation/helper-scripts/PVE_user_authentication.sh new file mode 100755 index 00000000..a7b84f34 --- /dev/null +++ b/ci-cd automation/helper-scripts/PVE_user_authentication.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# Script that checks if a user is authenticated in Proxmox PVE Realm @ opensource.mieweb.org +# Last Modified by Maxwell Klema on July 13th, 2025 +# ----------------------------------------------------- + +# Authenticate User (Only Valid Users can Create Containers) + +if [ -z "$PROXMOX_USERNAME" ]; then + read -p "Enter Proxmox Username → " PROXMOX_USERNAME +fi + +if [ -z "$PROXMOX_PASSWORD" ]; then + read -sp "Enter Proxmox Password → " PROXMOX_PASSWORD + echo "" +fi + +USER_AUTHENTICATED=$(ssh root@10.15.234.122 "node /root/bin/js/runner.js authenticateUser \"$PROXMOX_USERNAME\" \"$PROXMOX_PASSWORD\"") + +if [ $USER_AUTHENTICATED == 'false' ]; then + outputError 1 "Your Proxmox account, $PROXMOX_USERNAME@pve, was not authenticated. Retry with valid credentials." +fi + +echo "šŸŽ‰ Your proxmox account, $PROXMOX_USERNAME@pve, has been authenticated" \ No newline at end of file diff --git a/ci-cd automation/helper-scripts/create-template.sh b/ci-cd automation/helper-scripts/create-template.sh new file mode 100755 index 00000000..54d5e1ea --- /dev/null +++ b/ci-cd automation/helper-scripts/create-template.sh @@ -0,0 +1,56 @@ +#!/bin/bash +# Creates a template of a LXC container +# Last modified by Maxwell Klema on July 23rd, 2025. 
+# -------------------------------------------------- + +if [ "${DEPLOY_ON_START^^}" != "Y" ] || [ "${GH_ACTION^^}" != "Y" ]; then + return 0 +fi + +DEFAULT_BRANCH=$(curl -s https://api.github.com/repos/$REPO_BASE_NAME_WITH_OWNER/$REPO_BASE_NAME | jq -r '.default_branch') + +if [ "$DEFAULT_BRANCH" != "$PROJECT_BRANCH" ]; then + return 0 +fi + +echo "šŸ“ Creating Container Template..." + +# Check if template already exists, and if it does, destroy it ===== + +TEMPLATE_NAME="template-$REPO_BASE_NAME-$REPO_BASE_NAME_WITH_OWNER" +TEMPLATE_CONTAINER_ID=$( { pct list; ssh root@10.15.0.5 'pct list'; } | awk -v name="$TEMPLATE_NAME" '$3 == name {print $1}') + +if [ ! -z "$TEMPLATE_CONTAINER_ID" ]; then + pct destroy $TEMPLATE_CONTAINER_ID | true +fi + +# Clone LXC container and convert it into a template ===== + +NEXT_ID=$(pvesh get /cluster/nextid) + +if (( $CONTAINER_ID % 2 == 101 )); then + ssh root@10.15.0.5 " + pct clone $CONTAINER_ID $NEXT_ID \ + --hostname "$TEMPLATE_NAME" \ + --full true + pct migrate $NEXT_ID intern-phxdc-pve1 --target-storage containers-pve1 + " > /dev/null 2>&1 +else + pct clone $CONTAINER_ID $NEXT_ID \ + --hostname "$TEMPLATE_NAME" \ + --full true +fi + +# AUTH_TOKEN_RESPONSE=$(curl --location --request POST https://api.github.com/repos/$REPO_BASE_NAME_WITH_OWNER/$REPO_BASE_NAME/actions/runners/registration-token --header "Authorization: token $GITHUB_PAT") +# TOKEN=$(echo "$AUTH_TOKEN_RESPONSE" | jq -r '.token') + +# Remove rsa keys ==== +pct start $NEXT_ID +pct enter $NEXT_ID < /dev/null 2>&1 + else + ssh root@10.15.0.5 "pct destroy $CONTAINER_ID" > /dev/null 2>&1 + fi +else + if pct status "$CONTAINER_ID" | grep -q "status: running"; then + pct stop "$CONTAINER_ID" && pct destroy "$CONTAINER_ID" > /dev/null 2>&1 + else + pct destroy "$CONTAINER_ID" > /dev/null 2>&1 + fi +fi + +source /usr/local/bin/prune_iptables.sh + +REPO_BASE_NAME=$(basename -s .git "$PROJECT_REPOSITORY") +REPO_BASE_NAME_WITH_OWNER=$(echo "$PROJECT_REPOSITORY" | cut 
-d'/' -f4) + +RUNNERS=$(curl --location https://api.github.com/repos/$REPO_BASE_NAME_WITH_OWNER/$REPO_BASE_NAME/actions/runners --header "Authorization: token $GITHUB_PAT") + +while read -r RUNNER; do + RUNNER_NAME=$(echo "$RUNNER" | jq -r '.name') + if [ "$RUNNER_NAME" == "$CONTAINER_NAME" ]; then + RUNNER_ID=$(echo "$RUNNER" | jq -r '.id') + curl --location --request DELETE "https://api.github.com/repos/$REPO_BASE_NAME_WITH_OWNER/$REPO_BASE_NAME/actions/runners/$RUNNER_ID" \ + --header "Authorization: token $GITHUB_PAT" + fi +done < <(echo "$RUNNERS" | jq -c '.runners[]') \ No newline at end of file diff --git a/ci-cd automation/helper-scripts/repository_status.sh b/ci-cd automation/helper-scripts/repository_status.sh new file mode 100755 index 00000000..5ec73b76 --- /dev/null +++ b/ci-cd automation/helper-scripts/repository_status.sh @@ -0,0 +1,37 @@ +#!/bin/bash +# Helper script to determine if container needs to clone repository or simply update it +# Last Modified by Maxwell Klema on July 21st, 2025 +# ------------------------------------------------- + +set +e +TYPE_RUNNER="true" +source /var/lib/vz/snippets/helper-scripts/PVE_user_authentication.sh +source /var/lib/vz/snippets/helper-scripts/verify_container_ownership.sh + +STATUS=$? + +if [ "$STATUS" != 0 ]; then + exit 1; +fi + +REPO_BASE_NAME=$(basename -s .git "$PROJECT_REPOSITORY") + +# Check if repository folder is present. 
+ +if [ "$PVE1" == "true" ]; then + if pct exec $CONTAINER_ID -- test -d /root/$REPO_BASE_NAME; then + echo "Update" + exit 2; # Update Repository + else + echo "Clone" + exit 0; # Clone Repository + fi +else + if ssh 10.15.0.5 "pct exec $CONTAINER_ID -- test -d /root/$REPO_BASE_NAME"; then + echo "Update" + exit 2; # Update Repository + else + echo "Clone" + exit 0; # Clone Repository + fi +fi \ No newline at end of file diff --git a/ci-cd automation/helper-scripts/verify_container_ownership.sh b/ci-cd automation/helper-scripts/verify_container_ownership.sh new file mode 100755 index 00000000..a0727cc2 --- /dev/null +++ b/ci-cd automation/helper-scripts/verify_container_ownership.sh @@ -0,0 +1,37 @@ +#!/bin/bash +# Script to verify container ownership based on name and CTID +# Last Modified by Maxwell Klema on July 13th, 2025 +# ----------------------------------------------------- + +CONTAINER_NAME="${CONTAINER_NAME,,}" + +if [ -z "$CONTAINER_NAME" ]; then + read -p "Enter Container Name → " CONTAINER_NAME +fi + +CONTAINER_ID=$( { pct list; ssh root@10.15.0.5 'pct list'; } | awk -v name="$CONTAINER_NAME" '$3 == name {print $1}') + +if [ -z "$CONTAINER_ID" ]; then + echo "āœ… Container with name \"$CONTAINER_NAME\" is available for use." 
+ return 1 +fi + +if [ "$TYPE_RUNNER" != "true" ]; then + if (( $CONTAINER_ID % 2 == 0 )); then + CONTAINER_OWNERSHIP=$(ssh root@10.15.0.5 "pct config \"$CONTAINER_ID\" | grep "tags" | grep \"$PROXMOX_USERNAME\"") + else + CONTAINER_OWNERSHIP=$(pct config "$CONTAINER_ID" | grep "tags" | grep -E "(^|;)$PROXMOX_USERNAME(;|$)") + fi +else + CONTAINER_OWNERSHIP=$(ssh root@10.15.0.5 "pct config \"$CONTAINER_ID\" | grep "tags" | grep \"$PROXMOX_USERNAME\"") + PVE1="false" + if [ -z "$CONTAINER_OWNERSHIP" ]; then + CONTAINER_OWNERSHIP=$(pct config "$CONTAINER_ID" | grep "tags" | grep -E "(^|;)$PROXMOX_USERNAME(;|$)") + PVE1="true" + fi +fi + +if [ -z "$CONTAINER_OWNERSHIP" ]; then + echo "āŒ You do not own the container with name \"$CONTAINER_NAME\"." + outputError 1 "You do not own the container with name \"$CONTAINER_NAME\"." +fi diff --git a/ci-cd automation/update-container.sh b/ci-cd automation/update-container.sh new file mode 100644 index 00000000..484f4996 --- /dev/null +++ b/ci-cd automation/update-container.sh @@ -0,0 +1,266 @@ +#!/bin/bash +# Script to automatically fetch new contents from a branch, push them to container, and restart intern +# Last Modified on August 5th, 2025 by Maxwell Klema +# ---------------------------------------- + +RESET="\033[0m" +BOLD="\033[1m" +MAGENTA='\033[35m' + +outputError() { + echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" + echo -e "${BOLD}${MAGENTA}āŒ Script Failed. Exiting... ${RESET}" + echo -e "$2" + echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" + exit $1 +} + + +echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" +echo -e "${BOLD}${MAGENTA}šŸ”„ Update Container Contents ${RESET}" +echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" + +if [ -z "${RUNTIME_LANGUAGE^^}" ]; then + echo "Skipping container update because there is nothing to update." 
+ exit 0 +fi + +source /var/lib/vz/snippets/helper-scripts/PVE_user_authentication.sh +source /var/lib/vz/snippets/helper-scripts/verify_container_ownership.sh + +# Get Project Details + +CONTAINER_NAME="${CONTAINER_NAME,,}" + +if [ -z "$PROJECT_REPOSITORY" ]; then + read -p "šŸš€ Paste the link to your project repository → " PROJECT_REPOSITORY +else + DEPLOY_ON_START="y" +fi + +CheckRepository() { + PROJECT_REPOSITORY_SHORTENED=${PROJECT_REPOSITORY#*github.com/} + PROJECT_REPOSITORY_SHORTENED=${PROJECT_REPOSITORY_SHORTENED%.git} + REPOSITORY_EXISTS=$(curl -s -o /dev/null -w "%{http_code}" https://github.com/$PROJECT_REPOSITORY_SHORTENED) +} + +CheckRepository + +if [ "$REPOSITORY_EXISTS" != "200" ]; then + outputError 1 "The repository link you provided, \"$PROJECT_REPOSITORY\" was not valid." +fi + +echo "āœ… The repository link you provided, \"$PROJECT_REPOSITORY\", was valid." + +# Get Project Branch + +if [ -z "$PROJECT_BRANCH" ]; then + PROJECT_BRANCH="main" +fi + +REPOSITORY_BRANCH_EXISTS=$(curl -s -o /dev/null -w "%{http_code}" https://api.github.com/repos/$PROJECT_REPOSITORY_SHORTENED/branches/$PROJECT_BRANCH) + +if [ "$REPOSITORY_BRANCH_EXISTS" != "200" ]; then + outputError 1 "The branch you provided, \"$PROJECT_BRANCH\", does not exist on repository at \"$PROJECT_REPOSITORY\"." +fi + + +# # Get Project Root Directroy + +if [ "$PROJECT_ROOT" == "." ] || [ -z "$PROJECT_ROOT" ]; then + PROJECT_ROOT="/" +fi + +VALID_PROJECT_ROOT=$(ssh root@10.15.234.122 "node /root/bin/js/runner.js authenticateRepo \"$PROJECT_REPOSITORY\" \"$PROJECT_BRANCH\" \"$PROJECT_ROOT\"") + +if [ "$VALID_PROJECT_ROOT" == "false" ]; then + outputError 1 "The root directory you provided, \"$PROJECT_ROOT\", does not exist on branch, \"$PROJECT_BRANCH\", on repository at \"$PROJECT_REPOSITORY\"." 
+fi + +REPO_BASE_NAME=$(basename -s .git "$PROJECT_REPOSITORY") +REPO_BASE_NAME_WITH_OWNER=$(echo "$PROJECT_REPOSITORY" | cut -d'/' -f4) + +if [ "$PROJECT_ROOT" == "" ] || [ "$PROJECT_ROOT" == "/" ]; then + PROJECT_ROOT="." +fi + +# Install Services ==== + +echo "šŸ›Žļø Installing Services..." + +if [ -z "$LINUX_DISTRIBUTION" ]; then + LINUX_DISTRIBUTION="debian" +fi + +if [ ! -z "$SERVICES" ] || [ ! -z "$CUSTOM_SERVICES" ]; then + REQUIRE_SERVICES="y" +fi + +SERVICE_COMMANDS=$(ssh -o SendEnv="LINUX_DISTRIBUTION SERVICES CUSTOM_SERVICES REQUIRE_SERVICES" \ + root@10.15.234.122 \ + "/root/bin/deployment-scripts/gatherServices.sh true") + +echo "$SERVICE_COMMANDS" | while read -r line; do + pct exec $CONTAINER_ID -- bash -c "$line | true" > /dev/null 2>&1 +done + +# Change HTTP port if necessary ==== + +if [ ! -z "$HTTP_PORT" ]; then + if [ "$HTTP_PORT" -lt 80 ] || [ "$HTTP_PORT" -gt 60000 ]; then + outputError 1 "Invalid HTTP port: $HTTP_PORT. Must be between 80 and 60000." + fi + ssh root@10.15.20.69 -- \ +"jq \ '.[\"$CONTAINER_NAME\"].ports.http = $HTTP_PORT' \ + /etc/nginx/port_map.json > /tmp/port_map.json.new \ + && mv -f /tmp/port_map.json.new /etc/nginx/port_map.json " +fi + + +# Clone repository if needed ==== + +if (( "$CONTAINER_ID" % 2 == 0 )); then + ssh root@10.15.0.5 " + pct enter $CONTAINER_ID < /dev/null +fi +EOF + " +else + pct enter $CONTAINER_ID < /dev/null +fi +EOF +fi + +# Update Container with New Contents from repository ===== + +startComponentPVE1() { + + RUNTIME="$1" + BUILD_CMD="$2" + START_CMD="$3" + COMP_DIR="$4" + INSTALL_CMD="$5" + + if [ "${RUNTIME^^}" == "NODEJS" ]; then + pct set $CONTAINER_ID --memory 4096 --swap 0 --cores 4 + pct exec $CONTAINER_ID -- bash -c "cd /root/$REPO_BASE_NAME/$PROJECT_ROOT/ && git fetch origin && git reset --hard origin/$PROJECT_BRANCH && git pull" > /dev/null 2>&1 + pct exec $CONTAINER_ID -- bash -c "cd /root/$REPO_BASE_NAME/$PROJECT_ROOT/$COMP_DIR && $INSTALL_CMD && $BUILD_CMD" > /dev/null 2>&1 + pct 
set $CONTAINER_ID --memory 2048 --swap 0 --cores 2 + elif [ "${RUNTIME^^}" == "PYTHON" ]; then + pct set $CONTAINER_ID --memory 4096 --swap 0 --cores 4 + pct exec $CONTAINER_ID -- bash -c "cd /root/$REPO_BASE_NAME/$PROJECT_ROOT/ && git fetch origin && git reset --hard origin/$PROJECT_BRANCH && git pull" > /dev/null 2>&1 + pct exec $CONTAINER_ID -- bash -c "cd /root/$REPO_BASE_NAME/$PROJECT_ROOT/$COMP_DIR && source venv/bin/activate && $INSTALL_CMD && $BUILD_CMD" > /dev/null 2>&1 + pct set $CONTAINER_ID --memory 2048 --swap 0 --cores 2 + fi +} + +startComponentPVE2() { + + RUNTIME="$1" + BUILD_CMD="$2" + START_CMD="$3" + COMP_DIR="$4" + INSTALL_CMD="$5" + + if [ "${RUNTIME^^}" == "NODEJS" ]; then + ssh root@10.15.0.5 " + pct set $CONTAINER_ID --memory 4096 --swap 0 --cores 4 && + pct exec $CONTAINER_ID -- bash -c 'cd /root/$REPO_BASE_NAME/$PROJECT_ROOT/ && git fetch origin && git reset --hard origin/$PROJECT_BRANCH && git pull' > /dev/null 2>&1 + pct exec $CONTAINER_ID -- bash -c 'cd /root/$REPO_BASE_NAME/$PROJECT_ROOT/$COMP_DIR && $INSTALL_CMD' && '$BUILD_CMD' > /dev/null 2>&1 + pct set $CONTAINER_ID --memory 2048 --swap 0 --cores 2 + " + elif [ "${RUNTIME^^}" == "PYTHON" ]; then + ssh root@10.15.0.5 " + pct set $CONTAINER_ID --memory 4096 --swap 0 --cores 4 && + pct exec $CONTAINER_ID -- bash -c 'cd /root/$REPO_BASE_NAME/$PROJECT_ROOT && git fetch origin && git reset --hard origin/$PROJECT_BRANCH && git pull' > /dev/null 2>&1 + pct exec $CONTAINER_ID -- bash -c 'cd /root/$REPO_BASE_NAME/$PROJECT_ROOT/$COMP_DIR && source venv/bin/activate && $INSTALL_CMD' && '$BUILD_CMD' > /dev/null 2>&1 + pct set $CONTAINER_ID --memory 2048 --swap 0 --cores 2 + " + fi +} + + +if [ ! -z "$RUNTIME_LANGUAGE" ] && echo "$RUNTIME_LANGUAGE" | jq . 
>/dev/null 2>&1; then # If RUNTIME_LANGUAGE is set and is valid JSON + MULTI_COMPONENT="Y" +fi + +if [ "${MULTI_COMPONENT^^}" == "Y" ]; then + for COMPONENT in $(echo "$START_COMMAND" | jq -r 'keys[]'); do + START=$(echo "$START_COMMAND" | jq -r --arg k "$COMPONENT" '.[$k]') + RUNTIME=$(echo "$RUNTIME_LANGUAGE" | jq -r --arg k "$COMPONENT" '.[$k]') + BUILD=$(echo "$BUILD_COMMAND" | jq -r --arg k "$COMPONENT" '.[$k]') + INSTALL=$(echo "$INSTALL_COMMAND" | jq -r --arg k "$COMPONENT" '.[$k]') + if [ "$BUILD" == "null" ]; then + BUILD="" + fi + + if (( "$CONTAINER_ID" % 2 == 0 )); then + startComponentPVE2 "$RUNTIME" "$BUILD" "$START" "$COMPONENT" "$INSTALL" + else + startComponentPVE1 "$RUNTIME" "$BUILD" "$START" "$COMPONENT" "$INSTALL" + fi + done + if [ ! -z "$ROOT_START_COMMAND" ]; then + if (( $CONTAINER_ID % 2 == 0 )); then + ssh root@10.15.0.5 "pct exec $CONTAINER_ID -- bash -c 'cd /root/$REPO_BASE_NAME/$PROJECT_ROOT && $ROOT_START_COMMAND'" + else + pct exec $CONTAINER_ID -- bash -c "cd /root/$REPO_BASE_NAME/$PROJECT_ROOT && $ROOT_START_COMMAND" + fi + fi + # startComponent "$RUNTIME_LANGUAGE" "$BUILD_COMMAND" "$START_COMMAND" "." +else + if (( $CONTAINER_ID % 2 == 0 )); then + startComponentPVE2 "$RUNTIME_LANGUAGE" "$BUILD_COMMAND" "$START_COMMAND" "." "$INSTALL_COMMAND" + else + startComponentPVE1 "$RUNTIME_LANGUAGE" "$BUILD_COMMAND" "$START_COMMAND" "." 
"$INSTALL_COMMAND" + fi +fi + +# Update Log File + +if (( "$CONTAINER_ID" % 2 == 0 )); then + ssh root@10.15.0.5 "pct exec $CONTAINER_ID -- bash -c 'echo \"[$(date)]\" >> /root/container-updates.log'" +else + pct exec $CONTAINER_ID -- bash -c "echo \"[$(date)]\" >> /root/container-updates.log" +fi + +# Create new template if on default branch ===== + +UPDATE_CONTAINER="true" +BUILD_COMMAND_B64=$(echo -n "$BUILD_COMMAND" | base64) +RUNTIME_LANGUAGE_B64=$(echo -n "$RUNTIME_LANGUAGE" | base64) +START_COMMAND_B64=$(echo -n "$START_COMMAND" | base64) + +CMD=( +bash /var/lib/vz/snippets/start_services.sh +"$CONTAINER_ID" +"$CONTAINER_NAME" +"$REPO_BASE_NAME" +"$REPO_BASE_NAME_WITH_OWNER" +"$SSH_PORT" +"$CONTAINER_IP" +"$PROJECT_ROOT" +"$ROOT_START_COMMAND" +"$DEPLOY_ON_START" +"$MULTI_COMPONENT" +"$START_COMMAND_B64" +"$BUILD_COMMAND_B64" +"$RUNTIME_LANGUAGE_B64" +"$GH_ACTION" +"$PROJECT_BRANCH" +"$UPDATE_CONTAINER" +) + +# Safely quote each argument for the shell +QUOTED_CMD=$(printf ' %q' "${CMD[@]}") + +tmux new-session -d -s "$CONTAINER_NAME" "$QUOTED_CMD" +echo "āœ… Container $CONTAINER_ID has been updated with new contents from branch \"$PROJECT_BRANCH\" on repository \"$PROJECT_REPOSITORY\"." +exit 0 + diff --git a/container creation/README.md b/container creation/README.md new file mode 100644 index 00000000..a2ca2a56 --- /dev/null +++ b/container creation/README.md @@ -0,0 +1 @@ +# Container Creation \ No newline at end of file diff --git a/container creation/register-container.sh b/container creation/register-container.sh new file mode 100644 index 00000000..83a3cdd4 --- /dev/null +++ b/container creation/register-container.sh @@ -0,0 +1,194 @@ +#!/bin/bash +# var/lib/vz/snippets/register-container.sh +# Script to register a container's IP and ports in the NGINX port map JSON file. 
+# Last Modified June 27 2025 by Maxwell Klema + +set -euo pipefail + +if [[ -z "${1-}" || -z "${2-}" ]]; then + echo "Usage: $0 " + exit 1 +fi + +CTID="$1" +http_port="$2" +ADDITIONAL_PROTOCOLS="${3-}" #set to empty string if not passed + +# Redirect stdout and stderr to a log file +LOGFILE="/var/log/pve-hook-$CTID.log" +exec > >(tee -a "$LOGFILE") 2>&1 + +echo "---- Hookscript started at $(date) ----" +echo "ā³ Waiting for container to boot and get DHCP lease..." +#sleep 10 + +# Extract IP +container_ip="" +attempts=0 +max_attempts=5 + +while [[ -z "$container_ip" && $attempts -lt $max_attempts ]]; do + container_ip=$(pct exec "$CTID" -- ip -4 addr show eth0 | awk '/inet / {print $2}' | cut -d'/' -f1) + [[ -z "$container_ip" ]] && sleep 2 && ((attempts++)) +done + +if [[ -z "$container_ip" ]]; then + echo "āŒ Failed to obtain IP for container $CTID" + exit 1 +fi + +hostname=$(pct exec "$CTID" -- hostname) + +# Check if this container already has a SSH port assigned in PREROUTING + +existing_ssh_port=$(iptables -t nat -S PREROUTING | grep "to-destination $container_ip:22" | awk -F'--dport ' '{print $2}' | awk '{print $1}' | head -n 1 || true) + +if [[ -n "$existing_ssh_port" ]]; then + echo "ā„¹ļø Container already has SSH port $existing_ssh_port" + ssh_port="$existing_ssh_port" +else + # Get used SSH ports + used_ssh_ports=$(iptables -t nat -S PREROUTING | awk -F'--dport ' '/--dport / {print $2}' | awk '/22$/' | awk '{print $1}') + ssh_port=$(comm -23 <(seq 2222 2999 | sort) <(echo "$used_ssh_ports" | sort) | head -n 1) + + if [[ -z "$ssh_port" ]]; then + echo "āŒ No available SSH ports found" + exit 2 + fi + + # Add PREROUTING rule + iptables -t nat -A PREROUTING -i vmbr0 -p tcp --dport "$ssh_port" -j DNAT --to-destination "$container_ip:22" + + # Add POSTROUTING rule + iptables -t nat -A POSTROUTING -o vmbr0 -p tcp -d "$container_ip" --dport 22 -j MASQUERADE +fi + +# Take input file of protocols, check if the container already has a port assigned for those 
protocols in PREROUTING + +# Store all protocols and ports to write to JSON list later. + +if [ ! -z "$ADDITIONAL_PROTOCOLS" ]; then + + list_all_protocols=() + list_all_ports=() + + while read line; do + + protocol=$(echo "$line" | awk '{print $1}') + underlying_protocol=$(echo "$line" | awk '{print $2}') + default_port_number=$(echo "$line" | awk '{print $3}') + + protocol_port="" + existing_port=$(iptables -t nat -S PREROUTING | grep "to-destination $container_ip:$default_port_number" | awk -F'--dport ' '{print $2}' | awk '{print $1}' | head -n 1 || true) + + if [[ -n "$existing_port" ]]; then + # Port already exists, so just assign it to protocol_port + echo "ā„¹ļø This Container already has a $protocol port at $existing_port" + protocol_port="$existing_port" + else + used_protocol_ports=$(iptables -t nat -S PREROUTING | awk -F'--dport ' '/--dport / {print $2}' | awk '{print $1}') + protocol_port=$(comm -23 <(seq 10001 29999 | sort) <(echo "$used_protocol_ports" | sort) | head -n 1 || true) + + if [[ -z "protocol_port" ]]; then + echo "āŒ No available $protocol ports found" + exit 2 + fi + + # Add PREROUTING rule + iptables -t nat -A PREROUTING -i vmbr0 -p "$underlying_protocol" --dport "$protocol_port" -j DNAT --to-destination "$container_ip:$default_port_number" + + # Add POSTROUTING rule + iptables -t nat -A POSTROUTING -o vmbr0 -p "$underlying_protocol" -d "$container_ip" --dport "$default_port_number" -j MASQUERADE + + fi + + list_all_protocols+=("$protocol") + list_all_ports+=("$protocol_port") + + done < <(tac "$ADDITIONAL_PROTOCOLS") + + # Space Seperate Lists + + ss_protocols="$(IFS=, ; echo "${list_all_protocols[*]}")" + ss_ports="$(IFS=, ; echo "${list_all_ports[*]}")" + + #Update NGINX port map JSON on the remote host safely using a heredoc and positional parameters + +ssh root@10.15.20.69 bash -s -- "$hostname" "$container_ip" "$ssh_port" "$http_port" "$ss_protocols" "$ss_ports" <<'EOF' +set -euo pipefail + +hostname="$1" +container_ip="$2" 
+ssh_port="$3" +http_port="$4" +protos_json=$(echo "$5" | tr ',' '\n' | jq -R . | jq -s .) +ports_json=$(echo "$6" | tr ',' '\n' | jq -R . | jq -s 'map(tonumber)') + +jq --arg hn "$hostname" \ + --arg ip "$container_ip" \ + --argjson ssh "$ssh_port" \ + --argjson http "$http_port" \ + --argjson protos "$protos_json" \ + --argjson ports_list "$ports_json" \ + '. + {($hn): {ip: $ip, ports: ( reduce range(0; $protos | length) as $i ( {ssh: $ssh, http: $http}; . + { ($protos[$i]): $ports_list[$i]}))}}' /etc/nginx/port_map.json > /tmp/port_map.json.new + +mv -f /tmp/port_map.json.new /etc/nginx/port_map.json +nginx -s reload +EOF + +else + +# Update NGINX port map JSON on the remote host safely using a heredoc and positional parameters + +ssh root@10.15.20.69 bash -s -- "$hostname" "$container_ip" "$ssh_port" "$http_port" <<'EOF' +set -euo pipefail + +hostname="$1" +container_ip="$2" +ssh_port="$3" +http_port="$4" + +jq --arg hn "$hostname" \ + --arg ip "$container_ip" \ + --argjson http "$http_port" \ + --argjson ssh "$ssh_port" \ + '. 
+ {($hn): {ip: $ip, ports: {ssh: $ssh, http: $http}}}' /etc/nginx/port_map.json > /tmp/port_map.json.new + +mv -f /tmp/port_map.json.new /etc/nginx/port_map.json +nginx -s reload +EOF + +fi + +# Results + +# Define high-contrast colors +BOLD='\033[1m' +BLUE='\033[34m' +MAGENTA='\033[35m' +GREEN='\033[32m' +CYAN='\033[36m' +RESET='\033[0m' + +# Top border and title +echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" +echo -e "${BOLD}šŸ”” ${MAGENTA}COPY THESE PORTS DOWN${RESET} — ${CYAN}For External Access${RESET}" +echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" +echo -e "šŸ“Œ ${BLUE}Note:${RESET} Your container listens on default ports internally," +echo -e " but EXTERNAL traffic must use the ports listed below:" +echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" + +# Port info +echo -e "āœ… ${GREEN}Hostname Registration:${RESET} $hostname → $container_ip" +echo -e "šŸ” ${MAGENTA}SSH Port :${RESET} $ssh_port" +echo -e "🌐 ${BLUE}HTTP Port :${RESET} $http_port" + +# Additional protocols (if any) +if [ ! 
-z "$ADDITIONAL_PROTOCOLS" ]; then + for i in "${!list_all_protocols[@]}"; do + echo -e "šŸ“” ${CYAN}${list_all_protocols[$i]} Port :${RESET} ${list_all_ports[$i]}" + done +fi + +# Bottom border +echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" \ No newline at end of file diff --git a/container creation/start_services.sh b/container creation/start_services.sh new file mode 100644 index 00000000..7942ad02 --- /dev/null +++ b/container creation/start_services.sh @@ -0,0 +1,135 @@ +#!/bin/bash +# Script ran by a virtual terminal session to start services and migrate a container +# Script is only ran on GH action workflows when runner disconnects +# Last Modified by Maxwell Klema on August 5th, 2025 +# ------------------------------------------------ + +CONTAINER_ID="$1" +CONTAINER_NAME="$2" +REPO_BASE_NAME="$3" +REPO_BASE_NAME_WITH_OWNER="$4" +SSH_PORT="$5" +CONTAINER_IP="$6" +PROJECT_ROOT="$7" +ROOT_START_COMMAND="$8" +DEPLOY_ON_START="$9" +MULTI_COMPONENT="${10}" +START_COMMAND=$(echo "${11}" | base64 -d) +BUILD_COMMAND=$(echo "${12}" | base64 -d) +RUNTIME_LANGUAGE=$(echo "${13}" | base64 -d) +GH_ACTION="${14}" +PROJECT_BRANCH="${15}" +UPDATE_CONTAINER="${16}" +CONTAINER_NAME="${CONTAINER_NAME,,}" + +if [ "${GH_ACTION^^}" == "Y" ]; then + sleep 8 # Wait for Job to Complete +fi + +if (( $CONTAINER_ID % 2 == 0 )) && [ "$UPDATE_CONTAINER" == "true" ]; then + ssh root@10.15.0.5 "pct stop $CONTAINER_ID" > /dev/null 2>&1 +else + pct stop $CONTAINER_ID > /dev/null 2>&1 +fi + +# Create template if on default branch ==== +source /var/lib/vz/snippets/helper-scripts/create-template.sh + +if (( $CONTAINER_ID % 2 == 0 )); then + + if [ "$UPDATE_CONTAINER" != "true" ]; then + pct migrate $CONTAINER_ID intern-phxdc-pve2 --target-storage containers-pve2 --online > /dev/null 2>&1 + sleep 5 # wait for migration to finish (fix this later) + fi + + ssh root@10.15.0.5 "pct start $CONTAINER_ID" + ssh root@10.15.0.5 "pct exec $CONTAINER_ID -- bash -c 
'chmod 700 ~/.bashrc'" # enable full R/W/X permissions + ssh root@10.15.0.5 "pct set $CONTAINER_ID --memory 4096 --swap 0 --cores 4" + + if [ "${GH_ACTION^^}" == "Y" ]; then + ssh root@10.15.0.5 "pct exec $CONTAINER_ID -- systemctl start github-runner" + fi + + startProject() { + + RUNTIME="$1" + BUILD_CMD="$2" + START_CMD="$3" + COMP_DIR="$4" + + if [ -z "$BUILD_CMD" ]; then + BUILD_CMD="true" + fi + + if [ "${RUNTIME^^}" == "NODEJS" ]; then + ssh root@10.15.0.5 "pct exec $CONTAINER_ID -- bash -c \"mkdir -p /tmp && chmod 1777 /tmp && mkdir -p /tmp/tmux-0 && chmod 700 /tmp/tmux-0 && TMUX_TMPDIR=/tmp tmux new-session -d 'export HOME=/root export PATH=\\\$PATH:/usr/local/bin && cd /root/$REPO_BASE_NAME/$PROJECT_ROOT/$COMP_DIR && $BUILD_CMD && $START_CMD'\"" > /dev/null 2>&1 + elif [ "${RUNTIME^^}" == "PYTHON" ]; then + ssh root@10.15.0.5 "pct exec $CONTAINER_ID -- bash -c \"mkdir -p /tmp && chmod 1777 /tmp && mkdir -p /tmp/tmux-0 && chmod 700 /tmp/tmux-0 && TMUX_TMPDIR=/tmp tmux new-session -d 'export HOME=/root export PATH=\\\$PATH:/usr/local/bin && cd /root/$REPO_BASE_NAME/$PROJECT_ROOT/$COMP_DIR && source venv/bin/activate $BUILD_CMD && $START_CMD'\"" > /dev/null 2>&1 + fi + + } + + if [ "${DEPLOY_ON_START^^}" == "Y" ]; then + if [ "${MULTI_COMPONENT^^}" == "Y" ]; then + for COMPONENT in $(echo "$START_COMMAND" | jq -r 'keys[]'); do + START=$(echo "$START_COMMAND" | jq -r --arg k "$COMPONENT" '.[$k]') + RUNTIME=$(echo "$RUNTIME_LANGUAGE" | jq -r --arg k "$COMPONENT" '.[$k]') + BUILD=$(echo "$BUILD_COMMAND" | jq -r --arg k "$COMPONENT" '.[$k]') + if [ "$BUILD" == "null" ]; then + BUILD="" + fi + startProject "$RUNTIME" "$BUILD" "$START" "$COMPONENT" + done + if [ ! -z "$ROOT_START_COMMAND" ]; then + ssh root@10.15.0.5 "pct exec $CONTAINER_ID -- bash -c 'cd /root/$REPO_BASE_NAME/$PROJECT_ROOT && $ROOT_START_COMMAND'" > /dev/null 2>&1 + fi + else + startProject "$RUNTIME_LANGUAGE" "$BUILD_COMMAND" "$START_COMMAND" "." 
+ fi + fi + +# PVE 1 +else + pct start $CONTAINER_ID || true + sleep 5 + if [ "${GH_ACTION^^}" == "Y" ]; then + pct exec $CONTAINER_ID -- bash -c "systemctl start github-runner" + fi + + startComponent() { + + RUNTIME="$1" + BUILD_CMD="$2" + START_CMD="$3" + COMP_DIR="$4" + + if [ -z "$BUILD_CMD" ]; then + BUILD_CMD="true" + fi + + if [ "${RUNTIME^^}" == "NODEJS" ]; then + pct exec "$CONTAINER_ID" -- bash -c "mkdir -p /tmp && chmod 1777 /tmp && mkdir -p /tmp/tmux-0 && chmod 700 /tmp/tmux-0 && TMUX_TMPDIR=/tmp/tmux-0 tmux new-session -d \"export HOME=/root && export PATH=\$PATH:/usr/local/bin && cd /root/$REPO_BASE_NAME/$PROJECT_ROOT/$COMP_DIR && $BUILD_CMD && $START_CMD\"" + elif [ "${RUNTIME^^}" == "PYTHON" ]; then + pct exec "$CONTAINER_ID" -- bash -c "mkdir -p /tmp && chmod 1777 /tmp && mkdir -p /tmp/tmux-0 && chmod 700 /tmp/tmux-0 && TMUX_TMPDIR=/tmp/tmux-0 tmux new-session -d \"export HOME=/root &&export PATH=\$PATH:/usr/local/bin && cd /root/$REPO_BASE_NAME/$PROJECT_ROOT/$COMP_DIR && source venv/bin/activate && $BUILD_CMD && $START_CMD\"" + fi + } + + pct set $CONTAINER_ID --memory 4096 --swap 0 --cores 4 > /dev/null #temporarily bump up container resources for computation hungry processes (e.g. meteor) + if [ "${MULTI_COMPONENT^^}" == "Y" ]; then + for COMPONENT in $(echo "$START_COMMAND" | jq -r 'keys[]'); do + START=$(echo "$START_COMMAND" | jq -r --arg k "$COMPONENT" '.[$k]') + RUNTIME=$(echo "$RUNTIME_LANGUAGE" | jq -r --arg k "$COMPONENT" '.[$k]') + BUILD=$(echo "$BUILD_COMMAND" | jq -r --arg k "$COMPONENT" '.[$k]') + if [ "$BUILD" == "null" ]; then + BUILD="" + fi + + startComponent "$RUNTIME" "$BUILD" "$START" "$COMPONENT" + done + if [ ! -z "$ROOT_START_COMMAND" ]; then + pct exec $CONTAINER_ID -- bash -c "cd /root/$REPO_BASE_NAME/$PROJECT_ROOT && $ROOT_START_COMMAND" > /dev/null 2>&1 + fi + else + startComponent "$RUNTIME_LANGUAGE" "$BUILD_COMMAND" "$START_COMMAND" "." 
+ fi +fi diff --git a/dnsmasq service/README.md b/dnsmasq service/README.md new file mode 100644 index 00000000..28633abd --- /dev/null +++ b/dnsmasq service/README.md @@ -0,0 +1 @@ +# DNS Server \ No newline at end of file diff --git a/gateway/README.md b/gateway/README.md new file mode 100644 index 00000000..3fb79328 --- /dev/null +++ b/gateway/README.md @@ -0,0 +1 @@ +# Gateway (Intern-phxdc-pve1) \ No newline at end of file diff --git a/gateway/extract-fingerprint.sh b/gateway/extract-fingerprint.sh new file mode 100644 index 00000000..dac307b8 --- /dev/null +++ b/gateway/extract-fingerprint.sh @@ -0,0 +1,46 @@ +#!/bin/bash +# A script to collect the client's SSH fingerprint and pass that to the content creation container +# Last Modified June 24th, 2025 by Maxwell Klema +# --------------------- + +CURRENT_TIME=$(date +"%B %d %T") + +USER="create-container" +SSH_CLIENT_IP=$(echo $SSH_CLIENT | awk '{print $1}') +RECENT_LOG=$(journalctl _COMM=sshd | grep "Accepted publickey for $USER from $SSH_CLIENT_IP" | tail -1) +LOGGED_TIME=$(echo $RECENT_LOG | awk '{print $3}') + +#check most recent logged time and current time are only max 2 seconds off since multiple users may log in from same IP over time + +epoch1=$(date -d "today $CURRENT_TIME" +%s) +epoch2=$(date -d "today $LOGGED_TIME" +%s) +diff=$((epoch1 - epoch2)) + +KEY_FINGERPRINT="" + +if [ "$diff" -ge 0 ] && [ "$diff" -le 2 ]; then + KEY_FINGERPRINT=$(echo $RECENT_LOG | grep -o 'SHA256[^ ]*') +fi + +# Export environment variables +export PUBLIC_KEY="$PUBLIC_KEY" +export PROXMOX_USERNAME="$PROXMOX_USERNAME" +export PROXMOX_PASSWORD="$PROXMOX_PASSWORD" +export CONTAINER_NAME="$CONTAINER_NAME" +export CONTAINER_PASSWORD="$CONTAINER_PASSWORD" +export HTTP_PORT="$HTTP_PORT" +export PROJECT_REPOSITORY="$PROJECT_REPOSITORY" +export PROJECT_BRANCH="$PROJECT_BRANCH" +export PROJECT_ROOT="$PROJECT_ROOT" +export REQUIRE_ENV_VARS="$REQUIRE_ENV_VARS" +export CONTAINER_ENV_VARS="$CONTAINER_ENV_VARS" +export 
INSTALL_COMMAND="$INSTALL_COMMAND" +export BUILD_COMMAND="$BUILD_COMMAND" +export START_COMMAND="$START_COMMAND" +export RUNTIME_LANGUAGE="$RUNTIME_LANGUAGE" +export SERVICES="$SERVICES" +export REQUIRE_SERVICES="$REQUIRE_SERVICES" +export CUSTOM_SERVICES="$CUSTOM_SERVICES" + +# SSH with all SendEnv flags +ssh -o "SendEnv=PUBLIC_KEY PROXMOX_USERNAME PROXMOX_PASSWORD CONTAINER_NAME CONTAINER_PASSWORD HTTP_PORT PROJECT_REPOSITORY PROJECT_BRANCH PROJECT_ROOT REQUIRE_ENV_VARS CONTAINER_ENV_VARS INSTALL_COMMAND BUILD_COMMAND START_COMMAND RUNTIME_LANGUAGE SERVICES REQUIRE_SERVICES CUSTOM_SERVICES" -A create-container@10.15.234.122 diff --git a/gateway/prune_iptables.sh b/gateway/prune_iptables.sh new file mode 100644 index 00000000..abc1aec2 --- /dev/null +++ b/gateway/prune_iptables.sh @@ -0,0 +1,173 @@ +#!/bin/bash + +# Script to prune iptables rules for containers that no longer exist +# Author: Carter Myers + +# Enable strict mode: +# -e: Exit immediately if a command exits with a non-zero status. +# -u: Treat unset variables as an error when substituting. +# -o pipefail: The return value of a pipeline is the status of the last command +# to exit with a non-zero status, or zero if all commands exit successfully. +set -euo pipefail + +# --- Configuration --- +REMOTE_HOST="intern-nginx" +REMOTE_FILE="/etc/nginx/port_map.json" +LOCAL_FILE="/tmp/port_map.json" +LOG_FILE="/var/log/prune_iptables.log" +PVE_NODES=("localhost" "10.15.0.5") + +# Function to log messages with a timestamp +log_message() { + echo "[$(date)] $1" >> "$LOG_FILE" +} + +# --- 1. Fetch port_map.json from remote host --- +log_message "Fetching port_map.json from $REMOTE_HOST..." +if ! scp "$REMOTE_HOST:$REMOTE_FILE" "$LOCAL_FILE" >/dev/null 2>&1; then + log_message "ERROR: Could not fetch $REMOTE_FILE from $REMOTE_HOST" + exit 1 +fi +log_message "Successfully fetched $REMOTE_FILE to $LOCAL_FILE." + +# --- 2. 
Build list of existing hostnames --- +EXISTING_HOSTNAMES="" +for node in "${PVE_NODES[@]}"; do + log_message "Checking containers on $node..." + if [[ "$node" == "localhost" ]]; then + CTIDS=$(pct list | awk 'NR>1 {print $1}' || true) + log_message "DEBUG: Local CTIDs: [${CTIDS:-}]" + for id in $CTIDS; do + hn=$(pct config "$id" 2>/dev/null | grep -i '^hostname:' | awk '{print $2}' | tr -d '[:space:]' || true) + [[ -n "$hn" ]] && EXISTING_HOSTNAMES+="$hn"$'\n' + done + else + log_message "DEBUG: Checking remote node: $node" + CTIDS_CMD="pct list | awk 'NR>1 {print \$1}'" + CTIDS_OUTPUT=$(ssh "$node" "$CTIDS_CMD" 2>&1 || true) + if [[ "$CTIDS_OUTPUT" =~ "Permission denied" || "$CTIDS_OUTPUT" =~ "Connection refused" || "$CTIDS_OUTPUT" =~ "Host key verification failed" ]]; then + log_message "ERROR: SSH to $node failed: $CTIDS_OUTPUT" + continue + fi + log_message "DEBUG: CTIDs on $node: [${CTIDS_OUTPUT:-}]" + for id in $CTIDS_OUTPUT; do + HN_CMD="pct config $id 2>/dev/null | grep -i '^hostname:' | awk '{print \$2}'" + HN_OUTPUT=$(ssh "$node" "$HN_CMD" 2>&1 || true) + if [[ "$HN_OUTPUT" =~ "Permission denied" || "$HN_OUTPUT" =~ "No such file" ]]; then + log_message "ERROR: Failed to get hostname for $id on $node: $HN_OUTPUT" + continue + fi + hn=$(echo "$HN_OUTPUT" | tr -d '[:space:]') + [[ -n "$hn" ]] && EXISTING_HOSTNAMES+="$hn"$'\n' + done + fi +done + +# Remove any empty lines from EXISTING_HOSTNAMES +EXISTING_HOSTNAMES=$(echo "$EXISTING_HOSTNAMES" | sed '/^$/d') +log_message "Existing hostnames collected:" +log_message "$EXISTING_HOSTNAMES" + +# --- 3. Prune iptables and port_map.json --- +log_message "Pruning iptables and port_map.json..." 
+cp "$LOCAL_FILE" "$LOCAL_FILE.bak" +log_message "Created backup of $LOCAL_FILE at $LOCAL_FILE.bak" + +HOSTNAMES_IN_JSON=$(jq -r 'keys[]' "$LOCAL_FILE") +mapfile -t EXISTING_ARRAY <<< "$EXISTING_HOSTNAMES" + +# Helper function to check if a hostname exists in the collected list +hostname_exists() { + local h=$(echo "$1" | tr -d '[:space:]') + for existing in "${EXISTING_ARRAY[@]}"; do + if [[ "${h,,}" == "${existing,,}" ]]; then # Case-insensitive comparison + return 0 + fi + done + return 1 +} + +for hostname in $HOSTNAMES_IN_JSON; do + trimmed_hostname=$(echo "$hostname" | tr -d '[:space:]') + if hostname_exists "$trimmed_hostname"; then + log_message "Keeping entry: $trimmed_hostname" + else + ip=$(jq -r --arg h "$hostname" '.[$h].ip // "unknown"' "$LOCAL_FILE") + ports=$(jq -c --arg h "$hostname" '.[$h].ports // {}' "$LOCAL_FILE") + log_message "Stale entry detected: $hostname (IP: $ip, Ports: $ports) - removing..." + + # --- IPTABLES REMOVAL --- + # Capture rules into an array first to avoid subshell issues with 'while read' + mapfile -t RULES_TO_DELETE < <(sudo iptables -t nat -S | grep -w "$ip" || true) # Added sudo, || true to prevent pipefail if grep finds nothing + + if [[ ${#RULES_TO_DELETE[@]} -gt 0 ]]; then + log_message "Found ${#RULES_TO_DELETE[@]} iptables rules for $hostname. Attempting removal..." + for rule in "${RULES_TO_DELETE[@]}"; do + cleaned_rule=$(echo "$rule" | sed 's/^-A /-D /') + log_message "Attempting to remove iptables rule: sudo iptables -t nat $cleaned_rule" + if sudo iptables -t nat $cleaned_rule; then + log_message "Removed iptables rule: $cleaned_rule" + else + log_message "ERROR: Failed to remove iptables rule: $cleaned_rule (Exit status: $?)" + fi + done + else + log_message "No iptables rules found for $hostname to remove." + fi + + # --- JSON ENTRY REMOVAL --- + log_message "Attempting to remove $hostname from local port_map.json..." 
+ if jq "del(.\"$hostname\")" "$LOCAL_FILE" > "${LOCAL_FILE}.tmp"; then + if mv "${LOCAL_FILE}.tmp" "$LOCAL_FILE"; then + log_message "Successfully removed $hostname from local port_map.json." + else + log_message "ERROR: Failed to move temporary file to $LOCAL_FILE for $hostname." + exit 1 # Critical failure, exit + fi + else + log_message "ERROR: jq failed to delete $hostname from $LOCAL_FILE." + exit 1 # Critical failure, exit + fi + + # Confirm deletion from local file + if jq -e --arg h "$hostname" 'has($h)' "$LOCAL_FILE" >/dev/null; then + log_message "ERROR: $hostname still exists in local port_map.json after deletion attempt!" + else + log_message "Confirmed $hostname removed from local port_map.json." + fi + fi +done + +# --- 4. Upload and verify updated file on remote --- +log_message "Uploading updated port_map.json to $REMOTE_HOST..." +TEMP_REMOTE="/tmp/port_map.json" + +if scp "$LOCAL_FILE" "$REMOTE_HOST:$TEMP_REMOTE" >/dev/null 2>&1; then + log_message "Uploaded to $REMOTE_HOST:$TEMP_REMOTE" +else + log_message "ERROR: Failed to upload $TEMP_REMOTE to $REMOTE_HOST" + exit 1 +fi + +# Check if deleted hostnames still exist in uploaded file +log_message "Verifying remote file content..." +for hostname in $HOSTNAMES_IN_JSON; do + if ! hostname_exists "$hostname"; then # Only check for hostnames that *should* have been deleted + if ssh "$REMOTE_HOST" "grep -q '\"$hostname\"' $TEMP_REMOTE"; then + log_message "WARNING: $hostname still exists in uploaded $TEMP_REMOTE on $REMOTE_HOST!" + else + log_message "Verified $hostname was removed in uploaded file on $REMOTE_HOST." + fi + fi +done + +# Move uploaded file into place on the remote host +log_message "Moving uploaded file into final position on $REMOTE_HOST..." 
+if ssh "$REMOTE_HOST" "sudo cp $TEMP_REMOTE $REMOTE_FILE && sudo chown root:root $REMOTE_FILE && sudo chmod 644 $REMOTE_FILE && rm $TEMP_REMOTE"; then + log_message "Copied updated port_map.json to $REMOTE_FILE on $REMOTE_HOST" +else + log_message "ERROR: Failed to replace $REMOTE_FILE on $REMOTE_HOST" + exit 1 +fi + +log_message "Prune complete." \ No newline at end of file diff --git a/gateway/prune_temp_files.sh b/gateway/prune_temp_files.sh new file mode 100644 index 00000000..1b171fd1 --- /dev/null +++ b/gateway/prune_temp_files.sh @@ -0,0 +1,67 @@ +#!/bin/bash +# Script to prune all temporary files (env vars, protocols, services, and public keys) +# Last Updated July 28th 2025 Maxwell Klema + +LOG_FILE="/var/log/pruneTempFiles.log" + +writeLog() { + echo "[$(date +'%Y-%m-%d %H:%M:%S')]: $1" >> "$LOG_FILE" +} + +# Function to remove temporary environment variable Folders +removeTempEnvVars() { + TEMP_ENV_FOLDER="/var/lib/vz/snippets/container-env-vars" + while read -r line; do + if [[ "$line" == /var/lib/vz/snippets/container-env-vars/env_* ]]; then + rm -rf "$line" > /dev/null 2>&1 + writeLog "Removed temporary environment variable folder: $line" + fi + done < <(find "$TEMP_ENV_FOLDER" -maxdepth 1 -type d -name "env_*") +} + +# Function to remove temporary services file +removeTempServices() { + TEMP_SERVICES_FOLDER="/var/lib/vz/snippets/container-services" + while read -r line; do + if [[ "$line" == /var/lib/vz/snippets/container-services/services_* ]]; then + rm -f "$line" + writeLog "Removed temporary services file: $line" + fi + done < <(find "$TEMP_SERVICES_FOLDER" -maxdepth 1 -type f -name "services_*") +} + +# Function to remove temporary public key files +removeTempPublicKeys() { + TEMP_PUB_FOLDER="/var/lib/vz/snippets/container-public-keys" + while read -r line; do + if [[ "$line" == /var/lib/vz/snippets/container-public-keys/key_* ]]; then + rm -f "$line" + writeLog "Removed temporary public key file: $line" + fi + done < <(find "$TEMP_PUB_FOLDER" 
-maxdepth 1 -type f -name "key_*") +} + +# Function to remove temporary protocol files +removeTempProtocols() { + TEMP_PROTOCOL_FOLDER="/var/lib/vz/snippets/container-port-maps" + while read -r line; do + if [[ "$line" == /var/lib/vz/snippets/container-port-maps/protocol_list* ]]; then + rm -f "$line" + writeLog "Removed temporary protocol file: $line" + fi + done < <(find "$TEMP_PROTOCOL_FOLDER" -maxdepth 1 -type f -name "protocol_list*") +} + +# Main function to prune all temporary files +pruneTempFiles() { + writeLog "Starting to prune temporary files..." + removeTempEnvVars + removeTempServices + removeTempPublicKeys + removeTempProtocols + writeLog "Finished pruning temporary files." +} + +# Execute the main function +pruneTempFiles +exit 0 \ No newline at end of file diff --git a/nginx reverse proxy/README.md b/nginx reverse proxy/README.md new file mode 100644 index 00000000..f28dcebe --- /dev/null +++ b/nginx reverse proxy/README.md @@ -0,0 +1 @@ +# Nginx Reverse Proxy \ No newline at end of file diff --git a/nginx reverse proxy/nginx.conf b/nginx reverse proxy/nginx.conf new file mode 100644 index 00000000..8b53f751 --- /dev/null +++ b/nginx reverse proxy/nginx.conf @@ -0,0 +1,29 @@ +# /etc/nginx/nginx.conf +user nginx; +worker_processes auto; + +error_log /var/log/nginx/error.log notice; +pid /run/nginx.pid; + +load_module modules/ngx_http_js_module.so; +load_module modules/ngx_stream_js_module.so; + +events { + worker_connections 1024; +} + +http { + include /etc/nginx/mime.types; + default_type application/octet-stream; + + log_format backend '$remote_addr - $remote_user [$time_local] "$request" ' + 'status=$status backend_ip=$backend_ip backend_port=80 ' + '"$http_referer" "$http_user_agent" "$http_x_forwarded_for"'; + + access_log /var/log/nginx/access.log backend; + + sendfile on; + keepalive_timeout 65; + + include /etc/nginx/conf.d/*.conf; +} diff --git a/nginx reverse proxy/port_map.js b/nginx reverse proxy/port_map.js new file mode 100644 index 
00000000..0843a810 --- /dev/null +++ b/nginx reverse proxy/port_map.js @@ -0,0 +1,94 @@ +// /etc/nginx/port_map.js +// This is a reverse proxy configuration for Nginx that uses JavaScript to dynamically +// map subdomains to specific IP addresses based on a JSON file. +// Code is based off of bluehive-testflight's port_map.js +// Last updated: 06-08-2025 Carter Myers \\ 06-25-2025 Maxwell Klema + +var fs = require('fs'); +var filePath = "/etc/nginx/port_map.json"; // Make sure Nginx has read access +var cachedMapping = null; + +function loadMapping() { + try { + var content = fs.readFileSync(filePath); + cachedMapping = JSON.parse(content); + return true; + } catch (e) { + // Optionally log error + return false; + } +} + +function extractSubdomain(r) { + var host = r.variables.host; + var match = host.match(/^([^.]+)\.opensource\.mieweb\.(com|org)$/); + if (!match) { + r.error("Invalid hostname format: " + host); + return null; + } + return match[1]; +} + +function httpLookup(r) { + if (cachedMapping === null && !loadMapping()) { + r.error("Failed to load port mapping file."); + r.return(500); + return; + } + + var subdomain = extractSubdomain(r); + if (!subdomain) { + r.return(500); + return; + } + + var entry = cachedMapping[subdomain]; + if (!entry) { + if (!loadMapping()) { + r.error("Reload failed."); + r.return(500); + return; + } + entry = cachedMapping[subdomain]; + if (!entry) { + r.error("No entry found for subdomain: " + subdomain); + r.return(500); + return; + } + } + + return entry.ports.http.toString(); // Always return string +} + +function ipLookup(r) { + if (cachedMapping === null && !loadMapping()) { + r.error("Failed to load port mapping file."); + r.return(500); + return; + } + + var subdomain = extractSubdomain(r); + if (!subdomain) { + r.return(500); + return; + } + + var entry = cachedMapping[subdomain]; + if (!entry) { + if (!loadMapping()) { + r.error("Reload failed."); + r.return(500); + return; + } + entry = cachedMapping[subdomain]; + if 
(!entry) { + r.error("No entry found for subdomain: " + subdomain); + r.return(500); + return; + } + } + + return entry.ip; +} + +export default { httpLookup, ipLookup }; \ No newline at end of file diff --git a/nginx reverse proxy/reverse_proxy.conf b/nginx reverse proxy/reverse_proxy.conf new file mode 100644 index 00000000..596949d3 --- /dev/null +++ b/nginx reverse proxy/reverse_proxy.conf @@ -0,0 +1,76 @@ +js_import port_module from /etc/nginx/port_map.js; +js_set $backend_ip port_module.ipLookup; +js_set $http_port port_module.httpLookup; + +# Define a custom log format +log_format proxy_log '$remote_addr - $host [$time_local] ' + '"$request" $status $body_bytes_sent ' + '"$http_referer" "$http_user_agent" ' + 'to $backend_ip:80'; + +# Enable access and error logs +access_log /var/log/nginx/reverse_proxy_access.log proxy_log; +error_log /var/log/nginx/reverse_proxy_error.log info; + +# HTTPS, uncomment when nginx gets private key, will not work w/o it +server { + listen 443 ssl; + server_name .opensource.mieweb.org; + + ssl_certificate /root/.acme.sh/opensource.mieweb.org/fullchain.cer; + ssl_certificate_key /root/.acme.sh/opensource.mieweb.org/opensource.mieweb.org.key; + + location / { + if ($backend_ip = "") { + return 404 "Backend IP not found."; + } + + if ($http_port = "") { + return 404 "http port not found."; + } + + proxy_pass http://$backend_ip:$http_port; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_http_version 1.1; # Use HTTP/1.1 for WebSocket support + proxy_set_header Upgrade $http_upgrade; # Upgrade header for WebSocket support + proxy_set_header Connection "upgrade"; # Connection header for WebSocket support + + # Disable response buffering (important for SSE) + proxy_buffering off; + proxy_cache off; + chunked_transfer_encoding off; + proxy_read_timeout 300s; + + } +} + +server { + listen 80; + 
server_name .opensource.mieweb.com; + + location / { + if ($backend_ip = "") { + return 404 "Backend IP not found."; + } + + if ($http_port = "") { + return 404 "http port not found."; + } + + proxy_pass http://$backend_ip:$http_port; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + + # Disable response buffering (important for SSE) + proxy_buffering off; + proxy_cache off; + chunked_transfer_encoding off; + proxy_read_timeout 300s; + + } +} \ No newline at end of file From d785877cb3744fd6f08546d26356736a780cfbf4 Mon Sep 17 00:00:00 2001 From: maxklema Date: Tue, 5 Aug 2025 15:56:24 -0400 Subject: [PATCH 6/8] proxmox launchpad submodule in ci-cd automation --- .gitmodules | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.gitmodules b/.gitmodules index c8f2499b..5d3fd6ee 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +1,3 @@ -[submodule "proxmox-launchpad"] - path = proxmox-launchpad +[submodule "ci-cd automation/proxmox-launchpad"] + path = ci-cd automation/proxmox-launchpad url = https://github.com/maxklema/proxmox-launchpad.git From 13a83823795ca79623837e40de28d67f9bb5f7e2 Mon Sep 17 00:00:00 2001 From: maxklema Date: Tue, 5 Aug 2025 15:57:44 -0400 Subject: [PATCH 7/8] proxmox launchpad submodule --- ci-cd automation/proxmox-launchpad/.gitignore | 0 ci-cd automation/proxmox-launchpad/README.md | 308 +++++++++++ ci-cd automation/proxmox-launchpad/action.yml | 494 ++++++++++++++++++ 3 files changed, 802 insertions(+) create mode 100644 ci-cd automation/proxmox-launchpad/.gitignore create mode 100644 ci-cd automation/proxmox-launchpad/README.md create mode 100644 ci-cd automation/proxmox-launchpad/action.yml diff --git a/ci-cd automation/proxmox-launchpad/.gitignore b/ci-cd automation/proxmox-launchpad/.gitignore new file mode 100644 index 00000000..e69de29b diff --git a/ci-cd 
automation/proxmox-launchpad/README.md b/ci-cd automation/proxmox-launchpad/README.md new file mode 100644 index 00000000..17c2a011 --- /dev/null +++ b/ci-cd automation/proxmox-launchpad/README.md @@ -0,0 +1,308 @@ +# Proxmox LaunchPad + +This GitHub action utilizes MIE's open source cluster to manage LXC containers derived from your github repository source code. + +> [!NOTE] +> This project is new and is in an early version. There are likely bugs. If you encounter any, please create an issue. + +## Table of Contents +1. [Video Walkthroughs](#video-walkthroughs) +2. [Sequence Diagram](#sequence-diagram) +3. [Prerequisites](#prerequisites) +4. [Getting Started](#getting-started) + - [Create-Runner Job](#create-runner-workflow-job) + - [Personal Access Token](#creating-a-github-pat-for-your-workflow) + - [Runner Job](#runner-job) + - [Manage-Container Job](#manage-container-workflow-job) +5. [Configurations](#configurations) + - [Basic Properties](#basic-properties) + - [Automatic Deployment Properties](#automatic-deployment-properties) +6. [Important Notes for Automatic Deployment](#important-notes-for-automatic-deployment) +7. [Output](#output) +8. [Sample Workflow File ](#sample-workflow-file) +9. [Misc.](#misc) + +## Video Walkthroughs + +I have created a series of videos to walk you through automatic deployment, both in GitHub and via the command line. + +**[Long-Form]** Proxmox LaunchPad Walkthrough: [Video](https://youtu.be/Xa2L1o-atEM)
+**[Short-Form]** Proxmox LaunchPad Demonstration: [Short](https://youtube.com/shorts/SuK73Jej5j4)
+**[Long-Form]** Automatic Deployment through Command Line: [Video](https://youtu.be/acDW-a32Yr8)
+**[Long-Form]** Getting Started with Creating LXC Continers with Proxmox: [Video](https://youtu.be/sVW3dkBqs4E) + +## Sequence Diagram + +The sequence diagram below describes the sequence of events executed by this Github Action. + +```mermaid +sequenceDiagram + participant Dev as Developer + participant GH as GitHub + participant GHAR as GitHub Actions Runner (hosted) + participant Prox as Proxmox Cluster + participant LXC as LXC Container (Self-hosted Runner) + + Dev->>GH: Push/Create/Delete branch + GH->>GHAR: Trigger workflow + + alt Push/Create event + GHAR->>Prox: Check if LXC container exists for branch + alt Container does not exist + GHAR->>Prox: Clone template, create LXC container + Prox->>LXC: Start container, configure self-hosted runner + GHAR->>LXC: Register self-hosted runner + GHAR->>LXC: Run manage container job (install deps, clone repo, install services, deploy app) + else Container exists + GHAR->>Prox: Call update script + Prox->>LXC: Update container contents, restart with latest branch + end + else Delete event + GHAR->>LXC: Call delete-container script + LXC->>Prox: Remove runner and delete LXC container + end +``` + +## Prerequisites +- Proxmox Datacluster Setup that mirrors/forks [https://github.com/mieweb/opensource-server](https://github.com/mieweb/opensource-server). +- Valid Proxmox Account. + +## Getting Started + +> [!WARNING] +> This Github Action requires you to pass your Github Personal Access Token in order to create runners. If you are comfortable doing this, see [Create-Runner Job](#create-runner-workflow-job). If you are not, you may supply your own self-hosted runner and skip to [Manage-Container Job](#manage-container-workflow-job). 
+ +To use this action in your repository, you need to add the following trigger events in a workflow file: + +```yaml +on: + push: + create: + delete: +``` + +This allows a container to be created/updated on a push command, created when a new branch is created, and deleted when a branch is deleted (like in the case of an accepted PR). + +### Create-Runner Workflow Job + +> [!CAUTION] +> If you choose to pass in your GitHub Personal Access Token, keep it in a secure place and do not share it with anyone. + +#### Creating a GitHub PAT for your Workflow + +This Github Action requires you to pass your Github Personal Access Token in order to create runners. To create a PAT, navigate to your GitHub account settings. Then, on the bottom left-hand side, click developer settings. Navigate to Personal Access Tokens (classic). Click on generate new token, then give your token a name and an expiration date. Finally, select the manage_runners:org permission or the manage_runners:enterprise permission, depending on where your repository is housed. Finally, a token should be generated. Make sure to place the token somewhere securely. Then, add it as a repository secret in the repository that you want to run your workflow file in. + +#### Runner Job + +Before a container can be managed, a self-hosted runner must be installed on the LXC container to complete future workflow jobs. To do this, a github-supplied runner needs to create the container and install/start a custom runner on it that is linked to your repository. 
+ +The create-runner job in your workflow file should look similar to this: + +```yaml +setup-runner: + runs-on: ubuntu-latest + steps: + - name: Install Dependencies + run: | + sudo apt install -y sshpass jq + + - uses: maxklema/proxmox-launchpad@main + with: + proxmox_password: ${{ secrets.PROXMOX_PASSWORD }} + proxmox_username: ${{ secrets.PROXMOX_USERNAME }} + github_pat: ${{ secrets.GH_PAT }} +``` + +The GitHub runner needs to install sshpass (used to authenticate into another host using password authentication) and jq (a popular package for managing/parsing JSON data). + +In the second step, 3 fields are required: `proxmox_username`, `proxmox_password`, and `github_pat` + +To see an explanation for these fields: See [Basic Properties](#basic-properties) + + +### Manage-Container Workflow Job + +The second job in your workflow file should look similar to this: + +> [!NOTE] +> If you chose to run this on your own self-hosted runner instead of the action creating one for you, this will be your first job. Therefore, the needs parameter is not needed. + +```yaml + manage-container: + runs-on: self-hosted + needs: setup-runner + steps: + - uses: maxklema/proxmox-launchpad@test + with: + proxmox_password: ${{ secrets.PROXMOX_PASSWORD }} + proxmox_username: ${{ secrets.PROXMOX_USERNAME }} +``` + + + +## Configurations + +At the very minimum, two configuration settings are required to create any container. With all of these properties specified, you can create an empty container for a branch. + +### Basic Properties + +| Propety | Required? | Description | Supplied by Github? | +| ---------------- | ------ | ---------------------------------------------- | ------ | +| `proxmox_username` | Yes | Your proxmox username assigned to you. | N/A +| `proxmox_password` | Yes | Your proxmox password assigned to you. | N/A +| `http_port` | No | The HTTP Port for your container to listen on. It must be between `80` and `60000`. Default value is `3000`. 
| N/A +| `linux_distribution` | No | The Linux Distribution that runs on your container. Currently, `rocky` (Rocky 9.5) and `debian` (Debian 12) are available. Default value is `Debian`. | N/A +| `github_pat` | Conditional | Your GitHub Personal Access Token. This is used to manage runners in your containers. This is **only required if you want the workflow to create runners for you.** | Yes. Accessible in developer settings. | + + +There are a few other properties that are not required, but can still be specified in the workflow file: +
 + +| Property | Required? | Description | Supplied by Github? | +| --------- | ----- | ------------------------------------ | ------ | +| `public_key` | No | Your machine's public key that will be stored in the `~/.ssh/authorized_keys` file of your repository. This allows you to SSH into your container without a password. It is more secure and recommended. | N/A +### Automatic Deployment Properties + +This github action can *attempt* to automatically deploy services on your container. This is done by fetching your repository contents on the branch that the script is being run in, installing dependencies/services, and running build and start commands in the background. + +Additionally, with automatic deployment enabled, your container will update on every push command automatically, preventing you from having to SSH into the container and setting it up manually. + +> [!NOTE] +> Properties below are required assuming you want to automatically deploy your project. If not, none of these properties are needed. + +| Property | Required? | Description | +| --------- | ----- | ------------------------------------ | +| `project_root` | No | The root directory of your project to deploy from. Example: `/flask-server`. If the root directory is the same as the github root directory, leave blank. +| `services` | No | A JSON array of services to add to your container. Example: ```services: '["mongodb", "docker"]'```. These services will automatically install and start up on container creation. **NOTE**: All services in this list must belong on the list of available services below. If you need a service that is not on the list, see `custom_services`.

Available Services: `meteor`, `mongodb`, `docker`, `redis`, `postgresql`, `apache`, `nginx`, `rabbitmq`, `memcached`, `mariadb`. +| `custom_services` | No | A 2D JSON array of custom service installation commands to install any custom service(s) not in `services`.

Example: ```custom_services: [["sudo apt-get install -y service", "sudo systemctl enable service", "sudo systemctl start service"], ["sudo apt-get install -y service2", "sudo systemctl enable service2", "sudo systemctl start service2"]]``` + + +There are two types of deployments: single component and multi-component deployment. Single component deployment involves deploying only a single service (i.e. a single Flask Server, REACT application, MCP Server, etc.). Multi-component deployment involves deploying more than one service at the same time (i.e. a flask backend and a vite.js backend). + +> [!IMPORTANT] +> In Multi-Component applications, each top-layer key represents the file path, relative to the root directory, to the component (service) to place those variables/commands in. + +| Propety | Required? | Description | Single Component | Multi-Component | +| --------- | ----- | ------------------------------------ | ---- | --- | +| `container_env_vars` | No. | Key-Value Environment variable pairs. | Dictionary in the form of: `{ "api_key": "123", "password": "abc"}` | Dictionary in the form of: `'{"/frontend": { "api_key": "123"}, "/backend": { "password": "abc123" }}'`. +| `install_command` | Yes | Commands to install all project dependencies | String of the installation command, i.e. `npm install`. | Dictionary in the form of: `'{"/frontend": "npm install", "/backend": "pip install -r ../requirements.txt"}'`. +| `build_command` | No | Commands to build project components | String of the build command, i.e. `npm build`. | Dictionary in the form of: `'{"/frontend": "npm build", "/backend": "python3 build.py"}'`. +| `start_command` | Yes | Commands to start project components. | String of the start command, i.e. `npm run`. | Dictionary in the form of: `'{"/frontend": "npm run", "/backend": "flask run"}'`. +| `runtime_language` | Yes | Runtime language of each project component, which can either be `nodejs` or `python`. | String of runtime environment, i.e. 
`nodejs` | Dictionary in the form of: `'{"/frontend": "nodejs", "/backend": "python"}'`. +| `root_start_command` | No | Command to run at the project directory root for **multi-component applications**. | N/A | String of the command, i.e. `Docker run` + +## Important Notes for Automatic Deployment + +Below are some important things to keep in mind if you want your application to be automatically deployed: +- If you are using meteor, you must start your application with the flags ``--allow-superuser`` and `--port 0.0.0.0:`. + - Meteor is a large package, so deploying it may take more time than other applications. +- When running a service, ensure it is listening on `0.0.0.0` (all network interfaces) instead of only locally at `127.0.0.1`. +- The GitHub Action will fail with an exit code and message if a property is not set up correctly. + + +## Output + +When a container is successfully created (GitHub Action is successful), you will see an output with all of your container details. This includes all your ports, container ID, container IP Address (internal in 10.15.x.x subnet), public domain name, and ssh command to access your container. 
+ +See an example output below: + +``` +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +šŸ”” COPY THESE PORTS DOWN — For External Access +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +šŸ“Œ Note: Your container listens on SSH Port 22 internally, + but EXTERNAL traffic must use the SSH port listed below: +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +āœ… Hostname Registration: polyglot-test-maxklema-pull-request → 10.15.129.23 +šŸ” SSH Port : 2344 +🌐 HTTP Port : 32000 +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +šŸ“¦ Container ID : 136 +🌐 Internal IP : 10.15.129.23 +šŸ”— Domain Name : https://polyglot-test-maxklema-pull-request.opensource.mieweb.org +šŸ› ļø SSH Access : ssh -p 2344 root@polyglot-test-maxklema-pull-request.opensource.mieweb.org +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +NOTE: Additional background scripts are being ran in detached terminal sessions. +Wait up to two minutes for all processes to complete. +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +Still not working? Contact Max K. at maxklema@gmail.com +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +``` + +> [!NOTE] +> Even if your GitHub Action workflow is finished, *it may not be accessible right away. Background tasks (migration, template cloning, cleanup, etc) are still be ran in detatched terminal sessions*. Wait a few minutes for all tasks to complete. 
+ +## Sample Workflow File + +The workflow file below is an example workflow designed to deploy a single-component nodejs application with a mongodb service: + +**With PAT:** + +```yaml +name: Proxmox Container Management + +on: + push: + create: + delete: + +jobs: + setup-runner: + runs-on: ubuntu-latest + steps: + - name: Install Dependencies + run: | + sudo apt install -y sshpass jq + - uses: maxklema/proxmox-launchpad@test + with: + proxmox_password: ${{ secrets.PROXMOX_PASSWORD }} + proxmox_username: ${{ secrets.PROXMOX_USERNAME }} + github_pat: ${{ secrets.GH_PAT }} + manage-container: + runs-on: self-hosted + needs: setup-runner + steps: + - uses: maxklema/proxmox-launchpad@test + with: + proxmox_password: ${{ secrets.PROXMOX_PASSWORD }} + proxmox_username: ${{ secrets.PROXMOX_USERNAME }} + public_key: ${{ secrets.PUBLIC_KEY }} + container_env_vars: '{"API_KEY": "1234"}' + install_command: npm i + start_command: npm start + runtime_language: nodejs + services: '["mongodb"]' +``` + +**Without PAT:** + +```yaml +name: Proxmox Container Management + +on: + push: + create: + delete: + +jobs: + manage-container: + runs-on: self-hosted + # no setup-runner job is needed when no PAT is provided + steps: + - uses: maxklema/proxmox-launchpad@test + with: + proxmox_password: ${{ secrets.PROXMOX_PASSWORD }} + proxmox_username: ${{ secrets.PROXMOX_USERNAME }} + public_key: ${{ secrets.PUBLIC_KEY }} + container_env_vars: '{"API_KEY": "1234"}' + install_command: npm i + start_command: npm start + runtime_language: nodejs + services: '["mongodb"]' +``` + + +## Misc. +Feel free to submit a PR/issue here or in [opensource-server](https://github.com/mieweb/opensource-server). 
+Author: [@maxklema](https://github.com/maxklema) diff --git a/ci-cd automation/proxmox-launchpad/action.yml b/ci-cd automation/proxmox-launchpad/action.yml new file mode 100644 index 00000000..79f26b59 --- /dev/null +++ b/ci-cd automation/proxmox-launchpad/action.yml @@ -0,0 +1,494 @@ +# action.yml +name: Proxmox LaunchPad +description: Manage Proxmox Containers for your Repository. +author: maxklema +branding: + icon: "package" + color: "purple" + +inputs: + proxmox_username: + required: true + proxmox_password: + required: true + container_password: + required: false + public_key: + required: false + http_port: + required: false + project_root: + required: false + container_env_vars: + required: false + install_command: + required: false + build_command: + required: false + start_command: + required: false + runtime_language: + required: false + services: + required: false + custom_services: + required: false + linux_distribution: + required: false + multi_component: + required: false + root_start_command: + required: false + github_pat: + required: false + +runs: + using: "composite" + steps: + - name: Check if action should run + shell: bash + id: should-run + env: + GITHUB_EVENT_NAME: ${{ github.event_name }} + GITHUB_EVENT_CREATED: ${{ github.event.created }} + run: | + if [[ "$GITHUB_EVENT_NAME" != "push" ]] || [[ "$GITHUB_EVENT_CREATED" == "false" ]]; then + echo "should_run=true" >> $GITHUB_OUTPUT + else + echo "should_run=false" >> $GITHUB_OUTPUT + echo "Skipping action: Push event with created=true" + fi + + - name: Determine Target Branch Name + shell: bash + id: branch-name + if: steps.should-run.outputs.should_run == 'true' + env: + GITHUB_EVENT_NAME: ${{ github.event_name }} + GITHUB_REF_NAME: ${{ github.ref_name }} + GITHUB_EVENT_REF: ${{ github.event.ref }} + run: | + if [[ "$GITHUB_EVENT_NAME" == "delete" ]]; then + TARGET_BRANCH="$GITHUB_EVENT_REF" + echo "Using deleted branch name: $TARGET_BRANCH" + else + TARGET_BRANCH="$GITHUB_REF_NAME" + 
echo "Using current branch name: $TARGET_BRANCH" + fi + echo "target_branch=$TARGET_BRANCH" >> $GITHUB_OUTPUT + + - name: Create Runner (If Needed) + shell: bash + id: create-runner + if: steps.should-run.outputs.should_run == 'true' + env: + GITHUB_REPOSITORY_FULL: ${{ github.repository }} + GITHUB_REPOSITORY_OWNER: ${{ github.repository_owner }} + TARGET_BRANCH: ${{ steps.branch-name.outputs.target_branch }} + CONTAINER_PASSWORD: ${{ inputs.container_password }} + PROXMOX_USERNAME: ${{ inputs.proxmox_username }} + PROXMOX_PASSWORD: ${{ inputs.proxmox_password }} + GITHUB_PAT: ${{ inputs.github_pat }} + GITHUB_API: ${{ github.api_url }} + LINUX_DISTRIBUTION: ${{ inputs.linux_distribution }} + PROJECT_REPOSITORY: ${{ github.server_url }}/${{ github.repository }} + GITHUB_JOB: ${{ github.job }} + run: | + REPO_NAME=$(basename "$GITHUB_REPOSITORY_FULL") + CONTAINER_NAME="${GITHUB_REPOSITORY_OWNER}-${REPO_NAME}-${TARGET_BRANCH}" + CONTAINER_NAME=${CONTAINER_NAME,,} + CONTAINER_NAME=$(echo "$CONTAINER_NAME" | sed 's/[^a-z0-9-]/-/g') + export CONTAINER_NAME + + # Auto-detect if this is a runner setup job based on job name or if no container inputs are provided + CREATE_RUNNER_JOB="N" + if [[ "$GITHUB_JOB" == *"setup"* ]] || [[ "$GITHUB_JOB" == *"runner"* ]]; then + CREATE_RUNNER_JOB="Y" + echo "CREATE_RUNNER_JOB=true" >> $GITHUB_OUTPUT + fi + + if [ ! -z "$GITHUB_PAT" ]; then + RESPONSE=$(curl --location ${GITHUB_API}/repos/${GITHUB_REPOSITORY_OWNER}/${REPO_NAME}/actions/runners --header "Authorization: token $GITHUB_PAT") + + while read -r RUN; do + RUNNER_NAME=$(echo "$RUN" | jq -r '.name') + if [ "$RUNNER_NAME" == "$CONTAINER_NAME" ]; then + if [ "${CREATE_RUNNER_JOB^^}" == "N" ]; then + exit 0 #Runner exists, continue to next steps + else + echo "STOP_SCRIPT=true" >> $GITHUB_OUTPUT + exit 0 # Runner exists, continue to next job. + fi + fi + done < <(echo "$RESPONSE" | jq -c '.runners[]') + + echo "Creating a Runner..." + set +e + sshpass -p 'mie123!' 
ssh \ + -T \ + -o StrictHostKeyChecking=no \ + -o UserKnownHostsFile=/dev/null \ + -o SendEnv="CONTAINER_NAME CONTAINER_PASSWORD PROXMOX_USERNAME PROXMOX_PASSWORD GITHUB_PAT LINUX_DISTRIBUTION PROJECT_REPOSITORY" \ + setup-runner@opensource.mieweb.org + + EXIT_STATUS=$? + + # Exit if a container exists but an associated runner does not. + if [ $EXIT_STATUS != 3 ]; then + echo "Something went wrong with creating/using a runner." + exit 1 + fi + + echo "STOP_SCRIPT=true" >> $GITHUB_OUTPUT + fi + + - name: Container Creation for Branch (If Needed) + id: create-lxc + shell: bash + if: ${{ (github.event_name == 'create' || github.event_name == 'push') && steps.should-run.outputs.should_run == 'true' }} + env: + GITHUB_EVENT: ${{ github.event_name }} + GITHUB_REPOSITORY_FULL: ${{ github.repository }} + GITHUB_REPOSITORY_OWNER: ${{ github.repository_owner }} + TARGET_BRANCH: ${{ steps.branch-name.outputs.target_branch }} + CONTAINER_PASSWORD: ${{ inputs.container_password }} + PROXMOX_USERNAME: ${{ inputs.proxmox_username }} + PROXMOX_PASSWORD: ${{ inputs.proxmox_password }} + PUBLIC_KEY: ${{ inputs.public_key }} + HTTP_PORT: ${{ inputs.http_port }} + DEPLOY_ON_START: ${{ inputs.deploy_on_start }} + PROJECT_REPOSITORY: ${{ github.server_url }}/${{ github.repository }} + PROJECT_BRANCH: ${{ steps.branch-name.outputs.target_branch }} + PROJECT_ROOT: ${{ inputs.project_root }} + REQUIRE_ENV_VARS: ${{ inputs.require_env_vars }} + CONTAINER_ENV_VARS: ${{ inputs.container_env_vars }} + INSTALL_COMMAND: ${{ inputs.install_command }} + START_COMMAND: ${{ inputs.start_command }} + BUILD_COMMAND: ${{ inputs.build_command }} + RUNTIME_LANGUAGE: ${{ inputs.runtime_language }} + REQUIRE_SERVICES: ${{ inputs.require_services }} + SERVICES: ${{ inputs.services }} + CUSTOM_SERVICES: ${{ inputs.custom_services }} + LINUX_DISTRIBUTION: ${{ inputs.linux_distribution }} + MULTI_COMPONENT: ${{ inputs.multi_component }} + ROOT_START_COMMAND: ${{ inputs.root_start_command }} + GITHUB_PAT: ${{ 
inputs.github_pat }} + GH_ACTION: y + run: | + set +e + REPO_NAME=$(basename "$GITHUB_REPOSITORY_FULL") + CONTAINER_NAME="${GITHUB_REPOSITORY_OWNER}-${REPO_NAME}-${TARGET_BRANCH}" + CONTAINER_NAME=${CONTAINER_NAME,,} + CONTAINER_NAME=$(echo "$CONTAINER_NAME" | sed 's/[^a-z0-9-]/-/g') + export CONTAINER_NAME + STOP_SCRIPT=${{ steps.create-runner.outputs.STOP_SCRIPT }} + if [ "$STOP_SCRIPT" != "true" ]; then + set +e + echo "Running Container Exists..." + + # Determine SSH target based on network location + EXTERNAL_IP=$(dig +short opensource.mieweb.org) + if [ "$EXTERNAL_IP" = "10.15.20.69" ]; then + SSH_TARGET="10.15.0.4" + else + SSH_TARGET="opensource.mieweb.org" + fi + + sshpass -p 'mie123!' ssh \ + -T \ + -o StrictHostKeyChecking=no \ + -o UserKnownHostsFile=/dev/null \ + -o SendEnv="PROXMOX_USERNAME PROXMOX_PASSWORD CONTAINER_NAME PROJECT_REPOSITORY" \ + container-exists@$SSH_TARGET + CONTAINER_EXISTS=$? + if [ $CONTAINER_EXISTS -eq 1 ]; then + echo "FAILED=1" >> $GITHUB_ENV # User does not own the container + elif [ $CONTAINER_EXISTS -eq 0 ]; then + echo "Cloning repository based on $PROJECT_BRANCH branch." + + sshpass -p 'mie123!' ssh \ + -T \ + -o StrictHostKeyChecking=no \ + -o UserKnownHostsFile=/dev/null \ + -o SendEnv="CONTAINER_NAME CONTAINER_PASSWORD PROXMOX_USERNAME PUBLIC_KEY PROXMOX_PASSWORD HTTP_PORT DEPLOY_ON_START PROJECT_REPOSITORY PROJECT_BRANCH PROJECT_ROOT REQUIRE_ENV_VARS CONTAINER_ENV_VARS INSTALL_COMMAND START_COMMAND RUNTIME_LANGUAGE REQUIRE_SERVICES SERVICES CUSTOM_SERVICES LINUX_DISTRIBUTION MULTI_COMPONENT ROOT_START_COMMAND GH_ACTION GITHUB_PAT" \ + create-container@$SSH_TARGET + + CONTAINER_CREATED=$? 
+ echo "CONTAINER_CREATED=true" >> $GITHUB_OUTPUT + if [ $CONTAINER_CREATED -ne 0 ]; then + echo "FAILED=1" >> $GITHUB_ENV + fi + fi + fi + + - name: Container Update on Branch Push + shell: bash + if: ${{ (github.event_name == 'push' && steps.create-lxc.outputs.CONTAINER_CREATED != 'true') && steps.should-run.outputs.should_run == 'true' }} + env: + GITHUB_EVENT: ${{ github.event_name }} + GITHUB_REPOSITORY_FULL: ${{ github.repository }} + GITHUB_REPOSITORY_OWNER: ${{ github.repository_owner }} + TARGET_BRANCH: ${{ steps.branch-name.outputs.target_branch }} + PROXMOX_USERNAME: ${{ inputs.proxmox_username }} + PROXMOX_PASSWORD: ${{ inputs.proxmox_password }} + PROJECT_REPOSITORY: ${{ github.server_url }}/${{ github.repository }} + PROJECT_BRANCH: ${{ steps.branch-name.outputs.target_branch }} + PROJECT_ROOT: ${{ inputs.project_root }} + INSTALL_COMMAND: ${{ inputs.install_command }} + START_COMMAND: ${{ inputs.start_command }} + BUILD_COMMAND: ${{ inputs.build_command }} + RUNTIME_LANGUAGE: ${{ inputs.runtime_language }} + MULTI_COMPONENT: ${{ inputs.multi_component }} + SERVICES: ${{ inputs.services }} + CUSTOM_SERVICES: ${{ inputs.custom_services }} + REQUIRE_SERVICES: ${{ inputs.require_services }} + LINUX_DISTRIBUTION: ${{ inputs.linux_distribution }} + DEPLOY_ON_START: ${{ inputs.deploy_on_start }} + ROOT_START_COMMAND: ${{ inputs.root_start_command }} + GITHUB_PAT: ${{ inputs.github_pat }} + HTTP_PORT: ${{ inputs.http_port }} + GH_ACTION: y + run: | + set +e + echo "Running Container Update..." 
+ REPO_NAME=$(basename "$GITHUB_REPOSITORY_FULL") + CONTAINER_NAME="${GITHUB_REPOSITORY_OWNER}-${REPO_NAME}-${TARGET_BRANCH}" + CONTAINER_NAME=${CONTAINER_NAME,,} + CONTAINER_NAME=$(echo "$CONTAINER_NAME" | sed 's/[^a-z0-9-]/-/g') + export CONTAINER_NAME + echo "$LINUX_DISTRIBUTION" + STOP_SCRIPT=${{ steps.create-runner.outputs.STOP_SCRIPT }} + if [ "$STOP_SCRIPT" != true ]; then + # Determine SSH target based on network location + EXTERNAL_IP=$(dig +short opensource.mieweb.org) + if [ "$EXTERNAL_IP" = "10.15.20.69" ]; then + SSH_TARGET="10.15.0.4" + else + SSH_TARGET="opensource.mieweb.org" + fi + + sshpass -p 'mie123!' ssh \ + -T \ + -o StrictHostKeyChecking=no \ + -o UserKnownHostsFile=/dev/null \ + -o SendEnv="CONTAINER_NAME PROXMOX_USERNAME PROXMOX_PASSWORD PROJECT_REPOSITORY PROJECT_BRANCH PROJECT_ROOT INSTALL_COMMAND START_COMMAND BUILD_COMMAND RUNTIME_LANGUAGE MULTI_COMPONENT ROOT_START_COMMAND DEPLOY_ON_START SERVICES CUSTOM_SERVICES REQUIRE_SERVICES LINUX_DISTRIBUTION GH_ACTION HTTP_PORT" \ + update-container@$SSH_TARGET + UPDATE_EXIT=$? 
+ if [ $UPDATE_EXIT -ne 0 ]; then + echo "FAILED=1" >> $GITHUB_ENV + fi + fi + + - name: Container Deletion on Branch Deletion (Check) + shell: bash + if: ${{ github.event_name == 'delete' && steps.should-run.outputs.should_run == 'true' }} + env: + GITHUB_EVENT: ${{ github.event_name }} + GITHUB_REPOSITORY_FULL: ${{ github.repository }} + GITHUB_REPOSITORY_OWNER: ${{ github.repository_owner }} + TARGET_BRANCH: ${{ steps.branch-name.outputs.target_branch }} + PROXMOX_USERNAME: ${{ inputs.proxmox_username }} + PROXMOX_PASSWORD: ${{ inputs.proxmox_password }} + PROJECT_REPOSITORY: ${{ github.server_url }}/${{ github.repository }} + GITHUB_PAT: ${{ inputs.github_pat }} + run: | + set +e + REPO_NAME=$(basename "$GITHUB_REPOSITORY_FULL") + CONTAINER_NAME="${GITHUB_REPOSITORY_OWNER}-${REPO_NAME}-${TARGET_BRANCH}" + CONTAINER_NAME=${CONTAINER_NAME,,} + CONTAINER_NAME=$(echo "$CONTAINER_NAME" | sed 's/[^a-z0-9-]/-/g') + export CONTAINER_NAME + STOP_SCRIPT=${{ steps.create-runner.outputs.STOP_SCRIPT }} + if [ "$STOP_SCRIPT" != true ]; then + # Determine SSH target based on network location + EXTERNAL_IP=$(dig +short opensource.mieweb.org) + if [ "$EXTERNAL_IP" = "10.15.20.69" ]; then + SSH_TARGET="10.15.0.4" + else + SSH_TARGET="opensource.mieweb.org" + fi + + sshpass -p 'mie123!' ssh \ + -T \ + -o StrictHostKeyChecking=no \ + -o UserKnownHostsFile=/dev/null \ + -o SendEnv="PROXMOX_USERNAME PROXMOX_PASSWORD CONTAINER_NAME GITHUB_PAT PROJECT_REPOSITORY" \ + delete-container@$SSH_TARGET + DELETE_EXIT=$? 
+ if [ $DELETE_EXIT -ne 0 ]; then + echo "FAILED=1" >> $GITHUB_ENV + fi + fi + + - name: Check if branch is part of a PR and comment + shell: bash + id: check-pr + if: steps.should-run.outputs.should_run == 'true' && steps.create-runner.outputs.CREATE_RUNNER_JOB != 'true' && env.FAILED != '1' + env: + GITHUB_TOKEN: ${{ inputs.github_pat }} + GITHUB_REPOSITORY: ${{ github.repository }} + TARGET_BRANCH: ${{ steps.branch-name.outputs.target_branch }} + GITHUB_REPOSITORY_OWNER: ${{ github.repository_owner }} + RUN_ID: ${{ github.run_id }} + run: | + if [ -z "$GITHUB_TOKEN" ]; then + echo "pr_number=" >> $GITHUB_OUTPUT + echo "is_pr=false" >> $GITHUB_OUTPUT + echo "No GitHub token provided, skipping PR detection" + exit 0 + fi + + # Check if this branch has an open PR + PR_DATA=$(curl -s -H "Authorization: token $GITHUB_TOKEN" \ + "https://api.github.com/repos/$GITHUB_REPOSITORY/pulls?state=open&head=${{ github.repository_owner }}:$TARGET_BRANCH") + + PR_NUMBER=$(echo "$PR_DATA" | jq -r '.[0].number // empty') + + if [ -n "$PR_NUMBER" ] && [ "$PR_NUMBER" != "null" ]; then + echo "pr_number=$PR_NUMBER" >> $GITHUB_OUTPUT + echo "is_pr=true" >> $GITHUB_OUTPUT + echo "Branch $TARGET_BRANCH is part of PR #$PR_NUMBER" + + # Generate container name + REPO_NAME=$(basename "$GITHUB_REPOSITORY") + CONTAINER_NAME="${GITHUB_REPOSITORY_OWNER}-${REPO_NAME}-${TARGET_BRANCH}" + CONTAINER_NAME=${CONTAINER_NAME,,} + CONTAINER_NAME=$(echo "$CONTAINER_NAME" | sed 's/[^a-z0-9-]/-/g') + + # Create initial comment on PR + CONTAINER_URL="https://${CONTAINER_NAME}.opensource.mieweb.org" + + COMMENT_BODY="## šŸš€ Proxmox LaunchPad Action + **Expected URL**: [$CONTAINER_NAME]($CONTAINER_URL) *(will be available once deployment completes)* + **Status**: āœ… Application was deployed according to workflow configurations. 
+ **Branch**: \`$TARGET_BRANCH\` + **Run ID**: [\`$RUN_ID\`](https://github.com/$GITHUB_REPOSITORY/actions/runs/$RUN_ID) + **Container Name**: \`$CONTAINER_NAME\` + + > This comment was automatically generated by Proxmox LaunchPad: The fastest way to deploy your repository code. To use Proxmox in your own repository, see: [Proxmox LaunchPad](https://github.com/marketplace/actions/proxmox-launchpad)." + + # Use jq to safely build the JSON payload from the variable + JSON_PAYLOAD=$(jq -n --arg body "$COMMENT_BODY" '{body: $body}') + + # Post the initial comment + curl -s -X POST \ + -H "Authorization: token $GITHUB_TOKEN" \ + -H "Content-Type: application/json" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/$GITHUB_REPOSITORY/issues/$PR_NUMBER/comments" \ + -d "$JSON_PAYLOAD" > /dev/null + + echo "Initial comment posted to PR #$PR_NUMBER" + else + echo "pr_number=" >> $GITHUB_OUTPUT + echo "is_pr=false" >> $GITHUB_OUTPUT + echo "Branch $TARGET_BRANCH is not part of any open PR" + fi + + - name: Comment on PR on Failure + if: env.FAILED == '1' + shell: bash + env: + GITHUB_TOKEN: ${{ inputs.github_pat }} + GITHUB_REPOSITORY: ${{ github.repository }} + TARGET_BRANCH: ${{ steps.branch-name.outputs.target_branch }} + RUN_ID: ${{ github.run_id }} + GITHUB_REPOSITORY_OWNER: ${{ github.repository_owner }} + run: | + if [ -z "$GITHUB_TOKEN" ]; then + echo "Cannot comment on PR: missing token" + exit 1 + fi + + # Check if this branch has an open PR + PR_DATA=$(curl -s -H "Authorization: token $GITHUB_TOKEN" \ + "https://api.github.com/repos/$GITHUB_REPOSITORY/pulls?state=open&head=${GITHUB_REPOSITORY_OWNER}:$TARGET_BRANCH") + + PR_NUMBER=$(echo "$PR_DATA" | jq -r '.[0].number // empty') + + if [ -z "$PR_NUMBER" ] || [ "$PR_NUMBER" == "null" ]; then + echo "Not a pull request, skipping failure comment." 
+ exit 0 + fi + + REPO_NAME=$(basename "$GITHUB_REPOSITORY") + CONTAINER_NAME="${GITHUB_REPOSITORY_OWNER}-${REPO_NAME}-${TARGET_BRANCH}" + CONTAINER_NAME=${CONTAINER_NAME,,} + CONTAINER_NAME=$(echo "$CONTAINER_NAME" | sed 's/[^a-z0-9-]/-/g') + + CONTAINER_URL="https://${CONTAINER_NAME}.opensource.mieweb.org" + + COMMENT_BODY="## šŸš€ Proxmox LaunchPad Action + **Expected URL**: [$CONTAINER_NAME]($CONTAINER_URL) *(will be available once deployment completes)* + **Status**: āŒ Application failed to deploy. View [\`$RUN_ID\`](https://github.com/$GITHUB_REPOSITORY/actions/runs/$RUN_ID) to see logs. + **Branch**: \`$TARGET_BRANCH\` + **Run ID**: [\`$RUN_ID\`](https://github.com/$GITHUB_REPOSITORY/actions/runs/$RUN_ID) + **Container Name**: \`$CONTAINER_NAME\` + + > This comment was automatically generated by Proxmox LaunchPad: The fastest way to deploy your repository code. To use Proxmox in your own repository, see: [Proxmox LaunchPad](https://github.com/marketplace/actions/proxmox-launchpad)." 
+ + JSON_PAYLOAD=$(jq -n --arg body "$COMMENT_BODY" '{body: $body}') + + # Post the comment + curl -s -X POST \ + -H "Authorization: token $GITHUB_TOKEN" \ + -H "Content-Type: application/json" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/$GITHUB_REPOSITORY/issues/$PR_NUMBER/comments" \ + -d "$JSON_PAYLOAD" > /dev/null + + echo "Failure comment posted to PR #$PR_NUMBER" + exit 1 + + - name: Create GitHub Deployment (Default Branch) + if: github.ref == format('refs/heads/{0}', github.event.repository.default_branch) && env.FAILED != '1' + shell: bash + env: + GITHUB_TOKEN: ${{ inputs.github_pat }} + GITHUB_REPOSITORY: ${{ github.repository }} + GITHUB_SHA: ${{ github.sha }} + GITHUB_REF: ${{ github.ref }} + GITHUB_REPOSITORY_OWNER: ${{ github.repository_owner }} + run: | + REPO_NAME=$(basename "$GITHUB_REPOSITORY") + CONTAINER_NAME="${GITHUB_REPOSITORY_OWNER}-${REPO_NAME}-${GITHUB_REF#refs/heads/}" + CONTAINER_NAME=${CONTAINER_NAME,,} + CONTAINER_NAME=$(echo "$CONTAINER_NAME" | sed 's/[^a-z0-9-]/-/g') + CONTAINER_URL="https://${CONTAINER_NAME}.opensource.mieweb.org" + DEPLOYMENT_RESPONSE=$(curl -s -X POST \ + -H "Authorization: token $GITHUB_TOKEN" \ + -H "Content-Type: application/json" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/$GITHUB_REPOSITORY/deployments" \ + -d '{ + "ref": "'${GITHUB_REF#refs/heads/}'", + "required_contexts": [], + "environment": "Preview - '$GITHUB_REPOSITORY'", + "description": "Deployment triggered from Proxmox LaunchPad action.", + "sha": "'$GITHUB_SHA'" + }') + DEPLOYMENT_ID=$(echo "$DEPLOYMENT_RESPONSE" | jq -r '.id') + if [ "$DEPLOYMENT_ID" != "null" ] && [ -n "$DEPLOYMENT_ID" ]; then + curl -s -X POST \ + -H "Authorization: token $GITHUB_TOKEN" \ + -H "Content-Type: application/json" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/$GITHUB_REPOSITORY/deployments/$DEPLOYMENT_ID/statuses" \ + -d '{ + "state": "success", + 
"description": "Deployment completed successfully.", + "environment": "Preview - '$GITHUB_REPOSITORY'", + "environment_url": "'$CONTAINER_URL'" + }' > /dev/null + echo "Deployment created and marked as successful for default branch: ${GITHUB_REF#refs/heads/}" + echo "Deployment URL: $CONTAINER_URL" + else + echo "Deployment creation failed." + fi + + - name: Catch All Failure Step + if: env.FAILED == '1' + shell: bash + run: | + echo "Workflow failed. See previous steps for details." + exit 1 From 65558bb946e9959a9fcaf57d97e19e827c1b6270 Mon Sep 17 00:00:00 2001 From: maxklema Date: Tue, 5 Aug 2025 15:59:48 -0400 Subject: [PATCH 8/8] proxmox launchpad submodule --- .gitmodules | 4 ++-- proxmox-launchpad | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) create mode 160000 proxmox-launchpad diff --git a/.gitmodules b/.gitmodules index 5d3fd6ee..c8f2499b 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +1,3 @@ -[submodule "ci-cd automation/proxmox-launchpad"] - path = ci-cd automation/proxmox-launchpad +[submodule "proxmox-launchpad"] + path = proxmox-launchpad url = https://github.com/maxklema/proxmox-launchpad.git diff --git a/proxmox-launchpad b/proxmox-launchpad new file mode 160000 index 00000000..038aff5a --- /dev/null +++ b/proxmox-launchpad @@ -0,0 +1 @@ +Subproject commit 038aff5ad0eacd9f77935ae8819ab59da13fc981