diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000..d5300330
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,6 @@
+Wazuh/node_modules/
+Wazuh/package-lock.json
+Wazuh/package.json
+Wazuh/.env
+Wazuh/.gitignore
+
diff --git a/README.md b/README.md
index 85c8fc88..7fec6147 100644
--- a/README.md
+++ b/README.md
@@ -7,7 +7,7 @@ This repository contains configuration files and scripts for managing a Proxmox-
## Cluster Graph
```mermaid
-
+
graph TD
%% Repository Structure
REPO[opensource-mieweb Repository]
@@ -37,14 +37,21 @@ graph TD
USER[User Access] --> DNS
DNS --> NGINX
NGINX --> CONTAINER
-
+
+ %% Wazuh Integration
+ CONTAINER --> |Wazuh Agent| WAGENT[Wazuh Agent]
+ WAGENT --> |reports to| WMANAGER[Wazuh Manager]
+ WMANAGER --> |sends data to| WINDEXER[Wazuh Indexer]
+
%% Styling
classDef folder fill:#e3f2fd,stroke:#1976d2,stroke-width:2px
classDef system fill:#f1f8e9,stroke:#689f38,stroke-width:2px
+ classDef wazuh fill:#fffde7,stroke:#fbc02d,stroke-width:2px
classDef user fill:#fff3e0,stroke:#f57c00,stroke-width:2px
-
+
class CICD,CC,DNS,GW,LDAP,NGINX,PL folder
class CONTAINER system
+ class WAGENT,WMANAGER,WINDEXER wazuh
class USER user
```
@@ -72,12 +79,17 @@ graph TD
- [`LDAP/`](LDAP/):
Contains LDAP authentication infrastructure including a custom Node.js LDAP server that bridges database user management with LDAP protocols, and automated LDAP client configuration tools for seamless container authentication integration. LDAP Server configured to reference the [Proxmox VE Users @pve realm](https://pve.proxmox.com/wiki/User_Management) with optional [Push Notification 2FA](https://github.com/mieweb/mieweb_auth_app)
+### Security
+
+- [`Wazuh/`](Wazuh/):
+ We utilize Wazuh, an open-source security management platform, to provide vulnerability detection and threat hunting services to our cluster. Our custom decoders and rules revolve mainly around mitigating SSH/PAM brute-force attacks in both our hypervisors and individual containers.
+
### GitHub Action Integration
- [`proxmox-launchpad/`](proxmox-launchpad/):
The Proxmox LaunchPad GitHub Action for automated container deployment directly from GitHub repositories, supporting both single and multi-component applications.
-- [A LDAPServer Server](https://github.com/mieweb/LDAPServer):
+- [`LDAPServer`](https://github.com/mieweb/LDAPServer):
LDAP Server configured to reference the [Proxmox VE Users @pve realm](https://pve.proxmox.com/wiki/User_Management) with optional [Push Notification 2FA](https://github.com/mieweb/mieweb_auth_app)
## Create a Container
@@ -99,6 +111,7 @@ If you have an account in the [opensource-mieweb](https://opensource.mieweb.org:
- **GitHub Integration**: The Proxmox LaunchPad action automates the entire process from repository push to live deployment, including dependency installation, service configuration, and application startup.
- **CI/CD Pipeline**: Automated scripts used by [Proxmox LaunchPad](#proxmox-launchpad) to handle container updates, existence checks, and cleanup operations to maintain a clean and efficient hosting environment.
- **LDAP Server**: All LXC Container Authentication is handled by a centralized LDAP server housed in the cluster. Each Container is configured with SSSD, which communicates with the LDAP server to verify/authenticate user credentials. This approach is more secure than housing credentials locally.
+- **Wazuh**: Both containers and hypervisors are Wazuh Agents, and send all logs to our centralized Wazuh Manager, which matches each log against a large database of decoders and rules. If certain rules are triggered, active response mechanisms respond by triggering certain commands, a common one being a firewall drop of all packets originating from a certain source IP.
## Proxmox LaunchPad
diff --git a/Wazuh/firewall-block.sh b/Wazuh/firewall-block.sh
new file mode 100755
index 00000000..6f909c4b
--- /dev/null
+++ b/Wazuh/firewall-block.sh
@@ -0,0 +1,86 @@
+#!/bin/bash
+# Custom Firewall Drop Script for Wazuh
+# Template Generated by Copilot
+# Last Modified by Maxwell Klema on August 11th, 2025
+# ---------------------------------------------------
+
+# Usage: This script reads JSON input from STDIN
+# Expected JSON format:
+# {
+#   "version": 1,
+#   "command": "add|delete",
+#   "parameters": {
+#     "alert": {
+#       "data": {
+#         "srcip": "IP_ADDRESS"
+#       }
+#     }
+#   }
+# }
+# Full list: https://documentation.wazuh.com/current/user-manual/capabilities/active-response/custom-active-response-scripts.html
+
+# Shared log file used by all Wazuh active-response scripts.
+LOG_FILE="/var/ossec/logs/active-responses.log"
+
+# log_message MESSAGE — append a timestamped entry to the active-response log.
+log_message() {
+    echo "[$(date '+%Y/%m/%d %H:%M:%S')] $1" >> "$LOG_FILE"
+}
+
+# jq is required for JSON parsing; fail loudly rather than silently misparsing.
+if ! command -v jq >/dev/null 2>&1; then
+    log_message "ERROR: jq is not installed"
+    exit 1
+fi
+
+# Wazuh delivers the whole alert JSON as a single line on STDIN.
+read -r INPUT
+
+# Extract the action and offending source IP ('// empty' -> "" when absent).
+COMMAND=$(echo "$INPUT" | jq -r '.command // empty')
+SRCIP=$(echo "$INPUT" | jq -r '.parameters.alert.data.srcip // empty')
+
+# Validate input
+if [[ -z "$COMMAND" || -z "$SRCIP" ]]; then
+    log_message "ERROR: Invalid input - missing command or srcip"
+    exit 1
+fi
+
+# Validate IPv4 address format, including the 0-255 range of each octet
+# (the previous pattern accepted values such as 999.1.1.1).
+if ! [[ "$SRCIP" =~ ^([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})$ ]]; then
+    log_message "ERROR: Invalid IP address format: $SRCIP"
+    exit 1
+fi
+for octet in "${BASH_REMATCH[@]:1}"; do
+    # 10# forces base-10 so leading zeros (e.g. "08") are not read as octal.
+    if (( 10#$octet > 255 )); then
+        log_message "ERROR: Invalid IP address format: $SRCIP"
+        exit 1
+    fi
+done
+
+# add_rule IP — add IP to the "blacklist" ipset (packets from it are dropped).
+add_rule() {
+    local ip="$1"
+    # Test the command directly instead of inspecting $? afterwards.
+    if ipset add blacklist "$ip" 2>/dev/null; then
+        log_message "Successfully added $ip to blacklist"
+    else
+        log_message "WARNING: Failed to add $ip to blacklist or it may already exist"
+    fi
+}
+
+# remove_rule IP — remove IP from the "blacklist" ipset.
+remove_rule() {
+    local ip="$1"
+    if ipset del blacklist "$ip" 2>/dev/null; then
+        log_message "Successfully removed $ip from blacklist"
+    else
+        log_message "WARNING: Failed to remove $ip from blacklist or it may not exist"
+    fi
+}
+
+# Execute based on command
+case "$COMMAND" in
+    "add")
+        add_rule "$SRCIP"
+        ;;
+    "delete")
+        remove_rule "$SRCIP"
+        ;;
+    *)
+        log_message "ERROR: Unknown command: $COMMAND"
+        exit 1
+        ;;
+esac
+
+exit 0
diff --git a/Wazuh/local_decoders.xml b/Wazuh/local_decoders.xml
new file mode 100644
index 00000000..3c7818ef
--- /dev/null
+++ b/Wazuh/local_decoders.xml
@@ -0,0 +1,9 @@
+
+
+
+
+ kernel
+ SSH_NEW: IN=
+ SSH_NEW: IN=(\S+) OUT=(\S*) MAC=(\S+) SRC=(\S+) DST=(\S+) LEN=(\d+) TOS=(\S+) PREC=(\S+) TTL=(\d+) ID=(\d+) DF PROTO=(\S+) SPT=(\d+) DPT=(\d+) WINDOW=(\d+) RES=(\S+) CWR ECE SYN URGP=(\d+)
+ in_interface,out_interface,mac,srcip,dstip,len,tos,prec,ttl,id,protocol,srcport,dstport,window,res,urgp
+
\ No newline at end of file
diff --git a/Wazuh/local_rules.xml b/Wazuh/local_rules.xml
new file mode 100644
index 00000000..c6009dfa
--- /dev/null
+++ b/Wazuh/local_rules.xml
@@ -0,0 +1,38 @@
+
+
+
+
+
+
+
+
+ 5716
+ 1.1.1.1
+ sshd: authentication failed from IP 1.1.1.1.
+ authentication_failed,pci_dss_10.2.4,pci_dss_10.2.5,
+
+
+
+
+
+
+
+
+ hypervisor-ssh-bruteforce
+ SSH connection attempt detected by iptables from $(srcip) to port $(dstport)
+
+
+
+
+ 100002
+ intern-phxdc-pve1
+
+
+ Possible SSH brute force detected - $(frequency) attempts in $(timeframe) seconds from $(srcip)
+
+ T1110
+
+
+
\ No newline at end of file
diff --git a/Wazuh/manage-agents.js b/Wazuh/manage-agents.js
new file mode 100644
index 00000000..e8e0e12c
--- /dev/null
+++ b/Wazuh/manage-agents.js
@@ -0,0 +1,140 @@
+// Script to manage Wazuh Agents on the Wazuh Manager
+// Last Modified on August 7th, 2025 by Maxwell Klema
+// -------------------------------------------------
+
+const axios = require('axios');
+const env = require('dotenv').config({ path: '/var/lib/vz/snippets/Wazuh/.env', quiet: true});
+
+const authConfig = {
+ method: 'post',
+ url: 'https://wazuh-server.opensource.mieweb.org/security/user/authenticate',
+ maxBodyLength: Infinity,
+ headers: {
+ 'Authorization': `Basic ${Buffer.from(`${process.env.API_USERNAME}:${process.env.API_PASSWORD}`).toString('base64')}`,
+ }
+};
+
+async function getJWTToken() {
+ const response = await axios.request(authConfig);
+ if (response.status !== 200) {
+ return null;
+ }
+ return response.data.data.token;
+}
+
+async function getAgents() {
+
+ const JWT = await getJWTToken();
+ if (!JWT) {
+ console.log('fail');
+ return;
+ }
+
+ let config = {
+ method: 'get',
+ url: 'https://wazuh-server.opensource.mieweb.org/agents?',
+ maxBodyLength: Infinity,
+ headers: {
+ 'content-type': 'application/json',
+ 'Authorization': `Bearer ${JWT}`,
+ }
+ }
+
+ axios.request(config).then((response) => {
+ const agents = response.data.data.affected_items;
+ if (!agents || agents.length === 0) {
+ console.log('fail');
+ return;
+ }
+
+ agents.forEach(agent => {
+ console.log(agent.name);
+ });
+ });
+}
+
+async function addAgent(containerName, containerIP) {
+
+ const JWT = await getJWTToken();
+ if (!JWT) {
+ console.log('fail');
+ return;
+ }
+
+ // Add the Agent to the Manager
+ let agentConfig = {
+ method: 'post',
+ url: 'https://wazuh-server.opensource.mieweb.org/agents?pretty=true',
+ headers: {
+ 'Content-Type': 'application/json',
+ 'Authorization': `Bearer ${JWT}`
+ },
+ data: {
+ 'name': containerName,
+ 'ip': containerIP
+ }
+ };
+
+ const response = await axios.request(agentConfig);
+ if (response.status !== 200) {
+ console.log('fail');
+ }
+ const agentKey = response.data.data.key;
+ console.log(agentKey);
+}
+
+async function deleteAgent(agentName) {
+
+ const JWT = await getJWTToken();
+
+ if (!JWT) {
+ console.log('fail');
+ return;
+ }
+
+ const agent_id = await getAgentIDByName(agentName, JWT);
+
+ if (!agent_id) {
+ console.log('fail');
+ return;
+ }
+
+ let config = {
+ method: 'delete',
+ url: `https://wazuh-server.opensource.mieweb.org/agents/?agents_list=${agent_id}&status=all&older_than=0s`,
+ maxBodyLength: Infinity,
+ headers: {
+ 'content-type': 'application/json',
+ 'Authorization': `Bearer ${JWT}`,
+ }
+ };
+
+ axios.request(config).then((response) => {
+ if (response.status !== 200) {
+ console.log('fail');
+ return;
+ }
+ console.log('success');
+ }).catch((error) => {
+ console.log('fail');
+ });
+}
+
+async function getAgentIDByName(agentName, JWT) {
+ let config = {
+ method: 'get',
+ url: 'https://wazuh-server.opensource.mieweb.org/agents/?name=' + agentName,
+ maxBodyLength: Infinity,
+ headers: {
+ 'Authorization': `Bearer ${JWT}`
+ }
+ };
+
+ const response = await axios.request(config);
+ if (response.status !== 200) {
+ return null;
+ }
+ return response.data.data.affected_items[0].id;
+}
+
+module.exports = { getAgents, addAgent, deleteAgent };
\ No newline at end of file
diff --git a/Wazuh/ossec.conf b/Wazuh/ossec.conf
new file mode 100644
index 00000000..4e5f6fcd
--- /dev/null
+++ b/Wazuh/ossec.conf
@@ -0,0 +1,350 @@
+
+
+
+
+ yes
+ yes
+ no
+ no
+ 10m
+ 0
+ yes
+
+
+
+ 3
+
+
+
+
+ plain
+ 2
+
+
+
+ secure
+ 1514
+ tcp
+ 131072
+
+
+
+
+ no
+ yes
+ yes
+ yes
+ yes
+ yes
+ yes
+ yes
+
+
+ 43200
+
+ etc/rootcheck/rootkit_files.txt
+ etc/rootcheck/rootkit_trojans.txt
+
+ yes
+
+ /var/lib/containerd
+ /var/lib/docker/overlay2
+
+
+
+ yes
+ 1800
+ 1d
+ yes
+
+ wodles/java
+ wodles/ciscat
+
+
+
+
+ yes
+ yes
+ /var/log/osquery/osqueryd.results.log
+ /etc/osquery/osquery.conf
+ yes
+
+
+
+
+ no
+ 1h
+ yes
+ yes
+ yes
+ yes
+ yes
+ yes
+ yes
+
+
+
+ 10
+
+
+
+
+ yes
+ yes
+ 12h
+ yes
+
+
+
+ yes
+ 12h
+ yes
+ yes
+ yes
+ yes
+ 60m
+
+
+
+ yes
+
+ https://10.15.101.106:9200
+
+
+
+ /etc/filebeat/certs/root-ca.pem
+
+ /etc/filebeat/certs/wazuh-server.pem
+ /etc/filebeat/certs/wazuh-server-key.pem
+
+
+
+
+
+ no
+
+
+ 43200
+
+ yes
+
+
+ yes
+
+ no
+
+
+ /etc,/usr/bin,/usr/sbin
+ /bin,/sbin,/boot
+
+
+ /etc/mtab
+ /etc/hosts.deny
+ /etc/mail/statistics
+ /etc/random-seed
+ /etc/random.seed
+ /etc/adjtime
+ /etc/httpd/logs
+ /etc/utmpx
+ /etc/wtmpx
+ /etc/cups/certs
+ /etc/dumpdates
+ /etc/svc/volatile
+
+
+ .log$|.swp$
+
+
+ /etc/ssl/private.key
+
+ yes
+ yes
+ yes
+ yes
+
+
+ 10
+
+
+ 50
+
+
+
+ yes
+ 5m
+ 10
+
+
+
+
+
+
+ 10.15.0.0/16
+
+
+
+ disable-account
+ disable-account
+ yes
+
+
+
+ restart-wazuh
+ restart-wazuh
+
+
+
+ firewall-drop
+ custom-firewall-drop.sh
+ no
+
+
+
+ host-deny
+ host-deny
+ yes
+
+
+
+ route-null
+ route-null
+ yes
+
+
+
+ win_route-null
+ route-null.exe
+ yes
+
+
+
+ netsh
+ netsh.exe
+ yes
+
+
+
+ firewall-drop
+ local
+ 5763
+
+
+
+ firewall-drop
+ local
+ 100003
+
+
+
+ firewall-drop
+ local
+ 5712
+
+
+
+ firewall-drop
+ local
+ 5551
+
+
+
+
+
+ firewall-drop
+ local
+ 15
+
+
+
+
+ command
+ df -P
+ 360
+
+
+
+ full_command
+ netstat -tulpn | sed 's/\([[:alnum:]]\+\)\ \+[[:digit:]]\+\ \+[[:digit:]]\+\ \+\(.*\):\([[:digit:]]*\)\ \+\([0-9\.\:\*]\+\).\+\ \([[:digit:]]*\/[[:alnum:]\-]*\).*/\1 \2 == \3 == \4 \5/' | sort -k 4 -g | sed 's/ == \(.*\) ==/:\1/' | sed 1,2d
+ netstat listening ports
+ 360
+
+
+
+ full_command
+ last -n 20
+ 360
+
+
+
+
+ ruleset/decoders
+ ruleset/rules
+ 0215-policy_rules.xml
+ etc/lists/audit-keys
+ etc/lists/amazon/aws-eventnames
+ etc/lists/security-eventchannel
+
+
+ etc/decoders
+ etc/rules
+
+
+
+ yes
+ 1
+ 64
+ 15m
+
+
+
+
+ no
+ 1515
+ no
+ yes
+ no
+ HIGH:!ADH:!EXP:!MD5:!RC4:!3DES:!CAMELLIA:@STRENGTH
+
+ no
+ etc/sslmanager.cert
+ etc/sslmanager.key
+ no
+
+
+
+ wazuh
+ node01
+ master
+
+ 1516
+ 0.0.0.0
+
+ NODE_IP
+
+ no
+ yes
+
+
+
+
+
+
+ journald
+ journald
+
+
+
+ syslog
+ /var/ossec/logs/active-responses.log
+
+
+
+ syslog
+ /var/log/dpkg.log
+
+
+
\ No newline at end of file
diff --git a/Wazuh/package-lock.json b/Wazuh/package-lock.json
new file mode 100644
index 00000000..c7d71f14
--- /dev/null
+++ b/Wazuh/package-lock.json
@@ -0,0 +1,280 @@
+{
+ "name": "Wazuh",
+ "lockfileVersion": 3,
+ "requires": true,
+ "packages": {
+ "": {
+ "dependencies": {
+ "axios": "^1.11.0",
+ "dotenv": "^17.2.1"
+ }
+ },
+ "node_modules/asynckit": {
+ "version": "0.4.0",
+ "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz",
+ "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q=="
+ },
+ "node_modules/axios": {
+ "version": "1.11.0",
+ "resolved": "https://registry.npmjs.org/axios/-/axios-1.11.0.tgz",
+ "integrity": "sha512-1Lx3WLFQWm3ooKDYZD1eXmoGO9fxYQjrycfHFC8P0sCfQVXyROp0p9PFWBehewBOdCwHc+f/b8I0fMto5eSfwA==",
+ "dependencies": {
+ "follow-redirects": "^1.15.6",
+ "form-data": "^4.0.4",
+ "proxy-from-env": "^1.1.0"
+ }
+ },
+ "node_modules/call-bind-apply-helpers": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz",
+ "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==",
+ "dependencies": {
+ "es-errors": "^1.3.0",
+ "function-bind": "^1.1.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/combined-stream": {
+ "version": "1.0.8",
+ "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz",
+ "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==",
+ "dependencies": {
+ "delayed-stream": "~1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/delayed-stream": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz",
+ "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==",
+ "engines": {
+ "node": ">=0.4.0"
+ }
+ },
+ "node_modules/dotenv": {
+ "version": "17.2.1",
+ "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-17.2.1.tgz",
+ "integrity": "sha512-kQhDYKZecqnM0fCnzI5eIv5L4cAe/iRI+HqMbO/hbRdTAeXDG+M9FjipUxNfbARuEg4iHIbhnhs78BCHNbSxEQ==",
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://dotenvx.com"
+ }
+ },
+ "node_modules/dunder-proto": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz",
+ "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==",
+ "dependencies": {
+ "call-bind-apply-helpers": "^1.0.1",
+ "es-errors": "^1.3.0",
+ "gopd": "^1.2.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-define-property": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz",
+ "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==",
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-errors": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz",
+ "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==",
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-object-atoms": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz",
+ "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==",
+ "dependencies": {
+ "es-errors": "^1.3.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-set-tostringtag": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz",
+ "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==",
+ "dependencies": {
+ "es-errors": "^1.3.0",
+ "get-intrinsic": "^1.2.6",
+ "has-tostringtag": "^1.0.2",
+ "hasown": "^2.0.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/follow-redirects": {
+ "version": "1.15.11",
+ "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.11.tgz",
+ "integrity": "sha512-deG2P0JfjrTxl50XGCDyfI97ZGVCxIpfKYmfyrQ54n5FO/0gfIES8C/Psl6kWVDolizcaaxZJnTS0QSMxvnsBQ==",
+ "funding": [
+ {
+ "type": "individual",
+ "url": "https://github.com/sponsors/RubenVerborgh"
+ }
+ ],
+ "engines": {
+ "node": ">=4.0"
+ },
+ "peerDependenciesMeta": {
+ "debug": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/form-data": {
+ "version": "4.0.4",
+ "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.4.tgz",
+ "integrity": "sha512-KrGhL9Q4zjj0kiUt5OO4Mr/A/jlI2jDYs5eHBpYHPcBEVSiipAvn2Ko2HnPe20rmcuuvMHNdZFp+4IlGTMF0Ow==",
+ "dependencies": {
+ "asynckit": "^0.4.0",
+ "combined-stream": "^1.0.8",
+ "es-set-tostringtag": "^2.1.0",
+ "hasown": "^2.0.2",
+ "mime-types": "^2.1.12"
+ },
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/function-bind": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz",
+ "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==",
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/get-intrinsic": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz",
+ "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==",
+ "dependencies": {
+ "call-bind-apply-helpers": "^1.0.2",
+ "es-define-property": "^1.0.1",
+ "es-errors": "^1.3.0",
+ "es-object-atoms": "^1.1.1",
+ "function-bind": "^1.1.2",
+ "get-proto": "^1.0.1",
+ "gopd": "^1.2.0",
+ "has-symbols": "^1.1.0",
+ "hasown": "^2.0.2",
+ "math-intrinsics": "^1.1.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/get-proto": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz",
+ "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==",
+ "dependencies": {
+ "dunder-proto": "^1.0.1",
+ "es-object-atoms": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/gopd": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz",
+ "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/has-symbols": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz",
+ "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/has-tostringtag": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz",
+ "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==",
+ "dependencies": {
+ "has-symbols": "^1.0.3"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/hasown": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz",
+ "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==",
+ "dependencies": {
+ "function-bind": "^1.1.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/math-intrinsics": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz",
+ "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==",
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/mime-db": {
+ "version": "1.52.0",
+ "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
+ "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/mime-types": {
+ "version": "2.1.35",
+ "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
+ "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
+ "dependencies": {
+ "mime-db": "1.52.0"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/proxy-from-env": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz",
+ "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg=="
+ }
+ }
+}
diff --git a/Wazuh/package.json b/Wazuh/package.json
new file mode 100644
index 00000000..cb9f6521
--- /dev/null
+++ b/Wazuh/package.json
@@ -0,0 +1,6 @@
+{
+ "dependencies": {
+ "axios": "^1.11.0",
+ "dotenv": "^17.2.1"
+ }
+}
diff --git a/Wazuh/prune_agents.sh b/Wazuh/prune_agents.sh
new file mode 100755
index 00000000..32e0f03a
--- /dev/null
+++ b/Wazuh/prune_agents.sh
@@ -0,0 +1,73 @@
+# This script scrapes containers that no longer exist and removes them from the Wazuh manager.
+# Last Modified by Maxwell Klema on August 7th, 2025
+# --------------------------------------------------
+
+LOG_FILE="/var/log/prune_agents.log"
+PCT_BIN="/usr/sbin/pct"
+PVE_NODES=("localhost" "10.15.0.5")
+
+write_log() {
+ message="$1"
+ echo "[$(date '+%Y-%m-%d %H:%M:%S')] - $message" >> "$LOG_FILE"
+}
+
+# Scrape a list of containers on PVE1 and PVE2
+
+EXISTING_HOSTNAMES=""
+EXISTING_AGENTS=""
+
+for node in ${PVE_NODES[@]}; do
+ if [ "$node" == "localhost" ]; then
+ if ! command -v "$PCT_BIN" &> /dev/null; then
+ write_log "❌ Error: $PCT_BIN not found on localhost."
+ exit 1
+ fi
+
+ HOSTNAMES=$("$PCT_BIN" list | awk 'NR>1 {print $3}' || true)
+
+ while read -r hostname; do
+ [[ -n "$hostname" ]] && EXISTING_HOSTNAMES+="$hostname"$'\n'
+ done <<< "$HOSTNAMES"
+
+ write_log "Retrieved hostnames from localhost:"
+ write_log "$HOSTNAMES"
+ else
+ HOSTNAMES_CMD="${PCT_BIN} list | awk 'NR>1 {print \$3}' || true"
+ HOSTNAMES=$(ssh "$node" "$HOSTNAMES_CMD")
+
+ if [[ "$HOSTNAMES" =~ "Permission denied" || "$HOSTNAMES" =~ "Connection refused" || "$HOSTNAMES" =~ "Host key verification failed" ]]; then
+ write_log "ERROR: SSH to $node failed: $HOSTNAMES"
+ continue
+ fi
+
+ while read -r hostname; do
+ [[ -n "$hostname" ]] && EXISTING_HOSTNAMES+="$hostname"$'\n'
+ done <<< "$HOSTNAMES"
+
+ write_log "Retrieved hostnames from $node:"
+ write_log "$HOSTNAMES"
+ fi
+done
+
+# Scrape a list of containers (agent-names) on the Wazuh manager
+
+EXISTING_AGENTS=$(node /var/lib/vz/snippets/Wazuh/runner.js getAgents | sed '1d')
+
+write_log "Retrieved agents from Wazuh manager:"
+write_log "$EXISTING_AGENTS"
+
+# Iterate over each agent and if a existing host name does not exist, delete the agent.
+
+while read -r agent; do
+ if ! echo "$EXISTING_HOSTNAMES" | grep -q "^$agent$" && [[ ! "$agent" =~ ^intern-phxdc-pve[0-9]$ ]]; then
+ write_log "Removing agent $agent from Wazuh manager..."
+ REMOVE_AGENT=$(node /var/lib/vz/snippets/Wazuh/runner.js deleteAgent "$agent")
+ if [ "$REMOVE_AGENT" == "success" ]; then
+ write_log "✅ Successfully removed agent $agent."
+ else
+ write_log "❌ Failed to remove agent $agent."
+ fi
+ else
+ write_log "Agent $agent is still active. No action taken."
+ fi
+done <<< "$EXISTING_AGENTS"
diff --git a/Wazuh/register-agent.sh b/Wazuh/register-agent.sh
new file mode 100644
index 00000000..724cd93a
--- /dev/null
+++ b/Wazuh/register-agent.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+# Wazuh Registration Script to register an agent with the Wazuh manager
+# Last Modified on August 6th, 2025 by Maxwell Klema
+# -------------------------------------------------
+
+
+KEY=$(node /var/lib/vz/snippets/Wazuh/runner.js addAgent "$CONTAINER_NAME" "$CONTAINER_IP" | sed -n '2p')
+MANAGER_IP="10.15.173.19"
+
+if [ "$KEY" == "fail" ]; then
+ echo "Failed to register agent with Wazuh manager."
+ exit 1
+fi
+
+# Install all necessary dependencies inside the container and register the agent with the Wazuh manager
+
+pct enter "$CONTAINER_ID" -- < [!NOTE]
-> This project is new and is in a early version. There are likely bugs. If you encounter any, please create an issue.
-
-## Table of Contents
-1. [Video Walkthroughs](#video-walkthroughs)
-2. [Sequence Diagram](#sequence-diagram)
-3. [Prerequisites](#prerequisites)
-4. [Getting Started](#getting-started)
- - [Create-Runner Job](#create-runner-workflow-job)
- - [Personal Access Token](#creating-a-github-pat-for-your-workflow)
- - [Runner Job](#runner-job)
- - [Manage-Container Job](#manage-container-workflow-job)
-5. [Configurations](#configurations)
- - [Basic Properties](#basic-properties)
- - [Automatic Deployment Properties](#automatic-deployment-properties)
-6. [Important Notes for Automatic Deployment](#important-notes-for-automatic-deployment)
-7. [Output](#output)
-8. [Sample Workflow File ](#sample-workflow-file)
-9. [Misc.](#misc)
-
-## Video Walkthroughs
-
-I have created a series of videos to walk you through automatic deployment, both in GitHub and via the command line.
-
-**[Long-Form]** Proxmox LaunchPad Walkthrough: [Video](https://youtu.be/Xa2L1o-atEM)
-**[Short-Form]** Proxmox LaunchPad Demonstration: [Short](https://youtube.com/shorts/SuK73Jej5j4)
-**[Long-Form]** Automatic Deployment through Command Line: [Video](https://youtu.be/acDW-a32Yr8)
-**[Long-Form]** Getting Started with Creating LXC Continers with Proxmox: [Video](https://youtu.be/sVW3dkBqs4E)
-
-## Sequence Diagram
-
-The sequence diagram below describes the sequence of events executed by this Github Action.
-
-```mermaid
-sequenceDiagram
- participant Dev as Developer
- participant GH as GitHub
- participant GHAR as GitHub Actions Runner (hosted)
- participant Prox as Proxmox Cluster
- participant LXC as LXC Container (Self-hosted Runner)
-
- Dev->>GH: Push/Create/Delete branch
- GH->>GHAR: Trigger workflow
-
- alt Push/Create event
- GHAR->>Prox: Check if LXC container exists for branch
- alt Container does not exist
- GHAR->>Prox: Clone template, create LXC container
- Prox->>LXC: Start container, configure self-hosted runner
- GHAR->>LXC: Register self-hosted runner
- GHAR->>LXC: Run manage container job (install deps, clone repo, install services, deploy app)
- else Container exists
- GHAR->>Prox: Call update script
- Prox->>LXC: Update container contents, restart with latest branch
- end
- else Delete event
- GHAR->>LXC: Call delete-container script
- LXC->>Prox: Remove runner and delete LXC container
- end
-```
-
-## Prerequisites
-- Proxmox Datacluster Setup that mirrors/forks [https://github.com/mieweb/opensource-server](https://github.com/mieweb/opensource-server).
-- Valid Proxmox Account.
-
-## Getting Started
-
-> [!WARNING]
-> This Github Action requires you to pass your Github Personal Access Token in order to create runners. If you are comfortable doing this, see [Create-Runner Job](#create-runner-workflow-job). If you are not, you may supply your own self-hosted runner and skip to [Manage-Container Job](#manage-container-workflow-job).
-
-To use this action in your repository, you need to add the following trigger events in a workflow file:
-
-```yaml
-on:
- push:
- create:
- delete:
-```
-
-This allows a container to be created/updated on a push command, created when a new branch is created, and deleted when a branch is deleted (like in the case of an accepted PR).
-
-### Create-Runner Workflow Job
-
-> [!CAUTION]
-> If you choose to pass in your GitHub Personal Access Token, keep it in a secure place and do not share it with anyone.
-
-#### Creating a GitHub PAT for your Workflow
-
-This Github Action requires you to pass your Github Personal Access Token in order to create runners. To create a PAT, navigate to your GitHub account settings. Then, on the bottom left-hand side, click developer settings. Navigate to Personal Access Tokens (classic). Click on generate new token, then give your token a name and an expiration date. Finally, select the manage_runners:org permission or the manage_runners:enterprise permission, depending on where your repository is housed. Finally, a token should be generated. Make sure to place the token somewhere securely. Then, add it as a repository secret in the repository that you want to run your workflow file in.
-
-#### Runner Job
-
-Before a container can be managed, a self-hosted runner must be installed on the LXC container to complete future workflow jobs. To do this, a github-supplied runner needs to create the container and install/start a custom runner on it that is linked to your repository.
-
-The create-runner job in your workflow file should look similar to this:
-
-```yaml
-setup-runner:
- runs-on: ubuntu-latest
- steps:
- - name: Install Dependencies
- run: |
- sudo apt install -y sshpass jq
-
- - uses: maxklema/proxmox-launchpad@main
- with:
- proxmox_password: ${{ secrets.PROXMOX_PASSWORD }}
- proxmox_username: ${{ secrets.PROXMOX_USERNAME }}
- github_pat: ${{ secrets.GH_PAT }}
-```
-
-The GitHub runner needs to install sshpass (used to authenticate into another host using password authentication) and jq (a popular package for managing/parsing JSON data).
-
-In the second step, 3 fields are required: `proxmox_username`, `proxmox_password`, and `github_pat`
-
-To see an explanation for these fields: See [Basic Properties](#basic-properties)
-
-
-### Manage-Container Workflow Job
-
-The second job in your workflow file should look similar to this:
-
-> [!NOTE]
-> If you chose to run this on your own self-hosted runner instead of the action creating one for you, this will be your first job. Therefore, the needs parameter is not needed.
-
-```yaml
- manage-container:
- runs-on: self-hosted
- needs: setup-runner
- steps:
- - uses: maxklema/proxmox-launchpad@test
- with:
- proxmox_password: ${{ secrets.PROXMOX_PASSWORD }}
- proxmox_username: ${{ secrets.PROXMOX_USERNAME }}
-```
-
-
-
-## Configurations
-
-At the very minimum, two configuration settings are required to create any container. With all of these properties specified, you can create an empty container for a branch.
-
-### Basic Properties
-
-| Propety | Required? | Description | Supplied by Github? |
-| ---------------- | ------ | ---------------------------------------------- | ------ |
-| `proxmox_username` | Yes | Your proxmox username assigned to you. | N/A
-| `proxmox_password` | Yes | Your proxmox password assigned to you. | N/A
-| `http_port` | No | The HTTP Port for your container to listen on. It must be between `80` and `60000`. Default value is `3000`. | N/A
-| `linux_distribution` | No | The Linux Distribution that runs on your container. Currently, `rocky` (Rocky 9.5) and `debian` (Debian 12) are available. Default value is `Debian`. | N/A
-| `github_pat` | Conditional | Your GitHub Personal Access Token. This is used to manage runners in your containers. This is **only required if you want the workflow to create runners for you.**| Yes. Accessable in developer settings. |
-
-
-There are a few other properties that are not required, but can still be specified in the workflow file:
-
-
-| Propety | Required? | Description | Supplied by Github? |
-| --------- | ----- | ------------------------------------ | ------ |
-| `public_key` | No | Your machine's public key that will be stored in the `~/.ssh/authorized_keys` file of your repository. This allows you to SSH into your container without a password. It is more secure and recommended. | N/A
-
-### Automatic Deployment Properties
-
-This github action can *attempt* to automatically deploy services on your container. This is done by fetching your repository contents on the branch that the script is being ran in, installing dependencies/services, and running build and start commands in the background.
-
-Additionally, with automatic deployment enabled, your container will update on every push command automatically, preventing you from having to SSH into the container and setting it up manually.
-
-> [!NOTE]
-> Properties below that are required assuming you want to automatically deploy your project. If not, none of these properties are needed.
-
-| Propety | Required? | Description |
-| --------- | ----- | ------------------------------------ |
-| `project_root` | No | The root directory of your project to deploy from. Example: `/flask-server`. If the root directory is the same as the github root directory, leave blank.
-| `services` | No | A JSON array of services to add to your container. Example: ```services: '["mongodb", "docker"]'```. These services will automatically install and start up on container creation. **NOTE**: All services in this list must belong on the list of available services below. If you need a service that is not on the list, see `custom_services`.
Available Services: `meteor`, `mongodb`, `docker`, `redis`, `postgresql`, `apache`, `nginx`, `rabbitmq`, `memcached`, `mariadb`.
-| `custom_services` | No | A 2D JSON array of custom service installation commands to install any custom service(s) not in `services`.
Example: ```custom_services: [["sudo apt-get install -y service", "sudo systemctl enable service", "sudo systemctl start service"], ["sudo apt-get install -y service2", "sudo systemctl enable service2", "sudo systemctl start service2"]]```
-
-
-There are two types of deployments: single component and multi-component deployment. Single component deployment involves deploying only a single service (i.e. a single Flask Server, REACT application, MCP Server, etc.). Multi-component deployment involves deploying more than one service at the same time (i.e. a flask backend and a vite.js backend).
-
-> [!IMPORTANT]
-> In Multi-Component applications, each top-layer key represents the file path, relative to the root directory, to the component (service) to place those variables/commands in.
-
-| Propety | Required? | Description | Single Component | Multi-Component |
-| --------- | ----- | ------------------------------------ | ---- | --- |
-| `container_env_vars` | No. | Key-Value Environment variable pairs. | Dictionary in the form of: `{ "api_key": "123", "password": "abc"}` | Dictionary in the form of: `'{"/frontend": { "api_key": "123"}, "/backend": { "password": "abc123" }}'`.
-| `install_command` | Yes | Commands to install all project dependencies | String of the installation command, i.e. `npm install`. | Dictionary in the form of: `'{"/frontend": "npm install", "/backend": "pip install -r ../requirements.txt"}'`.
-| `build_command` | No | Commands to build project components | String of the build command, i.e. `npm build`. | Dictionary in the form of: `'{"/frontend": "npm build", "/backend": "python3 build.py"}'`.
-| `start_command` | Yes | Commands to start project components. | String of the start command, i.e. `npm run`. | Dictionary in the form of: `'{"/frontend": "npm run", "/backend": "flask run"}'`.
-| `runtime_language` | Yes | Runtime language of each project component, which can either be `nodejs` or `python`. | String of runtime environment, i.e. `nodejs` | Dictionary in the form of: `'{"/frontend": "nodejs", "/backend": "python"}'`.
-| `root_start_command` | No | Command to run at the project directory root for **multi-component applications**. | N/A | String of the command, i.e. `Docker run`
-
-## Important Notes for Automatic Deployment
-
-Below are some important things to keep in mind if you want your application to be automatically deployed:
-- If you are using meteor, you must start your application with the flags ``--allow-superuser`` and `--port 0.0.0.0:`.
- - Meteor is a large package, so deploying it may take more time than other applications.
-- When running a service, ensure it is listening on `0.0.0.0` (your IP) instead of only locally at `127.0.0.1`.
-- The Github action will fail with an exit code and message if a property is not set up correctly.
-
-
-## Output
-
-When a container is successfully created (Github Action is successful), you will see an output with all of your container details. This includes all your ports, container ID, container IP Address (internal in 10.15.x.x subnet), public domain name, and ssh command to access your container.
-
-See an example output below:
-
-```
-━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
-🔔 COPY THESE PORTS DOWN — For External Access
-━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
-📌 Note: Your container listens on SSH Port 22 internally,
- but EXTERNAL traffic must use the SSH port listed below:
-━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
-✅ Hostname Registration: polyglot-test-maxklema-pull-request → 10.15.129.23
-🔐 SSH Port : 2344
-🌐 HTTP Port : 32000
-━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
-📦 Container ID : 136
-🌐 Internal IP : 10.15.129.23
-🔗 Domain Name : https://polyglot-test-maxklema-pull-request.opensource.mieweb.org
-🛠️ SSH Access : ssh -p 2344 root@polyglot-test-maxklema-pull-request.opensource.mieweb.org
-━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
-NOTE: Additional background scripts are being ran in detached terminal sessions.
-Wait up to two minutes for all processes to complete.
-━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
-Still not working? Contact Max K. at maxklema@gmail.com
-━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
-```
-
-> [!NOTE]
-> Even if your GitHub Action workflow is finished, *it may not be accessible right away. Background tasks (migration, template cloning, cleanup, etc) are still be ran in detatched terminal sessions*. Wait a few minutes for all tasks to complete.
-
-## Sample Workflow File
-
-The workflow file below is an example workflow designed to deploy a multi-component application with a python (flask) backend and nodejs (vite) frontend:
-
-**With PAT:**
-
-```yaml
-name: Proxmox Container Management
-
-on:
- push:
- create:
- delete:
-
-jobs:
- setup-runner:
- runs-on: ubuntu-latest
- steps:
- - name: Install Dependencies
- run: |
- sudo apt install -y sshpass jq
- - uses: maxklema/proxmox-launchpad@test
- with:
- proxmox_password: ${{ secrets.PROXMOX_PASSWORD }}
- proxmox_username: ${{ secrets.PROXMOX_USERNAME }}
- github_pat: ${{ secrets.GH_PAT }}
- manage-container:
- runs-on: self-hosted
- needs: setup-runner
- steps:
- - uses: maxklema/proxmox-launchpad@test
- with:
- proxmox_password: ${{ secrets.PROXMOX_PASSWORD }}
- proxmox_username: ${{ secrets.PROXMOX_USERNAME }}
- public_key: ${{ secrets.PUBLIC_KEY }}
- container_env_vars: '{"API_KEY": "1234"}'
- install_command: npm i
- start_command: npm start
- runtime_language: nodejs
- services: '["mongodb"]'
-```
-
-**Without PAT:**
-
-```yaml
-name: Proxmox Container Management
-
-on:
- push:
- create:
- delete:
-
-jobs:
- manage-container:
- runs-on: self-hosted
- needs: setup-runner
- steps:
- - uses: maxklema/proxmox-launchpad@test
- with:
- proxmox_password: ${{ secrets.PROXMOX_PASSWORD }}
- proxmox_username: ${{ secrets.PROXMOX_USERNAME }}
- public_key: ${{ secrets.PUBLIC_KEY }}
- container_env_vars: '{"API_KEY": "1234"}'
- install_command: npm i
- start_command: npm start
- runtime_language: nodejs
- services: '["mongodb"]'
-```
-
-
-## Misc.
-Feel free to submit a PR/issue here or in [opensource-server](https://github.com/mieweb/opensource-server).
-Author: [@maxklema](https://github.com/maxklema)
diff --git a/ci-cd automation/proxmox-launchpad/action.yml b/ci-cd automation/proxmox-launchpad/action.yml
deleted file mode 100644
index 79f26b59..00000000
--- a/ci-cd automation/proxmox-launchpad/action.yml
+++ /dev/null
@@ -1,494 +0,0 @@
-# action.yml
-name: Proxmox LaunchPad
-description: Manage Proxmox Containers for your Repository.
-author: maxklema
-branding:
- icon: "package"
- color: "purple"
-
-inputs:
- proxmox_username:
- required: true
- proxmox_password:
- required: true
- container_password:
- required: false
- public_key:
- required: false
- http_port:
- required: false
- project_root:
- required: false
- container_env_vars:
- required: false
- install_command:
- required: false
- build_command:
- required: false
- start_command:
- required: false
- runtime_language:
- required: false
- services:
- required: false
- custom_services:
- required: false
- linux_distribution:
- required: false
- multi_component:
- required: false
- root_start_command:
- required: false
- github_pat:
- required: false
-
-runs:
- using: "composite"
- steps:
- - name: Check if action should run
- shell: bash
- id: should-run
- env:
- GITHUB_EVENT_NAME: ${{ github.event_name }}
- GITHUB_EVENT_CREATED: ${{ github.event.created }}
- run: |
- if [[ "$GITHUB_EVENT_NAME" != "push" ]] || [[ "$GITHUB_EVENT_CREATED" == "false" ]]; then
- echo "should_run=true" >> $GITHUB_OUTPUT
- else
- echo "should_run=false" >> $GITHUB_OUTPUT
- echo "Skipping action: Push event with created=true"
- fi
-
- - name: Determine Target Branch Name
- shell: bash
- id: branch-name
- if: steps.should-run.outputs.should_run == 'true'
- env:
- GITHUB_EVENT_NAME: ${{ github.event_name }}
- GITHUB_REF_NAME: ${{ github.ref_name }}
- GITHUB_EVENT_REF: ${{ github.event.ref }}
- run: |
- if [[ "$GITHUB_EVENT_NAME" == "delete" ]]; then
- TARGET_BRANCH="$GITHUB_EVENT_REF"
- echo "Using deleted branch name: $TARGET_BRANCH"
- else
- TARGET_BRANCH="$GITHUB_REF_NAME"
- echo "Using current branch name: $TARGET_BRANCH"
- fi
- echo "target_branch=$TARGET_BRANCH" >> $GITHUB_OUTPUT
-
- - name: Create Runner (If Needed)
- shell: bash
- id: create-runner
- if: steps.should-run.outputs.should_run == 'true'
- env:
- GITHUB_REPOSITORY_FULL: ${{ github.repository }}
- GITHUB_REPOSITORY_OWNER: ${{ github.repository_owner }}
- TARGET_BRANCH: ${{ steps.branch-name.outputs.target_branch }}
- CONTAINER_PASSWORD: ${{ inputs.container_password }}
- PROXMOX_USERNAME: ${{ inputs.proxmox_username }}
- PROXMOX_PASSWORD: ${{ inputs.proxmox_password }}
- GITHUB_PAT: ${{ inputs.github_pat }}
- GITHUB_API: ${{ github.api_url }}
- LINUX_DISTRIBUTION: ${{ inputs.linux_distribution }}
- PROJECT_REPOSITORY: ${{ github.server_url }}/${{ github.repository }}
- GITHUB_JOB: ${{ github.job }}
- run: |
- REPO_NAME=$(basename "$GITHUB_REPOSITORY_FULL")
- CONTAINER_NAME="${GITHUB_REPOSITORY_OWNER}-${REPO_NAME}-${TARGET_BRANCH}"
- CONTAINER_NAME=${CONTAINER_NAME,,}
- CONTAINER_NAME=$(echo "$CONTAINER_NAME" | sed 's/[^a-z0-9-]/-/g')
- export CONTAINER_NAME
-
- # Auto-detect if this is a runner setup job based on job name or if no container inputs are provided
- CREATE_RUNNER_JOB="N"
- if [[ "$GITHUB_JOB" == *"setup"* ]] || [[ "$GITHUB_JOB" == *"runner"* ]]; then
- CREATE_RUNNER_JOB="Y"
- echo "CREATE_RUNNER_JOB=true" >> $GITHUB_OUTPUT
- fi
-
- if [ ! -z "$GITHUB_PAT" ]; then
- RESPONSE=$(curl --location ${GITHUB_API}/repos/${GITHUB_REPOSITORY_OWNER}/${REPO_NAME}/actions/runners --header "Authorization: token $GITHUB_PAT")
-
- while read -r RUN; do
- RUNNER_NAME=$(echo "$RUN" | jq -r '.name')
- if [ "$RUNNER_NAME" == "$CONTAINER_NAME" ]; then
- if [ "${CREATE_RUNNER_JOB^^}" == "N" ]; then
- exit 0 #Runner exists, continue to next steps
- else
- echo "STOP_SCRIPT=true" >> $GITHUB_OUTPUT
- exit 0 # Runner exists, continue to next job.
- fi
- fi
- done < <(echo "$RESPONSE" | jq -c '.runners[]')
-
- echo "Creating a Runner..."
- set +e
- sshpass -p 'mie123!' ssh \
- -T \
- -o StrictHostKeyChecking=no \
- -o UserKnownHostsFile=/dev/null \
- -o SendEnv="CONTAINER_NAME CONTAINER_PASSWORD PROXMOX_USERNAME PROXMOX_PASSWORD GITHUB_PAT LINUX_DISTRIBUTION PROJECT_REPOSITORY" \
- setup-runner@opensource.mieweb.org
-
- EXIT_STATUS=$?
-
- # Exit if a container exists but an associated runner does not.
- if [ $EXIT_STATUS != 3 ]; then
- echo "Something went wrong with creating/using a runner."
- exit 1
- fi
-
- echo "STOP_SCRIPT=true" >> $GITHUB_OUTPUT
- fi
-
- - name: Container Creation for Branch (If Needed)
- id: create-lxc
- shell: bash
- if: ${{ (github.event_name == 'create' || github.event_name == 'push') && steps.should-run.outputs.should_run == 'true' }}
- env:
- GITHUB_EVENT: ${{ github.event_name }}
- GITHUB_REPOSITORY_FULL: ${{ github.repository }}
- GITHUB_REPOSITORY_OWNER: ${{ github.repository_owner }}
- TARGET_BRANCH: ${{ steps.branch-name.outputs.target_branch }}
- CONTAINER_PASSWORD: ${{ inputs.container_password }}
- PROXMOX_USERNAME: ${{ inputs.proxmox_username }}
- PROXMOX_PASSWORD: ${{ inputs.proxmox_password }}
- PUBLIC_KEY: ${{ inputs.public_key }}
- HTTP_PORT: ${{ inputs.http_port }}
- DEPLOY_ON_START: ${{ inputs.deploy_on_start }}
- PROJECT_REPOSITORY: ${{ github.server_url }}/${{ github.repository }}
- PROJECT_BRANCH: ${{ steps.branch-name.outputs.target_branch }}
- PROJECT_ROOT: ${{ inputs.project_root }}
- REQUIRE_ENV_VARS: ${{ inputs.require_env_vars }}
- CONTAINER_ENV_VARS: ${{ inputs.container_env_vars }}
- INSTALL_COMMAND: ${{ inputs.install_command }}
- START_COMMAND: ${{ inputs.start_command }}
- BUILD_COMMAND: ${{ inputs.build_command }}
- RUNTIME_LANGUAGE: ${{ inputs.runtime_language }}
- REQUIRE_SERVICES: ${{ inputs.require_services }}
- SERVICES: ${{ inputs.services }}
- CUSTOM_SERVICES: ${{ inputs.custom_services }}
- LINUX_DISTRIBUTION: ${{ inputs.linux_distribution }}
- MULTI_COMPONENT: ${{ inputs.multi_component }}
- ROOT_START_COMMAND: ${{ inputs.root_start_command }}
- GITHUB_PAT: ${{ inputs.github_pat }}
- GH_ACTION: y
- run: |
- set +e
- REPO_NAME=$(basename "$GITHUB_REPOSITORY_FULL")
- CONTAINER_NAME="${GITHUB_REPOSITORY_OWNER}-${REPO_NAME}-${TARGET_BRANCH}"
- CONTAINER_NAME=${CONTAINER_NAME,,}
- CONTAINER_NAME=$(echo "$CONTAINER_NAME" | sed 's/[^a-z0-9-]/-/g')
- export CONTAINER_NAME
- STOP_SCRIPT=${{ steps.create-runner.outputs.STOP_SCRIPT }}
- if [ "$STOP_SCRIPT" != "true" ]; then
- set +e
- echo "Running Container Exists..."
-
- # Determine SSH target based on network location
- EXTERNAL_IP=$(dig +short opensource.mieweb.org)
- if [ "$EXTERNAL_IP" = "10.15.20.69" ]; then
- SSH_TARGET="10.15.0.4"
- else
- SSH_TARGET="opensource.mieweb.org"
- fi
-
- sshpass -p 'mie123!' ssh \
- -T \
- -o StrictHostKeyChecking=no \
- -o UserKnownHostsFile=/dev/null \
- -o SendEnv="PROXMOX_USERNAME PROXMOX_PASSWORD CONTAINER_NAME PROJECT_REPOSITORY" \
- container-exists@$SSH_TARGET
- CONTAINER_EXISTS=$?
- if [ $CONTAINER_EXISTS -eq 1 ]; then
- echo "FAILED=1" >> $GITHUB_ENV # User does not own the container
- elif [ $CONTAINER_EXISTS -eq 0 ]; then
- echo "Cloning repository based on $PROJECT_BRANCH branch."
-
- sshpass -p 'mie123!' ssh \
- -T \
- -o StrictHostKeyChecking=no \
- -o UserKnownHostsFile=/dev/null \
- -o SendEnv="CONTAINER_NAME CONTAINER_PASSWORD PROXMOX_USERNAME PUBLIC_KEY PROXMOX_PASSWORD HTTP_PORT DEPLOY_ON_START PROJECT_REPOSITORY PROJECT_BRANCH PROJECT_ROOT REQUIRE_ENV_VARS CONTAINER_ENV_VARS INSTALL_COMMAND START_COMMAND RUNTIME_LANGUAGE REQUIRE_SERVICES SERVICES CUSTOM_SERVICES LINUX_DISTRIBUTION MULTI_COMPONENT ROOT_START_COMMAND GH_ACTION GITHUB_PAT" \
- create-container@$SSH_TARGET
-
- CONTAINER_CREATED=$?
- echo "CONTAINER_CREATED=true" >> $GITHUB_OUTPUT
- if [ $CONTAINER_CREATED -ne 0 ]; then
- echo "FAILED=1" >> $GITHUB_ENV
- fi
- fi
- fi
-
- - name: Container Update on Branch Push
- shell: bash
- if: ${{ (github.event_name == 'push' && steps.create-lxc.outputs.CONTAINER_CREATED != 'true') && steps.should-run.outputs.should_run == 'true' }}
- env:
- GITHUB_EVENT: ${{ github.event_name }}
- GITHUB_REPOSITORY_FULL: ${{ github.repository }}
- GITHUB_REPOSITORY_OWNER: ${{ github.repository_owner }}
- TARGET_BRANCH: ${{ steps.branch-name.outputs.target_branch }}
- PROXMOX_USERNAME: ${{ inputs.proxmox_username }}
- PROXMOX_PASSWORD: ${{ inputs.proxmox_password }}
- PROJECT_REPOSITORY: ${{ github.server_url }}/${{ github.repository }}
- PROJECT_BRANCH: ${{ steps.branch-name.outputs.target_branch }}
- PROJECT_ROOT: ${{ inputs.project_root }}
- INSTALL_COMMAND: ${{ inputs.install_command }}
- START_COMMAND: ${{ inputs.start_command }}
- BUILD_COMMAND: ${{ inputs.build_command }}
- RUNTIME_LANGUAGE: ${{ inputs.runtime_language }}
- MULTI_COMPONENT: ${{ inputs.multi_component }}
- SERVICES: ${{ inputs.services }}
- CUSTOM_SERVICES: ${{ inputs.custom_services }}
- REQUIRE_SERVICES: ${{ inputs.require_services }}
- LINUX_DISTRIBUTION: ${{ inputs.linux_distribution }}
- DEPLOY_ON_START: ${{ inputs.deploy_on_start }}
- ROOT_START_COMMAND: ${{ inputs.root_start_command }}
- GITHUB_PAT: ${{ inputs.github_pat }}
- HTTP_PORT: ${{ inputs.http_port }}
- GH_ACTION: y
- run: |
- set +e
- echo "Running Container Update..."
- REPO_NAME=$(basename "$GITHUB_REPOSITORY_FULL")
- CONTAINER_NAME="${GITHUB_REPOSITORY_OWNER}-${REPO_NAME}-${TARGET_BRANCH}"
- CONTAINER_NAME=${CONTAINER_NAME,,}
- CONTAINER_NAME=$(echo "$CONTAINER_NAME" | sed 's/[^a-z0-9-]/-/g')
- export CONTAINER_NAME
- echo "$LINUX_DISTRIBUTION"
- STOP_SCRIPT=${{ steps.create-runner.outputs.STOP_SCRIPT }}
- if [ "$STOP_SCRIPT" != true ]; then
- # Determine SSH target based on network location
- EXTERNAL_IP=$(dig +short opensource.mieweb.org)
- if [ "$EXTERNAL_IP" = "10.15.20.69" ]; then
- SSH_TARGET="10.15.0.4"
- else
- SSH_TARGET="opensource.mieweb.org"
- fi
-
- sshpass -p 'mie123!' ssh \
- -T \
- -o StrictHostKeyChecking=no \
- -o UserKnownHostsFile=/dev/null \
- -o SendEnv="CONTAINER_NAME PROXMOX_USERNAME PROXMOX_PASSWORD PROJECT_REPOSITORY PROJECT_BRANCH PROJECT_ROOT INSTALL_COMMAND START_COMMAND BUILD_COMMAND RUNTIME_LANGUAGE MULTI_COMPONENT ROOT_START_COMMAND DEPLOY_ON_START SERVICES CUSTOM_SERVICES REQUIRE_SERVICES LINUX_DISTRIBUTION GH_ACTION HTTP_PORT" \
- update-container@$SSH_TARGET
- UPDATE_EXIT=$?
- if [ $UPDATE_EXIT -ne 0 ]; then
- echo "FAILED=1" >> $GITHUB_ENV
- fi
- fi
-
- - name: Container Deletion on Branch Deletion (Check)
- shell: bash
- if: ${{ github.event_name == 'delete' && steps.should-run.outputs.should_run == 'true' }}
- env:
- GITHUB_EVENT: ${{ github.event_name }}
- GITHUB_REPOSITORY_FULL: ${{ github.repository }}
- GITHUB_REPOSITORY_OWNER: ${{ github.repository_owner }}
- TARGET_BRANCH: ${{ steps.branch-name.outputs.target_branch }}
- PROXMOX_USERNAME: ${{ inputs.proxmox_username }}
- PROXMOX_PASSWORD: ${{ inputs.proxmox_password }}
- PROJECT_REPOSITORY: ${{ github.server_url }}/${{ github.repository }}
- GITHUB_PAT: ${{ inputs.github_pat }}
- run: |
- set +e
- REPO_NAME=$(basename "$GITHUB_REPOSITORY_FULL")
- CONTAINER_NAME="${GITHUB_REPOSITORY_OWNER}-${REPO_NAME}-${TARGET_BRANCH}"
- CONTAINER_NAME=${CONTAINER_NAME,,}
- CONTAINER_NAME=$(echo "$CONTAINER_NAME" | sed 's/[^a-z0-9-]/-/g')
- export CONTAINER_NAME
- STOP_SCRIPT=${{ steps.create-runner.outputs.STOP_SCRIPT }}
- if [ "$STOP_SCRIPT" != true ]; then
- # Determine SSH target based on network location
- EXTERNAL_IP=$(dig +short opensource.mieweb.org)
- if [ "$EXTERNAL_IP" = "10.15.20.69" ]; then
- SSH_TARGET="10.15.0.4"
- else
- SSH_TARGET="opensource.mieweb.org"
- fi
-
- sshpass -p 'mie123!' ssh \
- -T \
- -o StrictHostKeyChecking=no \
- -o UserKnownHostsFile=/dev/null \
- -o SendEnv="PROXMOX_USERNAME PROXMOX_PASSWORD CONTAINER_NAME GITHUB_PAT PROJECT_REPOSITORY" \
- delete-container@$SSH_TARGET
- DELETE_EXIT=$?
- if [ $DELETE_EXIT -ne 0 ]; then
- echo "FAILED=1" >> $GITHUB_ENV
- fi
- fi
-
- - name: Check if branch is part of a PR and comment
- shell: bash
- id: check-pr
- if: steps.should-run.outputs.should_run == 'true' && steps.create-runner.outputs.CREATE_RUNNER_JOB != 'true' && env.FAILED != '1'
- env:
- GITHUB_TOKEN: ${{ inputs.github_pat }}
- GITHUB_REPOSITORY: ${{ github.repository }}
- TARGET_BRANCH: ${{ steps.branch-name.outputs.target_branch }}
- GITHUB_REPOSITORY_OWNER: ${{ github.repository_owner }}
- RUN_ID: ${{ github.run_id }}
- run: |
- if [ -z "$GITHUB_TOKEN" ]; then
- echo "pr_number=" >> $GITHUB_OUTPUT
- echo "is_pr=false" >> $GITHUB_OUTPUT
- echo "No GitHub token provided, skipping PR detection"
- exit 0
- fi
-
- # Check if this branch has an open PR
- PR_DATA=$(curl -s -H "Authorization: token $GITHUB_TOKEN" \
- "https://api.github.com/repos/$GITHUB_REPOSITORY/pulls?state=open&head=${{ github.repository_owner }}:$TARGET_BRANCH")
-
- PR_NUMBER=$(echo "$PR_DATA" | jq -r '.[0].number // empty')
-
- if [ -n "$PR_NUMBER" ] && [ "$PR_NUMBER" != "null" ]; then
- echo "pr_number=$PR_NUMBER" >> $GITHUB_OUTPUT
- echo "is_pr=true" >> $GITHUB_OUTPUT
- echo "Branch $TARGET_BRANCH is part of PR #$PR_NUMBER"
-
- # Generate container name
- REPO_NAME=$(basename "$GITHUB_REPOSITORY")
- CONTAINER_NAME="${GITHUB_REPOSITORY_OWNER}-${REPO_NAME}-${TARGET_BRANCH}"
- CONTAINER_NAME=${CONTAINER_NAME,,}
- CONTAINER_NAME=$(echo "$CONTAINER_NAME" | sed 's/[^a-z0-9-]/-/g')
-
- # Create initial comment on PR
- CONTAINER_URL="https://${CONTAINER_NAME}.opensource.mieweb.org"
-
- COMMENT_BODY="## 🚀 Proxmox LaunchPad Action
- **Expected URL**: [$CONTAINER_NAME]($CONTAINER_URL) *(will be available once deployment completes)*
- **Status**: ✅ Application was deployed according to workflow configurations.
- **Branch**: \`$TARGET_BRANCH\`
- **Run ID**: [\`$RUN_ID\`](https://github.com/$GITHUB_REPOSITORY/actions/runs/$RUN_ID)
- **Container Name**: \`$CONTAINER_NAME\`
-
- > This comment was automatically generated by Proxmox LaunchPad: The fastest way to deploy your repository code. To use Proxmox in your own repository, see: [Proxmox LaunchPad](https://github.com/marketplace/actions/proxmox-launchpad)."
-
- # Use jq to safely build the JSON payload from the variable
- JSON_PAYLOAD=$(jq -n --arg body "$COMMENT_BODY" '{body: $body}')
-
- # Post the initial comment
- curl -s -X POST \
- -H "Authorization: token $GITHUB_TOKEN" \
- -H "Content-Type: application/json" \
- -H "Accept: application/vnd.github.v3+json" \
- "https://api.github.com/repos/$GITHUB_REPOSITORY/issues/$PR_NUMBER/comments" \
- -d "$JSON_PAYLOAD" > /dev/null
-
- echo "Initial comment posted to PR #$PR_NUMBER"
- else
- echo "pr_number=" >> $GITHUB_OUTPUT
- echo "is_pr=false" >> $GITHUB_OUTPUT
- echo "Branch $TARGET_BRANCH is not part of any open PR"
- fi
-
- - name: Comment on PR on Failure
- if: env.FAILED == '1'
- shell: bash
- env:
- GITHUB_TOKEN: ${{ inputs.github_pat }}
- GITHUB_REPOSITORY: ${{ github.repository }}
- TARGET_BRANCH: ${{ steps.branch-name.outputs.target_branch }}
- RUN_ID: ${{ github.run_id }}
- GITHUB_REPOSITORY_OWNER: ${{ github.repository_owner }}
- run: |
- if [ -z "$GITHUB_TOKEN" ]; then
- echo "Cannot comment on PR: missing token"
- exit 1
- fi
-
- # Check if this branch has an open PR
- PR_DATA=$(curl -s -H "Authorization: token $GITHUB_TOKEN" \
- "https://api.github.com/repos/$GITHUB_REPOSITORY/pulls?state=open&head=${GITHUB_REPOSITORY_OWNER}:$TARGET_BRANCH")
-
- PR_NUMBER=$(echo "$PR_DATA" | jq -r '.[0].number // empty')
-
- if [ -z "$PR_NUMBER" ] || [ "$PR_NUMBER" == "null" ]; then
- echo "Not a pull request, skipping failure comment."
- exit 0
- fi
-
- REPO_NAME=$(basename "$GITHUB_REPOSITORY")
- CONTAINER_NAME="${GITHUB_REPOSITORY_OWNER}-${REPO_NAME}-${TARGET_BRANCH}"
- CONTAINER_NAME=${CONTAINER_NAME,,}
- CONTAINER_NAME=$(echo "$CONTAINER_NAME" | sed 's/[^a-z0-9-]/-/g')
-
- CONTAINER_URL="https://${CONTAINER_NAME}.opensource.mieweb.org"
-
- COMMENT_BODY="## 🚀 Proxmox LaunchPad Action
- **Expected URL**: [$CONTAINER_NAME]($CONTAINER_URL) *(will be available once deployment completes)*
- **Status**: ❌ Application failed to deploy. View [\`$RUN_ID\`](https://github.com/$GITHUB_REPOSITORY/actions/runs/$RUN_ID) to see logs.
- **Branch**: \`$TARGET_BRANCH\`
- **Run ID**: [\`$RUN_ID\`](https://github.com/$GITHUB_REPOSITORY/actions/runs/$RUN_ID)
- **Container Name**: \`$CONTAINER_NAME\`
-
- > This comment was automatically generated by Proxmox LaunchPad: The fastest way to deploy your repository code. To use Proxmox in your own repository, see: [Proxmox LaunchPad](https://github.com/marketplace/actions/proxmox-launchpad)."
-
- JSON_PAYLOAD=$(jq -n --arg body "$COMMENT_BODY" '{body: $body}')
-
- # Post the comment
- curl -s -X POST \
- -H "Authorization: token $GITHUB_TOKEN" \
- -H "Content-Type: application/json" \
- -H "Accept: application/vnd.github.v3+json" \
- "https://api.github.com/repos/$GITHUB_REPOSITORY/issues/$PR_NUMBER/comments" \
- -d "$JSON_PAYLOAD" > /dev/null
-
- echo "Failure comment posted to PR #$PR_NUMBER"
- exit 1
-
- - name: Create GitHub Deployment (Default Branch)
- if: github.ref == format('refs/heads/{0}', github.event.repository.default_branch) && env.FAILED != '1'
- shell: bash
- env:
- GITHUB_TOKEN: ${{ inputs.github_pat }}
- GITHUB_REPOSITORY: ${{ github.repository }}
- GITHUB_SHA: ${{ github.sha }}
- GITHUB_REF: ${{ github.ref }}
- GITHUB_REPOSITORY_OWNER: ${{ github.repository_owner }}
- run: |
- REPO_NAME=$(basename "$GITHUB_REPOSITORY")
- CONTAINER_NAME="${GITHUB_REPOSITORY_OWNER}-${REPO_NAME}-${GITHUB_REF#refs/heads/}"
- CONTAINER_NAME=${CONTAINER_NAME,,}
- CONTAINER_NAME=$(echo "$CONTAINER_NAME" | sed 's/[^a-z0-9-]/-/g')
- CONTAINER_URL="https://${CONTAINER_NAME}.opensource.mieweb.org"
- DEPLOYMENT_RESPONSE=$(curl -s -X POST \
- -H "Authorization: token $GITHUB_TOKEN" \
- -H "Content-Type: application/json" \
- -H "Accept: application/vnd.github.v3+json" \
- "https://api.github.com/repos/$GITHUB_REPOSITORY/deployments" \
- -d '{
- "ref": "'${GITHUB_REF#refs/heads/}'",
- "required_contexts": [],
- "environment": "Preview - '$GITHUB_REPOSITORY'",
- "description": "Deployment triggered from Proxmox LaunchPad action.",
- "sha": "'$GITHUB_SHA'"
- }')
- DEPLOYMENT_ID=$(echo "$DEPLOYMENT_RESPONSE" | jq -r '.id')
- if [ "$DEPLOYMENT_ID" != "null" ] && [ -n "$DEPLOYMENT_ID" ]; then
- curl -s -X POST \
- -H "Authorization: token $GITHUB_TOKEN" \
- -H "Content-Type: application/json" \
- -H "Accept: application/vnd.github.v3+json" \
- "https://api.github.com/repos/$GITHUB_REPOSITORY/deployments/$DEPLOYMENT_ID/statuses" \
- -d '{
- "state": "success",
- "description": "Deployment completed successfully.",
- "environment": "Preview - '$GITHUB_REPOSITORY'",
- "environment_url": "'$CONTAINER_URL'"
- }' > /dev/null
- echo "Deployment created and marked as successful for default branch: ${GITHUB_REF#refs/heads/}"
- echo "Deployment URL: $CONTAINER_URL"
- else
- echo "Deployment creation failed."
- fi
-
- - name: Catch All Failure Step
- if: env.FAILED == '1'
- shell: bash
- run: |
- echo "Workflow failed. See previous steps for details."
- exit 1
diff --git a/container creation/create-container.sh b/container creation/create-container.sh
index 17d2148b..16013e6e 100644
--- a/container creation/create-container.sh
+++ b/container creation/create-container.sh
@@ -131,6 +131,10 @@ if [ -f "/var/lib/vz/snippets/container-public-keys/$PUB_FILE" ]; then
rm -rf /var/lib/vz/snippets/container-public-keys/$PUB_FILE > /dev/null 2>&1
fi
+# Generate a random root password for the container
+ROOT_PSWD=$(tr -dc 'A-Za-z0-9' < /dev/urandom | head -c 16)
+pct exec $CONTAINER_ID -- bash -c "echo 'root:$ROOT_PSWD' | chpasswd" > /dev/null 2>&1
+
CONTAINER_IP=""
attempts=0
max_attempts=10
@@ -149,6 +153,10 @@ fi
echo "⏳ Configuring LDAP connection via SSSD..."
source /var/lib/vz/snippets/helper-scripts/configureLDAP.sh
+# Set up Wazuh-Agent on the container ====
+echo "⏳ Setting up Wazuh-Agent..."
+source /var/lib/vz/snippets/Wazuh/register-agent.sh
+
# Attempt to Automatically Deploy Project Inside Container
if [ "${DEPLOY_ON_START^^}" == "Y" ]; then
@@ -170,13 +178,12 @@ pct exec $CONTAINER_ID -- bash -c "cd /root && touch container-updates.log"
# Run Contianer Provision Script to add container to port_map.json
echo "⏳ Running Container Provision Script..."
if [ -f "/var/lib/vz/snippets/container-port-maps/$PROTOCOL_FILE" ]; then
- /var/lib/vz/snippets/register-container.sh $CONTAINER_ID $HTTP_PORT /var/lib/vz/snippets/container-port-maps/$PROTOCOL_FILE "$USERNAME_ONLY"
+ /var/lib/vz/snippets/register-container.sh $CONTAINER_ID $HTTP_PORT /var/lib/vz/snippets/container-port-maps/$PROTOCOL_FILE "$USERNAME_ONLY" "$ROOT_PSWD"
rm -rf /var/lib/vz/snippets/container-port-maps/$PROTOCOL_FILE > /dev/null 2>&1
else
- /var/lib/vz/snippets/register-container.sh $CONTAINER_ID $HTTP_PORT "" "$PROXMOX_USERNAME"
+ /var/lib/vz/snippets/register-container.sh $CONTAINER_ID $HTTP_PORT "" "$PROXMOX_USERNAME" "$ROOT_PSWD"
fi
-
SSH_PORT=$(iptables -t nat -S PREROUTING | grep "to-destination $CONTAINER_IP:22" | awk -F'--dport ' '{print $2}' | awk '{print $1}' | head -n 1 || true)
# Output container details and start services if necessary =====
diff --git a/container creation/register-container.sh b/container creation/register-container.sh
index 83a3cdd4..2a0ee4fb 100644
--- a/container creation/register-container.sh
+++ b/container creation/register-container.sh
@@ -1,27 +1,22 @@
#!/bin/bash
-# var/lib/vz/snippets/register-container.sh
-# Script to register a container's IP and ports in the NGINX port map JSON file.
-# Last Modified June 27 2025 by Maxwell Klema
set -euo pipefail
-if [[ -z "${1-}" || -z "${2-}" ]]; then
-	echo "Usage: $0 <CTID> <http_port> [additional_protocols_file]"
+
+if [[ -z "${1-}" || -z "${2-}" || -z "${4-}" ]]; then
+	echo "Usage: $0 <CTID> <http_port> [additional_protocols_file] <proxmox_user>"
exit 1
fi
CTID="$1"
http_port="$2"
-ADDITIONAL_PROTOCOLS="${3-}" #set to empty string if not passed
+ADDITIONAL_PROTOCOLS="${3-}"
+proxmox_user="$4"
# Redirect stdout and stderr to a log file
LOGFILE="/var/log/pve-hook-$CTID.log"
exec > >(tee -a "$LOGFILE") 2>&1
-echo "---- Hookscript started at $(date) ----"
-echo "⏳ Waiting for container to boot and get DHCP lease..."
-#sleep 10
-
# Extract IP
container_ip=""
attempts=0
@@ -38,9 +33,9 @@ if [[ -z "$container_ip" ]]; then
fi
hostname=$(pct exec "$CTID" -- hostname)
+os_release=$(pct exec "$CTID" -- grep '^ID=' /etc/os-release | cut -d'=' -f2 | tr -d "\"")
# Check if this container already has a SSH port assigned in PREROUTING
-
existing_ssh_port=$(iptables -t nat -S PREROUTING | grep "to-destination $container_ip:22" | awk -F'--dport ' '{print $2}' | awk '{print $1}' | head -n 1 || true)
if [[ -n "$existing_ssh_port" ]]; then
@@ -64,57 +59,50 @@ else
fi
# Take input file of protocols, check if the container already has a port assigned for those protocols in PREROUTING
-
# Store all protocols and ports to write to JSON list later.
-
if [ ! -z "$ADDITIONAL_PROTOCOLS" ]; then
+ list_all_protocols=()
+ list_all_ports=()
- list_all_protocols=()
- list_all_ports=()
-
- while read line; do
-
- protocol=$(echo "$line" | awk '{print $1}')
- underlying_protocol=$(echo "$line" | awk '{print $2}')
- default_port_number=$(echo "$line" | awk '{print $3}')
-
- protocol_port=""
- existing_port=$(iptables -t nat -S PREROUTING | grep "to-destination $container_ip:$default_port_number" | awk -F'--dport ' '{print $2}' | awk '{print $1}' | head -n 1 || true)
-
- if [[ -n "$existing_port" ]]; then
- # Port already exists, so just assign it to protocol_port
- echo "ℹ️ This Container already has a $protocol port at $existing_port"
- protocol_port="$existing_port"
- else
- used_protocol_ports=$(iptables -t nat -S PREROUTING | awk -F'--dport ' '/--dport / {print $2}' | awk '{print $1}')
- protocol_port=$(comm -23 <(seq 10001 29999 | sort) <(echo "$used_protocol_ports" | sort) | head -n 1 || true)
+ while read line; do
+ protocol=$(echo "$line" | awk '{print $1}')
+ underlying_protocol=$(echo "$line" | awk '{print $2}')
+ default_port_number=$(echo "$line" | awk '{print $3}')
- if [[ -z "protocol_port" ]]; then
- echo "❌ No available $protocol ports found"
- exit 2
- fi
+ protocol_port=""
+ existing_port=$(iptables -t nat -S PREROUTING | grep "to-destination $container_ip:$default_port_number" | awk -F'--dport ' '{print $2}' | awk '{print $1}' | head -n 1 || true)
- # Add PREROUTING rule
- iptables -t nat -A PREROUTING -i vmbr0 -p "$underlying_protocol" --dport "$protocol_port" -j DNAT --to-destination "$container_ip:$default_port_number"
+ if [[ -n "$existing_port" ]]; then
+ # Port already exists, so just assign it to protocol_port
+ echo "ℹ️ This Container already has a $protocol port at $existing_port"
+ protocol_port="$existing_port"
+ else
+ used_protocol_ports=$(iptables -t nat -S PREROUTING | awk -F'--dport ' '/--dport / {print $2}' | awk '{print $1}')
+ protocol_port=$(comm -23 <(seq 10001 29999 | sort) <(echo "$used_protocol_ports" | sort) | head -n 1 || true)
- # Add POSTROUTING rule
- iptables -t nat -A POSTROUTING -o vmbr0 -p "$underlying_protocol" -d "$container_ip" --dport "$default_port_number" -j MASQUERADE
+			if [[ -z "$protocol_port" ]]; then
+ echo "❌ No available $protocol ports found"
+ exit 2
+ fi
- fi
+ # Add PREROUTING rule
+ iptables -t nat -A PREROUTING -i vmbr0 -p "$underlying_protocol" --dport "$protocol_port" -j DNAT --to-destination "$container_ip:$default_port_number"
- list_all_protocols+=("$protocol")
- list_all_ports+=("$protocol_port")
+ # Add POSTROUTING rule
+ iptables -t nat -A POSTROUTING -o vmbr0 -p "$underlying_protocol" -d "$container_ip" --dport "$default_port_number" -j MASQUERADE
+ fi
- done < <(tac "$ADDITIONAL_PROTOCOLS")
+ list_all_protocols+=("$protocol")
+ list_all_ports+=("$protocol_port")
+ done < <(tac "$ADDITIONAL_PROTOCOLS")
- # Space Seperate Lists
+	# Comma-separate the protocol and port lists for transport over SSH
+ ss_protocols="$(IFS=, ; echo "${list_all_protocols[*]}")"
+ ss_ports="$(IFS=, ; echo "${list_all_ports[*]}")"
- ss_protocols="$(IFS=, ; echo "${list_all_protocols[*]}")"
- ss_ports="$(IFS=, ; echo "${list_all_ports[*]}")"
+ #Update NGINX port map JSON on the remote host safely using a heredoc and positional parameters
- #Update NGINX port map JSON on the remote host safely using a heredoc and positional parameters
-
-ssh root@10.15.20.69 bash -s -- "$hostname" "$container_ip" "$ssh_port" "$http_port" "$ss_protocols" "$ss_ports" <<'EOF'
+ ssh root@10.15.20.69 bash -s -- "$hostname" "$container_ip" "$ssh_port" "$http_port" "$ss_protocols" "$ss_ports" "$proxmox_user" "$os_release" <<'EOF'
set -euo pipefail
hostname="$1"
@@ -123,45 +111,61 @@ ssh_port="$3"
http_port="$4"
protos_json=$(echo "$5" | tr ',' '\n' | jq -R . | jq -s .)
ports_json=$(echo "$6" | tr ',' '\n' | jq -R . | jq -s 'map(tonumber)')
+user="$7"
+os_release="$8"
jq --arg hn "$hostname" \
--arg ip "$container_ip" \
+ --arg user "$user" \
+ --arg osr "$os_release" \
--argjson ssh "$ssh_port" \
--argjson http "$http_port" \
--argjson protos "$protos_json" \
--argjson ports_list "$ports_json" \
- '. + {($hn): {ip: $ip, ports: ( reduce range(0; $protos | length) as $i ( {ssh: $ssh, http: $http}; . + { ($protos[$i]): $ports_list[$i]}))}}' /etc/nginx/port_map.json > /tmp/port_map.json.new
+ '. + {($hn): {
+ ip: $ip,
+ user: $user,
+ os_release: $osr,
+ ports: ( reduce range(0; $protos | length) as $i (
+ {ssh: $ssh, http: $http};
+ . + { ($protos[$i]): $ports_list[$i]}
+ ))
+ }}' /etc/nginx/port_map.json > /tmp/port_map.json.new
mv -f /tmp/port_map.json.new /etc/nginx/port_map.json
nginx -s reload
EOF
-
else
-
-# Update NGINX port map JSON on the remote host safely using a heredoc and positional parameters
-
-ssh root@10.15.20.69 bash -s -- "$hostname" "$container_ip" "$ssh_port" "$http_port" <<'EOF'
+ # Update NGINX port map JSON on the remote host safely using a heredoc and positional parameters
+ ssh root@10.15.20.69 bash -s -- "$hostname" "$container_ip" "$ssh_port" "$http_port" "$proxmox_user" "$os_release" <<'EOF'
set -euo pipefail
hostname="$1"
container_ip="$2"
ssh_port="$3"
http_port="$4"
+user="$5"
+os_release="$6"
jq --arg hn "$hostname" \
- --arg ip "$container_ip" \
- --argjson http "$http_port" \
- --argjson ssh "$ssh_port" \
- '. + {($hn): {ip: $ip, ports: {ssh: $ssh, http: $http}}}' /etc/nginx/port_map.json > /tmp/port_map.json.new
+ --arg ip "$container_ip" \
+ --arg user "$user" \
+ --arg osr "$os_release" \
+ --argjson http "$http_port" \
+ --argjson ssh "$ssh_port" \
+ '. + {($hn): {
+ ip: $ip,
+ user: $user,
+ os_release: $osr,
+ ports: {ssh: $ssh, http: $http}
+ }}' /etc/nginx/port_map.json > /tmp/port_map.json.new
mv -f /tmp/port_map.json.new /etc/nginx/port_map.json
nginx -s reload
EOF
-
fi
# Results
-
# Define high-contrast colors
BOLD='\033[1m'
BLUE='\033[34m'
@@ -174,21 +178,21 @@ RESET='\033[0m'
echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}"
echo -e "${BOLD}🔔 ${MAGENTA}COPY THESE PORTS DOWN${RESET} — ${CYAN}For External Access${RESET}"
echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}"
-echo -e "📌 ${BLUE}Note:${RESET} Your container listens on default ports internally,"
-echo -e " but EXTERNAL traffic must use the ports listed below:"
+echo -e "📌 ${BLUE}Note:${RESET} Your container listens on SSH Port 22 internally,"
+echo -e " but EXTERNAL traffic must use the SSH port listed below:"
echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}"
# Port info
echo -e "✅ ${GREEN}Hostname Registration:${RESET} $hostname → $container_ip"
-echo -e "🔐 ${MAGENTA}SSH Port :${RESET} $ssh_port"
-echo -e "🌐 ${BLUE}HTTP Port :${RESET} $http_port"
+echo -e "🔐 ${MAGENTA}SSH Port :${RESET} $ssh_port"
+echo -e "🌐 ${BLUE}HTTP Port :${RESET} $http_port"
# Additional protocols (if any)
if [ ! -z "$ADDITIONAL_PROTOCOLS" ]; then
for i in "${!list_all_protocols[@]}"; do
- echo -e "📡 ${CYAN}${list_all_protocols[$i]} Port :${RESET} ${list_all_ports[$i]}"
+ echo -e "📡 ${CYAN}${list_all_protocols[$i]} Port :${RESET} ${list_all_ports[$i]}"
done
fi
# Bottom border
-echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}"
\ No newline at end of file
+echo -e "${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}"