diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 68f5b22..1d81bcf 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -39,24 +39,24 @@ jobs: if: github.ref != 'refs/heads/production' # Skip tests on production permissions: contents: read - security-events: read + security-events: write actions: read outputs: coverage: ${{ steps.coverage.outputs.percentage }} steps: - name: Checkout repository uses: actions/checkout@v4 - + - name: Set up Go - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: '1.25' + go-version: '1.26' - name: Cache Go modules uses: actions/cache@v4 with: path: ~/go/pkg/mod - key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} + key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum', '**/go.mod') }} restore-keys: | ${{ runner.os }}-go- @@ -95,6 +95,10 @@ jobs: if [ ! -f results.sarif ]; then echo '{"runs": []}' > results.sarif fi + # Ensure file is valid JSON + if ! jq empty results.sarif 2>/dev/null; then + echo '{"runs": []}' > results.sarif + fi - name: Upload SARIF file uses: github/codeql-action/upload-sarif@v4 @@ -118,9 +122,9 @@ jobs: uses: actions/checkout@v4 - name: Set up Go - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: '1.25' + go-version: '1.26' - name: Check formatting run: | @@ -244,11 +248,21 @@ jobs: exit 1 fi - if [ ! -f "deployments/migration-009-alerts-table.sql" ]; then + if [ ! -f "deployments/postgres/migration-009-alerts-table.sql" ]; then echo "❌ Alerts migration file not found!" exit 1 fi + if [ ! -f "deployments/postgres/migration-010-server-source-identifiers.sql" ]; then + echo "❌ Server source identifiers migration file not found!" + exit 1 + fi + + if [ ! -f "deployments/postgres/migration-011-add-telegram-id.sql" ]; then + echo "❌ Telegram ID migration file not found!" + exit 1 + fi + # Check for WebSocket implementation if [ ! -f "internal/websocket/server.go" ]; then echo "❌ WebSocket server file not found!" @@ -260,8 +274,67 @@ jobs: exit 1 fi + # Check for TimescaleDB multi-tier implementation + if [ ! -f "deployments/timescaledb/timescaledb-multi-tier.sql" ]; then + echo "❌ TimescaleDB multi-tier migration file not found!" + exit 1 + fi + + if ! grep -q "metrics_1m_avg" deployments/timescaledb/timescaledb-multi-tier.sql; then + echo "❌ 1-minute metrics aggregate not found!" + exit 1 + fi + + if ! grep -q "metrics_5m_avg" deployments/timescaledb/timescaledb-multi-tier.sql; then + echo "❌ 5-minute metrics aggregate not found!" + exit 1 + fi + + if ! grep -q "get_metrics_by_granularity" deployments/timescaledb/timescaledb-multi-tier.sql; then + echo "❌ Multi-tier granularity function not found!" + exit 1 + fi + + # Check for optimized granularity system + if [ ! -f "deployments/timescaledb/timescaledb-optimized-granularity.sql" ]; then + echo "❌ Optimized granularity migration file not found!" + exit 1 + fi + + if [ ! -f "deployments/timescaledb/timescaledb-optimized-function.sql" ]; then + echo "❌ Optimized granularity function migration file not found!" + exit 1 + fi + + if [ ! -f "deployments/timescaledb/migration-012-optimized-granularity.sql" ]; then + echo "❌ Optimized granularity master migration file not found!" + exit 1 + fi + + if ! grep -q "metrics_30m_avg" deployments/timescaledb/timescaledb-optimized-granularity.sql; then + echo "❌ 30-minute metrics aggregate not found!" + exit 1 + fi + + if ! grep -q "metrics_2h_avg" deployments/timescaledb/timescaledb-optimized-granularity.sql; then + echo "❌ 2-hour metrics aggregate not found!" 
+            exit 1
+          fi
+
+          if ! grep -q "metrics_6h_avg" deployments/timescaledb/timescaledb-optimized-granularity.sql; then
+            echo "❌ 6-hour metrics aggregate not found!"
+            exit 1
+          fi
+
+          if ! grep -q "migration-012-optimized-granularity.sql" deployments/timescaledb/migration-012-optimized-granularity.sql; then
+            echo "❌ Migration 012 self-reference not found!"
+            exit 1
+          fi
+
           echo "✅ Alert system implementation verified!"
           echo "✅ WebSocket implementation verified!"
+          echo "✅ TimescaleDB multi-tier implementation verified!"
+          echo "✅ Optimized granularity system verified!"

   # Deploy to Production
   deploy:
@@ -317,9 +390,9 @@ jobs:
             environment:
               - HOST=0.0.0.0
               - PORT=8080
-              - DATABASE_URL=postgres://servereye:\${POSTGRES_PASSWORD}@api-postgres:5432/servereye?sslmode=disable
-              - KEYS_DATABASE_URL=postgres://servereye:\${POSTGRES_PASSWORD}@api-postgres:5432/servereye?sslmode=disable
-              - TIMESCALEDB_URL=postgres://servereye:\${POSTGRES_PASSWORD}@api-timescaledb:5432/servereye?sslmode=disable
+              - DATABASE_URL=postgres://postgres:\${POSTGRES_PASSWORD}@api-postgres:5432/servereye?sslmode=disable
+              - KEYS_DATABASE_URL=postgres://postgres:\${POSTGRES_PASSWORD}@api-postgres:5432/servereye?sslmode=disable
+              - TIMESCALEDB_URL=postgres://postgres:\${POSTGRES_PASSWORD}@api-timescaledb:5432/servereye?sslmode=disable
               - STATIC_DATA_URL=postgres://servereye:\${POSTGRES_PASSWORD}@api-postgres-static:5432/servereye?sslmode=disable
               - JWT_SECRET=\${JWT_SECRET}
               - WEBHOOK_SECRET=\${WEBHOOK_SECRET}
@@ -348,9 +421,9 @@ jobs:
           sudo tee .env > /dev/null <<EOF
          POSTGRES_PASSWORD=${{ secrets.POSTGRES_PASSWORD }}
          JWT_SECRET=${{ secrets.JWT_SECRET }}
          WEBHOOK_SECRET=${{ secrets.WEBHOOK_SECRET }}
          EOF

-          # PostgreSQL main
-          sudo docker exec ServereyeAPI-postgres psql -U postgres -c "CREATE USER servereye WITH SUPERUSER PASSWORD '${{ secrets.POSTGRES_PASSWORD }}';" || echo "User servereye already exists"
-          sudo docker exec ServereyeAPI-postgres sh -c "echo 'host all all 172.20.0.0/16 trust' >> /var/lib/postgresql/data/pg_hba.conf"
-          sudo docker exec ServereyeAPI-postgres sh -c "echo 'host all all 0.0.0.0/0 md5' >> /var/lib/postgresql/data/pg_hba.conf"
-          sudo docker exec ServereyeAPI-postgres psql -U postgres -c "SELECT pg_reload_conf();"
+          # PostgreSQL main - no need to create servereye user, using postgres
+          echo "Configuring main PostgreSQL..."
+          sudo docker exec ServerEyeAPI-postgres psql -U postgres -d servereye -c "SELECT 1;" || echo "Database not ready yet"

-          # TimescaleDB
-          sudo docker exec ServereyeAPI-timescaledb psql -U postgres -c "CREATE USER servereye WITH SUPERUSER PASSWORD '${{ secrets.POSTGRES_PASSWORD }}';" || echo "User servereye already exists"
-          sudo docker exec ServereyeAPI-timescaledb sh -c "echo 'host all all 172.20.0.0/16 trust' >> /var/lib/postgresql/data/pg_hba.conf"
-          sudo docker exec ServereyeAPI-timescaledb sh -c "echo 'host all all 0.0.0.0/0 md5' >> /var/lib/postgresql/data/pg_hba.conf"
-          sudo docker exec ServereyeAPI-timescaledb psql -U postgres -c "SELECT pg_reload_conf();"
+          # TimescaleDB - no need to create servereye user, using postgres
+          echo "Configuring TimescaleDB..."
+          sudo docker exec ServerEyeAPI-timescaledb psql -U postgres -d servereye -c "SELECT 1;" || echo "Database not ready yet"

-          # PostgreSQL static
-          sudo docker exec ServereyeAPI-postgres-static psql -U postgres -c "CREATE USER servereye WITH SUPERUSER PASSWORD '${{ secrets.POSTGRES_PASSWORD }}';" || echo "User servereye already exists"
-          sudo docker exec ServereyeAPI-postgres-static sh -c "echo 'host all all 172.20.0.0/16 trust' >> /var/lib/postgresql/data/pg_hba.conf"
-          sudo docker exec ServereyeAPI-postgres-static sh -c "echo 'host all all 0.0.0.0/0 md5' >> /var/lib/postgresql/data/pg_hba.conf"
-          sudo docker exec ServereyeAPI-postgres-static psql -U postgres -c "SELECT pg_reload_conf();"
+          # PostgreSQL static - using servereye user (already created by docker-compose)
+          echo "Configuring static PostgreSQL..."
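+          # The `SELECT 1` below is a readiness probe: psql exits non-zero until
+          # the server accepts connections, and `|| echo` keeps a cold database
+          # from failing the deploy. (The postgres images also ship pg_isready,
+          # which would be an equivalent check.)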
+          sudo docker exec ServerEyeAPI-postgres-static psql -U servereye -d servereye -c "SELECT 1;" || echo "Database not ready yet"

           # Run database migrations
           echo "🗄️ Running database migrations..."

+          # Initialize main PostgreSQL schema first
+          echo "📝 Initializing main PostgreSQL schema..."
+          if [ -f "./deployments/init-schema.sql" ]; then
+            sudo docker exec -i ServerEyeAPI-postgres psql -U postgres -d servereye < "./deployments/init-schema.sql" || echo "Schema already initialized"
+          fi
+
           # Run PostgreSQL migrations (main database)
           echo "📝 Running PostgreSQL migrations for main database..."
-          for migration in ./deployments/migration-001-*.sql ./deployments/migration-002-*.sql ./deployments/migration-003-*.sql ./deployments/migration-004-*.sql ./deployments/migration-009-*.sql; do
+          for migration in ./deployments/postgres/migration-*.sql; do
             if [ -f "$migration" ]; then
               echo "Applying migration: $(basename $migration)"
-              sudo docker exec ServereyeAPI-postgres psql -U postgres -d servereye -f "/migrations/$(basename $migration)" || echo "Migration already applied or failed: $(basename $migration)"
+              sudo docker exec -i ServerEyeAPI-postgres psql -U postgres -d servereye < "$migration" || echo "Migration already applied or failed: $(basename $migration)"
             fi
           done

-          # Run TimescaleDB migrations
-          echo "📝 Running TimescaleDB migrations..."
-          sudo docker exec ServereyeAPI-timescaledb psql -U postgres -d servereye -f /docker-entrypoint-initdb.d/init-timescaledb.sql || echo "TimescaleDB already initialized"
+          # Run TimescaleDB init first
+          echo "📝 Initializing TimescaleDB..."
+          if [ -f "./deployments/timescaledb/timescaledb-init.sql" ]; then
+            sudo docker exec -i ServerEyeAPI-timescaledb psql -U postgres -d servereye < "./deployments/timescaledb/timescaledb-init.sql" || echo "TimescaleDB already initialized"
+          fi
+
+          # Run TimescaleDB multi-tier
+          echo "📝 Setting up TimescaleDB multi-tier..."
+          if [ -f "./deployments/timescaledb/timescaledb-multi-tier.sql" ]; then
+            sudo docker exec -i ServerEyeAPI-timescaledb psql -U postgres -d servereye < "./deployments/timescaledb/timescaledb-multi-tier.sql" || echo "Multi-tier already configured"
+          fi
+
+          # Add missing column if needed
+          echo "📝 Adding storage_temperatures column if missing..."
+          sudo docker exec ServerEyeAPI-timescaledb psql -U postgres -d servereye -c "ALTER TABLE server_metrics ADD COLUMN IF NOT EXISTS storage_temperatures JSONB;" || echo "Column already exists"

-          # Apply migration-008 to TimescaleDB
-          echo "📝 Applying migration-008 to TimescaleDB..."
-          sudo docker exec ServereyeAPI-timescaledb psql -U postgres -d servereye -v ON_ERROR_STOP=1 -c "ALTER TABLE server_metrics ADD COLUMN IF NOT EXISTS storage_temperatures JSONB;"
-          sudo docker exec ServereyeAPI-timescaledb psql -U postgres -d servereye -v ON_ERROR_STOP=1 -c "UPDATE server_metrics SET storage_temperatures = '[]' WHERE storage_temperatures IS NULL;"
-          sudo docker exec ServereyeAPI-timescaledb psql -U postgres -d servereye -v ON_ERROR_STOP=1 -c "CREATE INDEX IF NOT EXISTS idx_server_metrics_storage_temperatures ON server_metrics USING GIN (storage_temperatures) WHERE storage_temperatures IS NOT NULL;"
-          sudo docker exec ServereyeAPI-timescaledb psql -U postgres -d servereye -v ON_ERROR_STOP=1 -c "COMMENT ON COLUMN server_metrics.storage_temperatures IS 'JSON array of storage device temperatures with device name, type, and temperature';"
+          # Verify TimescaleDB tables
+          echo "🔍 Verifying TimescaleDB tables..."
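+          # The count below looks up the four expected hypertables by name in
+          # information_schema.tables; `-t` gives tuples-only output and
+          # `tr -d ' '` strips psql padding so the value compares as a number.
+          # Note this check only warns on a mismatch - it never fails the job.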
+ TABLE_COUNT=$(sudo docker exec ServerEyeAPI-timescaledb psql -U postgres -d servereye -t -c "SELECT COUNT(*) FROM information_schema.tables WHERE table_schema='public' AND table_name IN ('server_metrics', 'server_status', 'server_commands', 'server_events');" | tr -d ' ') + if [ "$TABLE_COUNT" -eq "4" ]; then + echo "✅ All TimescaleDB hypertables created successfully" + else + echo "⚠️ Expected 4 hypertables, found: $TABLE_COUNT" + fi - # Create servereye database in postgres-static - echo "📝 Creating servereye database in postgres-static..." - sudo docker exec ServereyeAPI-postgres-static psql -U postgres -c "CREATE DATABASE servereye;" || echo "Database servereye already exists" + # Verify multi-tier aggregates (non-critical) + AGG_COUNT=$(sudo docker exec ServerEyeAPI-timescaledb psql -U postgres -d servereye -t -c "SELECT COUNT(*) FROM information_schema.tables WHERE table_name LIKE 'metrics_%_avg';" | tr -d ' ') + echo "📊 Found $AGG_COUNT multi-tier aggregates" # Run static data migrations (postgres-static database) echo "📝 Running static data migrations..." - for migration in ./deployments/migration-005-*.sql ./deployments/migration-006-*.sql ./deployments/migration-007-*.sql; do - if [ -f "$migration" ]; then + + # Initialize static data schema + if [ -f "./deployments/static-postgres/migration-005-static-data.sql" ]; then + echo "Initializing static data schema..." + sudo docker exec -i ServerEyeAPI-postgres-static psql -U servereye -d servereye < "./deployments/static-postgres/migration-005-static-data.sql" || echo "Static schema already initialized" + fi + + # Apply other static migrations + for migration in ./deployments/static-postgres/migration-*.sql; do + if [ -f "$migration" ] && [[ "$migration" != *"migration-005"* ]]; then echo "Applying static data migration: $(basename $migration)" - sudo docker exec -i ServereyeAPI-postgres-static psql -U postgres -d servereye < "$migration" || echo "Static migration already applied or failed: $(basename $migration)" + sudo docker exec -i ServerEyeAPI-postgres-static psql -U servereye -d servereye < "$migration" || echo "Static migration already applied or failed: $(basename $migration)" fi done @@ -470,7 +570,7 @@ jobs: # Check logs echo "📋 Checking service logs..." - sudo docker-compose logs --tail=50 + sudo docker compose logs --tail=50 # Check API container logs specifically echo "🔍 Checking API container logs..." @@ -541,4 +641,12 @@ jobs: echo "⚠️ Static info endpoints may not be configured" fi + # Test multi-tier metrics endpoints + echo "🔍 Testing multi-tier metrics endpoints..." + if curl -I -s "http://${{ secrets.PROD_HOST }}:8080/api/servers/test-server/metrics/tiered?start=$(date -d '1 hour ago' -Iseconds)&end=$(date -Iseconds)" | grep -q "200\|401\|404"; then + echo "✅ Multi-tier metrics endpoints are configured" + else + echo "⚠️ Multi-tier metrics endpoints may not be configured" + fi + echo "✅ Post-deployment tests completed!" diff --git a/Dockerfile b/Dockerfile index 0df9225..76f0b6f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -19,7 +19,7 @@ # SOFTWARE. # Build stage -FROM golang:1.25-alpine AS builder +FROM golang:1.26-alpine AS builder # Install git and ca-certificates RUN apk add --no-cache git ca-certificates tzdata diff --git a/Makefile b/Makefile index 0d13750..f663b00 100644 --- a/Makefile +++ b/Makefile @@ -144,6 +144,19 @@ docker-compose-down: docker-compose-logs: docker-compose logs -f +# Database management +db-migrate: + @echo "🗄️ Running database migrations..." 
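+# db-migrate pipes the multi-tier SQL into the TimescaleDB container over stdin.
+# It assumes the ServereyeAPI-timescaledb container from docker-compose is
+# already running; re-running simply re-applies the same file, so the script
+# is expected to be safe to repeat.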
+ docker exec -i ServereyeAPI-timescaledb psql -U postgres -d servereye < deployments/timescaledb/timescaledb-multi-tier.sql + +db-status: + @echo "📊 Checking database status..." + docker exec ServereyeAPI-timescaledb psql -U postgres -d servereye -c "SELECT table_name FROM information_schema.tables WHERE table_schema = 'public' AND table_name LIKE 'metrics_%_avg' ORDER BY table_name;" + +db-test: + @echo "🧪 Testing multi-tier metrics..." + curl -s "http://localhost:8080/api/servers/srv_17fdfe6d/metrics/tiered?start=$$(date -d '1 hour ago' -Iseconds)&end=$$(date -Iseconds)" | jq '{server_id, granularity, total_points}' + # Release build with optimizations and version RELEASE_LDFLAGS = -w -s \ -X github.com/godofphonk/ServerEyeAPI/internal/version.Version=$(VERSION) \ @@ -196,6 +209,11 @@ help: @echo " docker-compose-down - Stop services" @echo " docker-compose-logs - View service logs" @echo "" + @echo "Database:" + @echo " db-migrate - Run TimescaleDB migrations" + @echo " db-status - Check database aggregates status" + @echo " db-test - Test multi-tier metrics endpoint" + @echo "" @echo "Utilities:" @echo " clean - Clean build artifacts" @echo " deps - Download dependencies" diff --git a/README.md b/README.md index e70ff53..7aa0a0e 100644 --- a/README.md +++ b/README.md @@ -1,333 +1 @@ -# ServerEyeAPI - -[![Go Report Card](https://goreportcard.com/badge/github.com/godofphonk/ServerEyeAPI)](https://goreportcard.com/report/github.com/godofphonk/ServerEyeAPI) - -## Base URL - -```text -https://api.servereye.dev -``` - -## Authentication - -ServerEyeAPI supports multiple authentication methods: - -### 1. API Key Authentication (Recommended) - -For service-to-service communication. - -**Headers:** - -```text -X-API-Key: sk_your_api_key_here -``` - -**Default Development Key:** - -```text -sk_csharp_backend_development_key_change_in_production -``` - -### 2. Server Key Authentication - -For server agents and basic endpoints using server-specific keys. - -### 3. Bearer Token Authentication - -For protected admin endpoints (marked with 🔒). 
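A minimal sketch of the two header styles against endpoints documented below (the Bearer token value is an illustrative placeholder):

```bash
# Service-to-service calls: API key in the X-API-Key header
curl "https://api.servereye.dev/api/servers/srv_123456789/metrics/tiered?start=2026-02-17T18:00:00Z&end=2026-02-17T19:00:00Z" \
  -H "X-API-Key: sk_your_api_key_here"

# Protected admin endpoints (🔒): Bearer token in the Authorization header
curl "https://api.servereye.dev/api/servers" \
  -H "Authorization: Bearer <your_token>"
```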
-
-## Core Endpoints
-
-### 🔓 Health Check
-
-**Endpoint:** `GET /health`
-
-**Response:**
-
-```json
-{
-  "status": "healthy",
-  "timestamp": "2026-02-18T09:12:12Z",
-  "version": "1.0.0"
-}
-```
-
----
-
-### 🔓 Server Registration
-
-**Endpoint:** `POST /RegisterKey`
-
-**Request:**
-
-```json
-{
-  "hostname": "server-01",
-  "operating_system": "Ubuntu 22.04",
-  "agent_version": "1.0.0"
-}
-```
-
-**Response:**
-
-```json
-{
-  "server_id": "srv_123456789",
-  "server_key": "sk_abcdef123456",
-  "status": "registered"
-}
-```
-
----
-
-### 🔓 Metrics - Unified Tiered Endpoint
-
-**Endpoint:** `GET /api/servers/{server_id}/metrics/tiered`
-
-**Query Parameters:**
-- `start` (string, required): Start time (RFC3339 format)
-- `end` (string, required): End time (RFC3339 format)
-
-**Auto-Granularity Strategy:**
-- **Last hour**: 1-minute intervals
-- **Last 3 hours**: 5-minute intervals
-- **Last 24 hours**: 10-minute intervals
-- **Last 30 days**: 1-hour intervals
-
-**Response:**
-
-```json
-{
-  "server_id": "srv_d1dc36d8",
-  "start_time": "2026-02-17T18:00:00Z",
-  "end_time": "2026-02-17T19:00:00Z",
-  "granularity": "1m",
-  "data_points": [
-    {
-      "timestamp": "2026-02-17T18:00:00Z",
-      "cpu_avg": 3.31,
-      "cpu_max": 3.35,
-      "cpu_min": 3.28,
-      "memory_avg": 38.35,
-      "memory_max": 38.85,
-      "memory_min": 37.84,
-      "disk_avg": 68,
-      "disk_max": 68,
-      "network_avg": 1.24,
-      "network_max": 5.67,
-      "temp_avg": 58.89,
-      "temp_max": 72.87,
-      "load_avg": 2.12,
-      "load_max": 2.38,
-      "sample_count": 60
-    }
-  ],
-  "total_points": 61,
-  "network_details": {
-    "interfaces": [
-      {
-        "name": "enp111s0",
-        "status": "up",
-        "rx_bytes": 1674887389,
-        "tx_bytes": 148772743,
-        "rx_speed_mbps": 0.024,
-        "tx_speed_mbps": 0.023
-      }
-    ],
-    "total_rx_mbps": 0.095,
-    "total_tx_mbps": 0.183
-  },
-  "disk_details": {
-    "disks": [
-      {
-        "path": "/",
-        "free_gb": 171,
-        "used_gb": 354,
-        "total_gb": 553,
-        "filesystem": "/dev/nvme0n1p2",
-        "used_percent": 68
-      }
-    ]
-  },
-  "temperature_details": {
-    "cpu_temperature": 72.87,
-    "gpu_temperature": 49,
-    "system_temperature": 0,
-    "storage_temperatures": {},
-    "highest_temperature": 72.87,
-    "temperature_unit": "celsius"
-  }
-}
-```
-
-**Usage Examples:**
-
-```bash
-# Realtime (1 hour)
-curl "http://localhost:8080/api/servers/srv_d1dc36d8/metrics/tiered?start=2026-02-17T18:00:00Z&end=2026-02-17T19:00:00Z"
-
-# Historical (24 hours)
-curl "http://localhost:8080/api/servers/srv_d1dc36d8/metrics/tiered?start=2026-02-16T19:00:00Z&end=2026-02-17T19:00:00Z"
-
-# Historical (7 days)
-curl "http://localhost:8080/api/servers/srv_d1dc36d8/metrics/tiered?start=2026-02-10T19:00:00Z&end=2026-02-17T19:00:00Z"
-```
-
-**Note:** If the requested time period has no data, the API returns the latest available metrics with a `message` field.
-
----
-
-### 🔓 Server Sources Management
-
-**Add Source:** `POST /api/servers/{server_id}/sources`
-
-```json
-{
-  "source": "TGBot" // or "Web"
-}
-```
-
-**Get Sources:** `GET /api/servers/{server_id}/sources`
-
-**Remove Source:** `DELETE /api/servers/{server_id}/sources/{source}`
-
----
-
-### 🔓 Static Server Information
-
-**Endpoint:** `POST/PUT /api/servers/{server_id}/static-info`
-
-Update static/persistent server information (hardware, system details).
-
-**Request:**
-
-```json
-{
-  "server_info": {
-    "hostname": "gospodin-A620M-Pro-RS",
-    "os": "Ubuntu",
-    "os_version": "25.10",
-    "kernel": "6.17.0-14-generic",
-    "architecture": "x86_64"
-  },
-  "hardware_info": {
-    "cpu_model": "AMD Ryzen 5 5600X",
-    "cpu_cores": 6,
-    "cpu_threads": 12,
-    "cpu_frequency_mhz": 3700,
-    "gpu_model": "NVIDIA GeForce RTX 3080",
-    "gpu_driver": "550.120",
-    "gpu_memory_gb": 10,
-    "total_memory_gb": 32,
-    "motherboard": "ASRock A620M Pro RS",
-    "bios_version": "1.20"
-  },
-  "network_interfaces": [
-    {
-      "interface_name": "eth0",
-      "mac_address": "00:11:22:33:44:55",
-      "interface_type": "ethernet",
-      "speed_mbps": 1000,
-      "vendor": "Realtek",
-      "driver": "r8169",
-      "is_physical": true
-    }
-  ],
-  "disk_info": [
-    {
-      "device_name": "/dev/nvme0n1",
-      "model": "Samsung 980 PRO",
-      "serial_number": "S5GXNX0T123456",
-      "size_gb": 1000,
-      "disk_type": "nvme",
-      "interface_type": "nvme",
-      "filesystem": "ext4",
-      "mount_point": "/",
-      "is_system_disk": true
-    }
-  ]
-}
-```
-
-**Response:**
-
-```json
-{
-  "message": "Static information updated successfully",
-  "server_id": "srv_d1dc36d8"
-}
-```
-
-**Get Static Info:** `GET /api/servers/{server_id}/static-info`
-
-**Get Hardware Only:** `GET /api/servers/{server_id}/static-info/hardware`
-
-**Get Network Interfaces:** `GET /api/servers/{server_id}/static-info/network`
-
-**Get Disk Info:** `GET /api/servers/{server_id}/static-info/disks`
-
----
-
-### 🔐 API Key Management
-
-**Create Key:** `POST /api/admin/keys`
-
-```json
-{
-  "service_id": "csharp-backend",
-  "service_name": "C# Web Backend",
-  "permissions": ["metrics:read", "servers:read"],
-  "expires_days": 365
-}
-```
-
-**List Keys:** `GET /api/admin/keys`
-
-**Get Key Details:** `GET /api/admin/keys/{keyId}`
-
-**Revoke Key:** `DELETE /api/admin/keys/{keyId}`
-
----
-
-### 🔒 Server Management (Bearer Token Required)
-
-**List All Servers:** `GET /api/servers`
-
-**Get Server Status:** `GET /api/servers/{server_id}/status`
-
-## Quick Start
-
-1. **Register a server:**
-   ```bash
-   curl -X POST http://localhost:8080/RegisterKey \
-     -H "Content-Type: application/json" \
-     -d '{"hostname": "my-server", "operating_system": "Ubuntu 22.04"}'
-   ```
-
-2. **Get metrics:**
-   ```bash
-   curl "http://localhost:8080/api/servers/YOUR_SERVER_ID/metrics/tiered?start=$(date -d '1 hour ago' -Iseconds)&end=$(date -Iseconds)"
-   ```
-
-3.
**Use API key for backend integration:** - ```bash - curl "http://localhost:8080/api/servers/YOUR_SERVER_ID/metrics/tiered?start=2026-02-17T18:00:00Z&end=2026-02-17T19:00:00Z" \ - -H "X-API-Key: sk_csharp_backend_development_key_change_in_production" - ``` - -## Performance - -- **Response time:** <40ms for complex queries -- **Auto-granularity:** Optimized based on time range -- **Data retention:** 90 days (configurable) -- **Max time range:** 30 days per request - -## Error Codes - -- `400` - Bad Request (missing/invalid parameters) -- `401` - Unauthorized (invalid/missing credentials) -- `404` - Not Found (server doesn't exist) -- `500` - Internal Server Error +[![Go Report Card](https://goreportcard.com/badge/github.com/godofphonk/ServerEyeAPI)](https://goreportcard.com/report/github.com/godofphonk/ServerEyeAPI) \ No newline at end of file diff --git a/README.md.bak b/README.md.bak deleted file mode 100644 index c1a762e..0000000 --- a/README.md.bak +++ /dev/null @@ -1,594 +0,0 @@ -# ServerEyeAPI - -[![Go Report Card](https://goreportcard.com/badge/github.com/godofphonk/ServerEyeAPI)](https://goreportcard.com/report/github.com/godofphonk/ServerEyeAPI) - -## Base URL - -```text -https://api.servereye.dev -``` - -## Authentication - -ServerEyeAPI supports multiple authentication methods: - -### 1. Server Key Authentication - -For server agents and basic endpoints using server-specific keys. - -### 2. API Key Authentication - -For service-to-service communication (recommended for backend integration). - -**Headers:** - -```text -X-API-Key: sk_your_api_key_here -``` - -**Default C# Backend API Key:** - -```text -sk_csharp_backend_development_key_change_in_production -``` - -### 3. Bearer Token Authentication - -For protected admin endpoints (marked with 🔒). - -Protected endpoints are marked with 🔒 in the documentation. - -## Endpoints - -### 🔓 Authentication - -#### Register Server Key -Registers a new server and generates authentication credentials. - -**Endpoint:** `POST /RegisterKey` - -**Request Body:** - -```json -{ - "hostname": "server-01", - "operating_system": "Ubuntu 22.04", - "agent_version": "1.0.0" -} -``` - -**Response (201 Created):** - -```json -{ - "server_id": "srv_123456789", - "server_key": "sk_abcdef123456", - "status": "registered" -} -``` - ---- - -### 🔓 Health Check - -#### System Health - -Checks the health status of the API server and its dependencies. - -**Endpoint:** `GET /health` - -**Response (200 OK):** - -```json -{ - "status": "healthy", - "timestamp": "2026-01-19T05:29:00Z", - "version": "1.0.0", - "clients": 5 -} -``` - - ---- - -### 🔓 Metrics (Public) - -#### Get Server Metrics by ID - -Retrieves current metrics and status for a specific server. 
- -**Endpoint:** `GET /api/servers/{server_id}/metrics` **KEY FOR TEST - "key_954492a7"** - -**Path Parameters:** - -- `server_id` (string, required): Unique server identifier - -**Response (200 OK):** - -```json -{ - "server_id": "srv_123456789", - "status": { - "online": true, - "last_seen": "2026-01-19T05:29:00Z", - "cpu_usage": 45.2, - "memory_usage": 67.8, - "disk_usage": 23.1, - "network_rx": 1024, - "network_tx": 2048 - }, - "metrics": { - "timestamp": "2026-01-19T05:29:00Z", - "uptime": 86400, - "load_average": [0.5, 0.3, 0.2], - "processes": 156 - } -} -``` - -**Error Responses:** - -- `400 Bad Request` - Missing server_id -- `404 Not Found` - Server not found -- `500 Internal Server Error` - Failed to retrieve metrics - -#### Get Server Metrics by Key -Retrieves metrics using server key instead of ID (for Telegram bot integration). - -**Endpoint:** `GET /api/servers/by-key/{server_key}/metrics` - -**Path Parameters:** -- `server_key` (string, required): Server authentication **KEY FOR TEST - "key_954492a7"** - -**Response (200 OK):** -```json -{ - "server_id": "srv_123456789", - "server_key": "sk_abcdef123456", - "status": { - "online": true, - "last_seen": "2026-01-19T05:29:00Z", - "cpu_usage": 45.2, - "memory_usage": 67.8, - "disk_usage": 23.1 - }, - "metrics": { - "timestamp": "2026-01-19T05:29:00Z", - "uptime": 86400, - "load_average": [0.5, 0.3, 0.2] - } -} -``` - ---- - -### 🔓 Server Sources Management (Public) - -#### Add Server Source by ID -Adds a notification source for a specific server. - -**Endpoint:** `POST /api/servers/{server_id}/sources` - -**Path Parameters:** -- `server_id` (string, required): Unique server identifier - -**Request Body:** -```json -{ - "source": "TGBot" -} -``` - -**Source Values:** -- `"TGBot"` - Telegram Bot -- `"Web"` - Web dashboard - -**Response (200 OK):** -```json -{ - "server_id": "srv_123456789", - "source": "TGBot", - "message": "Source added successfully" -} -``` - -#### Get Server Sources by ID -Retrieves all notification sources for a server. - -**Endpoint:** `GET /api/servers/{server_id}/sources` - -**Path Parameters:** -- `server_id` (string, required): Unique server identifier **KEY FOR TEST - "key_954492a7"** - -**Response (200 OK):** -```json -{ - "server_id": "srv_123456789", - "sources": ["TGBot", "Web"] -} -``` - -#### Remove Server Source by ID -Removes a notification source from a server. - -**Endpoint:** `DELETE /api/servers/{server_id}/sources/{source}` - -**Path Parameters:** -- `server_id` (string, required): Unique server identifier -- `source` (string, required): Source type ("TGBot" or "Web") - -**Response (200 OK):** -```json -{ - "server_id": "srv_123456789", - "source": "TGBot", - "message": "Source removed successfully" -} -``` - -#### Add Server Source by Key -Adds notification source using server key. - -**Endpoint:** `POST /api/servers/by-key/{server_key}/sources` - -**Path Parameters:** -- `server_key` (string, required): Server authentication key - -**Request Body:** -```json -{ - "source": "Web" -} -``` - -**Response (200 OK):** -```json -{ - "message": "Source added successfully", - "server_id": "srv_123456789", - "server_key": "sk_abcdef123456", - "source": "Web" -} -``` - -#### Get Server Sources by Key -Retrieves notification sources using server key. 
- -**Endpoint:** `GET /api/servers/by-key/{server_key}/sources` - -**Path Parameters:** -- `server_key` (string, required): Server authentication key - -**Response (200 OK):** -```json -{ - "server_id": "srv_123456789", - "server_key": "sk_abcdef123456", - "sources": ["TGBot", "Web"] -} -``` - -#### Remove Server Source by Key -Removes notification source using server key. - -**Endpoint:** `DELETE /api/servers/by-key/{server_key}/sources/{source}` - -**Path Parameters:** -- `server_key` (string, required): Server authentication key -- `source` (string, required): Source type ("TGBot" or "Web") - -**Response (200 OK):** -```json -{ - "message": "Source removed successfully", - "server_id": "srv_123456789", - "server_key": "sk_abcdef123456", - "source": "TGBot" -} -``` - ---- - -### 🔐 API Key Management - -#### Create API Key -Creates a new API key for service authentication. - -**Endpoint:** `POST /api/admin/keys` - -**Headers:** -- `X-API-Key: ` - -**Request Body:** -```json -{ - "service_id": "csharp-backend", - "service_name": "C# Web Backend", - "permissions": ["metrics:read", "servers:read", "servers:validate"], - "expires_days": 365 -} -``` - -**Response (201 Created):** -```json -{ - "api_key": "sk_VhausxMPKH40oH66je21EWErL3JmTH8S", - "key_id": "key_j6mdji4Kjm_UiIGn26XQVg", - "service_id": "csharp-backend", - "service_name": "C# Web Backend", - "permissions": ["metrics:read", "servers:read", "servers:validate"], - "created_at": "2026-02-15T16:51:37.881571749Z" -} -``` - -#### List API Keys -Retrieves all API keys. - -**Endpoint:** `GET /api/admin/keys` - -**Headers:** -- `X-API-Key: ` - -**Response (200 OK):** -```json -[ - { - "key_id": "key_j6mdji4Kjm_UiIGn26XQVg", - "service_id": "csharp-backend", - "service_name": "C# Web Backend", - "permissions": ["metrics:read", "servers:read"], - "created_at": "2026-02-15T16:51:37.882303Z", - "is_active": true, - "last_used_at": "2026-02-15T16:52:00Z" - } -] -``` - -#### Get API Key Details -Retrieves details for a specific API key. - -**Endpoint:** `GET /api/admin/keys/{keyId}` - -**Headers:** -- `X-API-Key: ` - -#### Revoke API Key -Deactivates an API key. - -**Endpoint:** `DELETE /api/admin/keys/{keyId}` - -**Headers:** -- `X-API-Key: ` - ---- - -## Metrics Endpoint - -### Overview -The ServerEyeAPI provides a unified metrics endpoint with automatic granularity selection based on time ranges. - -### Granularity Strategy -- **Last hour**: 1-minute intervals -- **Last 3 hours**: 5-minute intervals -- **Last 24 hours**: 10-minute intervals -- **Last 30 days**: 1-hour intervals - -### Get Metrics with Auto-Granularity -Unified endpoint for all metrics queries. Automatically selects the best granularity based on time range. 
- -**Endpoint:** `GET /api/servers/{server_id}/metrics/tiered` - -**Query Parameters:** -- `start` (string, required): Start time (RFC3339 format) -- `end` (string, required): End time (RFC3339 format) - -**Response (with data):** -```json -{ - "server_id": "srv_71453434", - "start_time": "2026-02-15T19:00:00Z", - "end_time": "2026-02-15T20:00:00Z", - "granularity": "1m", - "data_points": [ - { - "timestamp": "2026-02-15T19:18:00Z", - "cpu_avg": 18.53, - "cpu_max": 18.54, - "cpu_min": 18.52, - "memory_avg": 71.31, - "memory_max": 71.85, - "memory_min": 70.84, - "disk_avg": 66, - "disk_max": 66, - "network_avg": 0.37, - "network_max": 2.12, - "temp_avg": 48.08, - "temp_max": 60.25, - "load_avg": 3.12, - "load_max": 3.75, - "sample_count": 50 - } - ], - "total_points": 26 -} -``` - -**Response (showing available data when requested period is empty):** -```json -{ - "server_id": "srv_71453434", - "start_time": "2026-02-14T19:00:00Z", - "end_time": "2026-02-15T19:00:00Z", - "granularity": "1m", - "data_points": [ - { - "timestamp": "2026-02-15T18:18:00Z", - "cpu_avg": 18.53, - "memory_avg": 71.31, - "..." - } - ], - "total_points": 26, - "message": "Showing available data (requested period had no data)" -} -``` - -**Note:** If the requested time period has no data (e.g., server was recently installed), the API will automatically return the latest available metrics with a `message` field explaining the situation. This ensures the frontend always has data to display. - -### Usage Examples - -**Dashboard (5 minutes):** -```bash -curl "http://localhost:8080/api/servers/srv_71453434/metrics/tiered?start=2026-02-15T19:00:00Z&end=2026-02-15T19:05:00Z" -``` - -**Realtime (1 hour):** -```bash -curl "http://localhost:8080/api/servers/srv_71453434/metrics/tiered?start=2026-02-15T18:00:00Z&end=2026-02-15T19:00:00Z" -``` - -**Historical (6 hours):** -```bash -curl "http://localhost:8080/api/servers/srv_71453434/metrics/tiered?start=2026-02-15T13:00:00Z&end=2026-02-15T19:00:00Z" -``` - -**Historical (24 hours):** -```bash -curl "http://localhost:8080/api/servers/srv_71453434/metrics/tiered?start=2026-02-14T19:00:00Z&end=2026-02-15T19:00:00Z" -``` - -**Historical (7 days):** -```bash -curl "http://localhost:8080/api/servers/srv_71453434/metrics/tiered?start=2026-02-08T19:00:00Z&end=2026-02-15T19:00:00Z" -``` - -**Historical (30 days):** -```bash -curl "http://localhost:8080/api/servers/srv_71453434/metrics/tiered?start=2026-01-16T19:00:00Z&end=2026-02-15T19:00:00Z" -``` - ---- - -## Metrics Management Commands - -The ServerEyeAPI provides commands for managing the multi-tier metrics system. - -### Available Commands - -1. **Refresh Aggregates** - Update continuous aggregates with latest data -2. **Rebuild Aggregates** - Rebuild aggregates from scratch -3. **Cleanup Old Metrics** - Remove old data based on retention policies -4. **Compression Policy** - Apply compression to save storage -5. **RetentionPolicy** - Configure automatic data deletion -6. **Metrics Statistics** - Get storage and performance statistics -7. **Analyze Performance** - Analyze query performance -8. **Export/Import Metrics** - Backup and restore metrics data -9. **Validate Metrics** - Check data integrity -10. 
**Optimize Storage** - Optimize TimescaleDB performance - -### Example Usage - -```bash -# Refresh all aggregates -curl -X POST http://localhost:8080/api/servers/management/command \ - -H "Content-Type: application/json" \ - -H "Authorization: Bearer " \ - -d '{ - "server_id": "management", - "type": "refresh_aggregates", - "payload": {"granularity": "all"} - }' - -# Get metrics statistics -curl -X POST http://localhost:8080/api/servers/management/command \ - -H "Content-Type: application/json" \ - -H "Authorization: Bearer " \ - -d '{ - "server_id": "management", - "type": "metrics_stats", - "payload": {} - }' - -# Cleanup old metrics (dry run) -curl -X POST http://localhost:8080/api/servers/management/command \ - -H "Content-Type: application/json" \ - -H "Authorization: Bearer " \ - -d '{ - "server_id": "management", - "type": "cleanup_old_metrics", - "payload": { - "older_than": "90 days", - "dry_run": true - } - }' -``` - -For detailed documentation, see [Metrics Management Commands](docs/metrics-commands.md). - ---- - -### 🔒 Server Management (Protected) - -#### List All Servers -Retrieves list of all registered servers with their status. - -**Endpoint:** `GET /api/servers` - -**Headers:** -- `Authorization: Bearer ` - -**Response (200 OK):** -```json -{ - "count": 2, - "servers": [ - { - "server_id": "srv_123456789", - "status": { - "online": true, - "last_seen": "2026-01-19T05:29:00Z", - "cpu_usage": 45.2, - "memory_usage": 67.8 - } - }, - { - "server_id": "srv_987654321", - "status": { - "online": false, - "last_seen": "2026-01-19T05:20:00Z" - } - } - ], - "timestamp": "2026-01-19T05:29:00Z" -} -``` - -#### Get Server Status -Retrieves detailed status information for a specific server. - -**Endpoint:** `GET /api/servers/{server_id}/status` - -**Headers:** -- `Authorization: Bearer ` - -**Path Parameters:** -- `server_id` (string, required): Unique server identifier - -**Response (200 OK):** -```json -{ - "server_id": "srv_123456789", - "status": { - "online": true, - "last_seen": "2026-01-19T05:29:00Z", - "cpu_usage": 45.2, - "memory_usage": 67.8, - "disk_usage": 23.1, - "network_rx": 1024, - "network_tx": 2048, - "uptime": 86400, - "load_average": [0.5, 0.3, 0.2], - "processes": 156 - } -} -``` \ No newline at end of file diff --git a/deployments/README.md b/deployments/README.md new file mode 100644 index 0000000..6022360 --- /dev/null +++ b/deployments/README.md @@ -0,0 +1,82 @@ +# Database Migrations + +This directory contains database migrations organized by database type. 
+
+## Structure
+
+```
+deployments/
+├── postgres/          # Main PostgreSQL database migrations
+├── timescaledb/       # TimescaleDB (metrics) migrations
+├── static-postgres/   # Static data PostgreSQL database migrations
+└── README.md
+```
+
+## Migration Files
+
+### PostgreSQL (Main Database)
+**Location:** `deployments/postgres/`
+
+- `migration-001-server-keys.sql` - Server keys table
+- `migration-002-fix-server-table.sql` - Server table fixes
+- `migration-003-add-sources-column.sql` - Add sources column
+- `migration-004-api-keys.sql` - API keys management
+- `migration-009-alerts-table.sql` - Alerts system
+- `migration-010-server-source-identifiers.sql` - Server source identifiers
+- `migration-011-add-telegram-id.sql` - Telegram ID for account linking
+
+### TimescaleDB (Metrics Database)
+**Location:** `deployments/timescaledb/`
+
+- `timescaledb-init.sql` - Initial TimescaleDB setup
+- `timescaledb-multi-tier.sql` - Multi-tier metrics with auto-granularity
+- `timescaledb-optimized-granularity.sql` - Optimized 30m/2h/6h continuous aggregates
+- `timescaledb-optimized-function.sql` - Optimized granularity selection function
+- `migration-012-optimized-granularity.sql` - Master migration for the optimized granularity system
+
+### Static PostgreSQL (Static Data Database)
+**Location:** `deployments/static-postgres/`
+
+- `migration-005-static-data.sql` - Static server data schema
+- `migration-006-memory-motherboard.sql` - Memory and motherboard info
+- `migration-007-fix-hardware-info.sql` - Hardware info fixes
+- `migration-008-add-storage-temperatures.sql` - Storage temperature tracking
+- `migration-009-fix-numeric-fields.sql` - Numeric columns widened for decimal hardware values
+- `migration-010-motherboard-info.sql` - Motherboard and BIOS details table
+
+## Migration Naming Convention
+
+- **PostgreSQL:** `migration-XXX-description.sql`
+- **TimescaleDB:** `timescaledb-description.sql` (master migrations use `migration-XXX-description.sql`)
+- **Static PostgreSQL:** `migration-XXX-description.sql`
+
+## Applying Migrations
+
+Migrations are automatically applied during deployment via the CI/CD pipeline.
+
+### Manual Application (Development)
+
+```bash
+# PostgreSQL (main)
+docker exec -i ServereyeAPI-postgres psql -U postgres -d servereye < deployments/postgres/migration-XXX.sql
+
+# TimescaleDB
+docker exec -i ServereyeAPI-timescaledb psql -U postgres -d servereye < deployments/timescaledb/timescaledb-XXX.sql
+
+# Static PostgreSQL (note: this database uses the servereye user)
+docker exec -i ServereyeAPI-postgres-static psql -U servereye -d servereye < deployments/static-postgres/migration-XXX.sql
+```
+
+## Database Purposes
+
+### Main PostgreSQL
+- Server registration and metadata
+- API keys and authentication
+- Server sources and identifiers
+- Alerts configuration
+
+### TimescaleDB
+- Time-series metrics data
+- Multi-tier aggregations (1m, 5m, 10m, 30m, 1h, 2h, 6h)
+- Historical metrics storage
+
+### Static PostgreSQL
+- Server hardware information
+- Network interfaces configuration
+- Disk information
+- Motherboard and memory details
diff --git a/deployments/migration-001-server-keys.sql b/deployments/postgres/migration-001-server-keys.sql
similarity index 100%
rename from deployments/migration-001-server-keys.sql
rename to deployments/postgres/migration-001-server-keys.sql
diff --git a/deployments/migration-002-fix-server-table.sql b/deployments/postgres/migration-002-fix-server-table.sql
similarity index 100%
rename from deployments/migration-002-fix-server-table.sql
rename to deployments/postgres/migration-002-fix-server-table.sql
diff --git a/deployments/migration-003-add-sources-column.sql b/deployments/postgres/migration-003-add-sources-column.sql
similarity index 100%
rename from deployments/migration-003-add-sources-column.sql
rename to deployments/postgres/migration-003-add-sources-column.sql
diff --git a/deployments/migration-004-api-keys.sql b/deployments/postgres/migration-004-api-keys.sql
similarity index 100%
rename from deployments/migration-004-api-keys.sql
rename to
deployments/postgres/migration-004-api-keys.sql diff --git a/deployments/migration-009-alerts-table.sql b/deployments/postgres/migration-009-alerts-table.sql similarity index 100% rename from deployments/migration-009-alerts-table.sql rename to deployments/postgres/migration-009-alerts-table.sql diff --git a/deployments/postgres/migration-010-server-source-identifiers.sql b/deployments/postgres/migration-010-server-source-identifiers.sql new file mode 100644 index 0000000..fe222c7 --- /dev/null +++ b/deployments/postgres/migration-010-server-source-identifiers.sql @@ -0,0 +1,64 @@ +-- Copyright (c) 2026 godofphonk +-- +-- Permission is hereby granted, free of charge, to any person obtaining a copy +-- of this software and associated documentation files (the "Software"), to deal +-- in the Software without restriction, including without limitation the rights +-- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +-- copies of the Software, and to permit persons to whom the Software is +-- furnished to do so, subject to the following conditions: +-- +-- The above copyright notice and this permission notice shall be included in +-- all copies or substantial portions of the Software. +-- +-- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +-- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +-- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +-- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +-- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +-- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +-- SOFTWARE. + +-- Migration 010: Add server source identifiers table +-- For storing multiple identifiers per source type (TG IDs, user IDs, emails) + +-- Create server_source_identifiers table +CREATE TABLE IF NOT EXISTS server_source_identifiers ( + id SERIAL PRIMARY KEY, + server_id VARCHAR(255) NOT NULL, + source_type VARCHAR(50) NOT NULL, + identifier VARCHAR(255) NOT NULL, + identifier_type VARCHAR(50) NOT NULL, + metadata JSONB DEFAULT '{}', + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + + FOREIGN KEY (server_id) REFERENCES servers(server_id) ON DELETE CASCADE, + UNIQUE(server_id, source_type, identifier) +); + +-- Create indexes for performance +CREATE INDEX IF NOT EXISTS idx_server_source_identifiers_server_id ON server_source_identifiers(server_id); +CREATE INDEX IF NOT EXISTS idx_server_source_identifiers_source_type ON server_source_identifiers(source_type); +CREATE INDEX IF NOT EXISTS idx_server_source_identifiers_identifier ON server_source_identifiers(identifier); +CREATE INDEX IF NOT EXISTS idx_server_source_identifiers_composite ON server_source_identifiers(server_id, source_type); + +-- Create trigger for updated_at timestamp +CREATE OR REPLACE FUNCTION update_server_source_identifiers_updated_at() +RETURNS TRIGGER AS $$ +BEGIN + NEW.updated_at = CURRENT_TIMESTAMP; + RETURN NEW; +END; +$$ language 'plpgsql'; + +CREATE TRIGGER trigger_server_source_identifiers_updated_at + BEFORE UPDATE ON server_source_identifiers + FOR EACH ROW + EXECUTE FUNCTION update_server_source_identifiers_updated_at(); + +-- Add comment +COMMENT ON TABLE server_source_identifiers IS 'Stores identifiers for server sources (TG IDs, user IDs, emails)'; +COMMENT ON COLUMN server_source_identifiers.source_type IS 'Type of source: TGBot, Web, Email, etc.'; +COMMENT ON COLUMN 
server_source_identifiers.identifier IS 'Identifier value: Telegram ID, User ID, Email address'; +COMMENT ON COLUMN server_source_identifiers.identifier_type IS 'Type of identifier: telegram_id, user_id, email'; +COMMENT ON COLUMN server_source_identifiers.metadata IS 'Additional metadata in JSON format'; diff --git a/deployments/postgres/migration-011-add-telegram-id.sql b/deployments/postgres/migration-011-add-telegram-id.sql new file mode 100644 index 0000000..65a8b08 --- /dev/null +++ b/deployments/postgres/migration-011-add-telegram-id.sql @@ -0,0 +1,34 @@ +-- Copyright (c) 2026 godofphonk +-- +-- Permission is hereby granted, free of charge, to any person obtaining a copy +-- of this software and associated documentation files (the "Software"), to deal +-- in the Software without restriction, including without limitation the rights +-- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +-- copies of the Software, and to permit persons to whom the Software is +-- furnished to do so, subject to the following conditions: +-- +-- The above copyright notice and this permission notice shall be included in +-- all copies or substantial portions of the Software. +-- +-- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +-- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +-- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +-- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +-- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +-- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +-- SOFTWARE. + +-- Migration 011: Add telegram_id field to server_source_identifiers +-- For linking accounts between Telegram bot and web application + +-- Add telegram_id column (optional field) +ALTER TABLE server_source_identifiers +ADD COLUMN IF NOT EXISTS telegram_id BIGINT; + +-- Create index for fast lookups by telegram_id +CREATE INDEX IF NOT EXISTS idx_server_source_identifiers_telegram_id +ON server_source_identifiers(telegram_id) +WHERE telegram_id IS NOT NULL; + +-- Add comment +COMMENT ON COLUMN server_source_identifiers.telegram_id IS 'Telegram user ID for linking accounts between TG bot and web application'; diff --git a/deployments/migration-005-static-data.sql b/deployments/static-postgres/migration-005-static-data.sql similarity index 100% rename from deployments/migration-005-static-data.sql rename to deployments/static-postgres/migration-005-static-data.sql diff --git a/deployments/migration-006-memory-motherboard.sql b/deployments/static-postgres/migration-006-memory-motherboard.sql similarity index 100% rename from deployments/migration-006-memory-motherboard.sql rename to deployments/static-postgres/migration-006-memory-motherboard.sql diff --git a/deployments/migration-007-fix-hardware-info.sql b/deployments/static-postgres/migration-007-fix-hardware-info.sql similarity index 100% rename from deployments/migration-007-fix-hardware-info.sql rename to deployments/static-postgres/migration-007-fix-hardware-info.sql diff --git a/deployments/migration-008-add-storage-temperatures.sql b/deployments/static-postgres/migration-008-add-storage-temperatures.sql similarity index 100% rename from deployments/migration-008-add-storage-temperatures.sql rename to deployments/static-postgres/migration-008-add-storage-temperatures.sql diff --git a/deployments/static-postgres/migration-009-fix-numeric-fields.sql 
b/deployments/static-postgres/migration-009-fix-numeric-fields.sql new file mode 100644 index 0000000..5a63e80 --- /dev/null +++ b/deployments/static-postgres/migration-009-fix-numeric-fields.sql @@ -0,0 +1,37 @@ +-- Copyright (c) 2026 godofphonk +-- +-- Permission is hereby granted, free of charge, to any person obtaining a copy +-- of this software and associated documentation files (the "Software"), to deal +-- in the Software without restriction, including without limitation the rights +-- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +-- copies of the Software, and to permit persons to whom the Software is +-- furnished to do so, subject to the following conditions: +-- +-- The above copyright notice and this permission notice shall be included in +-- all copies or substantial portions of the Software. +-- +-- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +-- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +-- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +-- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +-- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +-- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +-- SOFTWARE. + +-- Migration 009: Fix numeric fields to support decimal values +-- For hardware info that can have decimal values (frequency, memory, etc.) + +-- Convert integer fields to numeric to support decimal values +ALTER TABLE static_data.hardware_info +ALTER COLUMN cpu_frequency_mhz TYPE NUMERIC(10,2) USING cpu_frequency_mhz::NUMERIC(10,2); + +ALTER TABLE static_data.hardware_info +ALTER COLUMN gpu_memory_gb TYPE NUMERIC(10,2) USING gpu_memory_gb::NUMERIC(10,2); + +ALTER TABLE static_data.hardware_info +ALTER COLUMN total_memory_gb TYPE NUMERIC(10,2) USING total_memory_gb::NUMERIC(10,2); + +-- Add comments for clarity +COMMENT ON COLUMN static_data.hardware_info.cpu_frequency_mhz IS 'CPU frequency in MHz (can be decimal)'; +COMMENT ON COLUMN static_data.hardware_info.gpu_memory_gb IS 'GPU memory in GB (can be decimal)'; +COMMENT ON COLUMN static_data.hardware_info.total_memory_gb IS 'Total system memory in GB (can be decimal)'; diff --git a/deployments/static-postgres/migration-010-motherboard-info.sql b/deployments/static-postgres/migration-010-motherboard-info.sql new file mode 100644 index 0000000..21e1035 --- /dev/null +++ b/deployments/static-postgres/migration-010-motherboard-info.sql @@ -0,0 +1,66 @@ +-- Migration 010: Add motherboard_info table +-- This table stores motherboard and BIOS information + +-- Create motherboard_info table +CREATE TABLE IF NOT EXISTS static_data.motherboard_info ( + server_id VARCHAR(255) PRIMARY KEY, + manufacturer VARCHAR(255), + model VARCHAR(255), + chipset VARCHAR(255), + bios_version VARCHAR(255), + bios_date DATE, + bios_vendor VARCHAR(255), + form_factor VARCHAR(255), + max_memory_gb INTEGER, + memory_slots INTEGER, + supported_memory_types TEXT[], -- Array of supported memory types + onboard_video BOOLEAN DEFAULT FALSE, + onboard_audio BOOLEAN DEFAULT FALSE, + onboard_network BOOLEAN DEFAULT FALSE, + sata_ports INTEGER, + sata_speed VARCHAR(50), -- e.g., "6 Gbps", "3 Gbps" + m2_slots INTEGER, + pcie_slots TEXT[], -- Array of PCIe slots info + usb_ports_total INTEGER, + usb_ports_2_0 INTEGER, + usb_ports_3_0 INTEGER, + usb_ports_c INTEGER, + created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(), + updated_at TIMESTAMP WITH TIME ZONE DEFAULT 
NOW(),
+
+    -- Foreign key constraint to server_info table
+    CONSTRAINT fk_motherboard_server
+        FOREIGN KEY (server_id)
+        REFERENCES static_data.server_info(server_id)
+        ON DELETE CASCADE
+);
+
+-- Create indexes for faster queries
+CREATE INDEX IF NOT EXISTS idx_motherboard_info_manufacturer
+ON static_data.motherboard_info(manufacturer);
+
+CREATE INDEX IF NOT EXISTS idx_motherboard_info_model
+ON static_data.motherboard_info(model);
+
+-- Create trigger to update updated_at timestamp
+CREATE OR REPLACE FUNCTION static_data.update_motherboard_info_timestamp()
+RETURNS TRIGGER AS $$
+BEGIN
+    NEW.updated_at = NOW();
+    RETURN NEW;
+END;
+$$ language 'plpgsql';
+
+CREATE TRIGGER trigger_update_motherboard_info_timestamp
+    BEFORE UPDATE ON static_data.motherboard_info
+    FOR EACH ROW
+    EXECUTE FUNCTION static_data.update_motherboard_info_timestamp();
+
+-- Add comments (these must reference columns that exist in the table above)
+COMMENT ON TABLE static_data.motherboard_info IS 'Stores motherboard and BIOS information for each server';
+COMMENT ON COLUMN static_data.motherboard_info.server_id IS 'Foreign key to server_info';
+COMMENT ON COLUMN static_data.motherboard_info.model IS 'Motherboard model/name';
+COMMENT ON COLUMN static_data.motherboard_info.bios_version IS 'BIOS firmware version';
+COMMENT ON COLUMN static_data.motherboard_info.bios_vendor IS 'BIOS manufacturer';
+COMMENT ON COLUMN static_data.motherboard_info.bios_date IS 'BIOS release date';
+COMMENT ON COLUMN static_data.motherboard_info.supported_memory_types IS 'Array of supported memory types';
diff --git a/deployments/timescaledb/migration-012-optimized-granularity.sql b/deployments/timescaledb/migration-012-optimized-granularity.sql
new file mode 100644
index 0000000..5b2c12c
--- /dev/null
+++ b/deployments/timescaledb/migration-012-optimized-granularity.sql
@@ -0,0 +1,47 @@
+-- Migration 012: Optimized Granularity System
+-- Copyright (c) 2026 godofphonk
+--
+-- This migration implements enterprise-level optimized granularity for better visualization performance
+-- Reduces data points significantly while maintaining essential information
+
+-- Apply optimized granularity views
+\i deployments/timescaledb/timescaledb-optimized-granularity.sql
+
+-- Apply optimized granularity function
+\i deployments/timescaledb/timescaledb-optimized-function.sql
+
+-- Verify new aggregates exist
+DO $$
+BEGIN
+    -- Check if all optimized views exist
+    IF NOT EXISTS (SELECT 1 FROM information_schema.views WHERE table_name = 'metrics_30m_avg') THEN
+        RAISE EXCEPTION 'metrics_30m_avg view not found after migration';
+    END IF;
+
+    IF NOT EXISTS (SELECT 1 FROM information_schema.views WHERE table_name = 'metrics_2h_avg') THEN
+        RAISE EXCEPTION 'metrics_2h_avg view not found after migration';
+    END IF;
+
+    IF NOT EXISTS (SELECT 1 FROM information_schema.views WHERE table_name = 'metrics_6h_avg') THEN
+        RAISE EXCEPTION 'metrics_6h_avg view not found after migration';
+    END IF;
+
+    -- Check if optimized function exists
+    IF NOT EXISTS (SELECT 1 FROM pg_proc WHERE proname = 'get_metrics_by_granularity') THEN
+        RAISE EXCEPTION 'get_metrics_by_granularity function not found after migration';
+    END IF;
+
+    RAISE NOTICE 'Optimized granularity system migration completed successfully';
+END $$;
+
+-- Add migration record
+INSERT INTO schema_migrations (version, description, applied_at)
+VALUES ('012', 'Optimized granularity system for better visualization performance', NOW())
+ON CONFLICT (version) DO NOTHING;
+
+-- Refresh all new aggregates to ensure they have data
+CALL refresh_continuous_aggregate('metrics_30m_avg', NULL, NULL);
+CALL refresh_continuous_aggregate('metrics_2h_avg', NULL, NULL);
+CALL refresh_continuous_aggregate('metrics_6h_avg', NULL, NULL);
+
+-- RAISE is only valid inside PL/pgSQL, so the final notice needs a DO block
+DO $$
+BEGIN
+    RAISE NOTICE 'Migration 012: Optimized granularity system applied successfully';
+END $$;
diff --git a/deployments/timescaledb-init.sql b/deployments/timescaledb/timescaledb-init.sql
similarity index 100%
rename from deployments/timescaledb-init.sql
rename to deployments/timescaledb/timescaledb-init.sql
diff --git a/deployments/timescaledb-multi-tier.sql b/deployments/timescaledb/timescaledb-multi-tier.sql
similarity index 100%
rename from deployments/timescaledb-multi-tier.sql
rename to deployments/timescaledb/timescaledb-multi-tier.sql
diff --git a/deployments/timescaledb/timescaledb-optimized-function.sql b/deployments/timescaledb/timescaledb-optimized-function.sql
new file mode 100644
index 0000000..9b1ca2c
--- /dev/null
+++ b/deployments/timescaledb/timescaledb-optimized-function.sql
@@ -0,0 +1,106 @@
+-- Updated get_metrics_by_granularity function with optimized granularities
+-- Replaces the old function with new enterprise-level granularity selection
+
+-- Drop existing function
+DROP FUNCTION IF EXISTS get_metrics_by_granularity(TEXT, TIMESTAMPTZ, TIMESTAMPTZ);
+
+CREATE OR REPLACE FUNCTION get_metrics_by_granularity(
+    p_server_id TEXT,
+    p_start_time TIMESTAMPTZ,
+    p_end_time TIMESTAMPTZ
+)
+RETURNS TABLE (
+    bucket TIMESTAMPTZ,
+    avg_cpu DOUBLE PRECISION,
+    max_cpu DOUBLE PRECISION,
+    min_cpu DOUBLE PRECISION,
+    avg_memory DOUBLE PRECISION,
+    max_memory DOUBLE PRECISION,
+    min_memory DOUBLE PRECISION,
+    avg_disk DOUBLE PRECISION,
+    max_disk DOUBLE PRECISION,
+    avg_network DOUBLE PRECISION,
+    max_network DOUBLE PRECISION,
+    sample_count BIGINT,
+    granularity TEXT
+) AS $$
+BEGIN
+    -- Use 1-minute data for last hour (max 60 points)
+    IF p_end_time - p_start_time <= INTERVAL '1 hour' THEN
+        RETURN QUERY
+        SELECT
+            m.bucket, m.avg_cpu, m.max_cpu, m.min_cpu,
+            m.avg_memory, m.max_memory, m.min_memory,
+            m.avg_disk, m.max_disk, m.avg_network, m.max_network,
+            m.sample_count, '1m'::TEXT
+        FROM metrics_1m_avg m
+        WHERE m.server_id = p_server_id
+          AND m.bucket BETWEEN p_start_time AND p_end_time
+        ORDER BY m.bucket;
+
+    -- Use 10-minute data for 1-6 hours (max 36 points)
+    ELSIF p_end_time - p_start_time <= INTERVAL '6 hours' THEN
+        RETURN QUERY
+        SELECT
+            m.bucket, m.avg_cpu, m.max_cpu, m.min_cpu,
+            m.avg_memory, m.max_memory, m.min_memory,
+            m.avg_disk, m.max_disk, m.avg_network, m.max_network,
+            m.sample_count, '10m'::TEXT
+        FROM metrics_10m_avg m
+        WHERE m.server_id = p_server_id
+          AND m.bucket BETWEEN p_start_time AND p_end_time
+        ORDER BY m.bucket;
+
+    -- Use 30-minute data for 6-24 hours (max 48 points)
+    ELSIF p_end_time - p_start_time <= INTERVAL '24 hours' THEN
+        RETURN QUERY
+        SELECT
+            m.bucket, m.avg_cpu, m.max_cpu, m.min_cpu,
+            m.avg_memory, m.max_memory, m.min_memory,
+            m.avg_disk, m.max_disk, m.avg_network, m.max_network,
+            m.sample_count, '30m'::TEXT
+        FROM metrics_30m_avg m
+        WHERE m.server_id = p_server_id
+          AND m.bucket BETWEEN p_start_time AND p_end_time
+        ORDER BY m.bucket;
+
+    -- Use 2-hour data for 1-7 days (max 84 points)
+    ELSIF p_end_time - p_start_time <= INTERVAL '7 days' THEN
+        RETURN QUERY
+        SELECT
+            m.bucket, m.avg_cpu, m.max_cpu, m.min_cpu,
+            m.avg_memory, m.max_memory, m.min_memory,
+            m.avg_disk, m.max_disk, m.avg_network, m.max_network,
+            m.sample_count, '2h'::TEXT
+        FROM metrics_2h_avg m
+        WHERE m.server_id = p_server_id
+          AND m.bucket BETWEEN p_start_time AND p_end_time
diff --git a/deployments/timescaledb/timescaledb-optimized-granularity.sql b/deployments/timescaledb/timescaledb-optimized-granularity.sql
new file mode 100644
index 0000000..fb55ef6
--- /dev/null
+++ b/deployments/timescaledb/timescaledb-optimized-granularity.sql
@@ -0,0 +1,247 @@
+-- Optimized granularity views for better visualization performance
+-- Based on enterprise-level requirements: 30m, 2h, 6h granularities
+
+-- Drop existing optimized views if they exist
+DROP MATERIALIZED VIEW IF EXISTS metrics_30m_avg CASCADE;
+DROP MATERIALIZED VIEW IF EXISTS metrics_2h_avg CASCADE;
+DROP MATERIALIZED VIEW IF EXISTS metrics_6h_avg CASCADE;
+
+-- Level 3.5: 30-minute metrics (6-24 hours) - optimized for dashboard visualization
+CREATE MATERIALIZED VIEW metrics_30m_avg WITH (timescaledb.continuous) AS
+SELECT
+    time_bucket('30 minutes', time) AS bucket,
+    server_id,
+    hostname,
+    os_info,
+    -- Core metrics
+    AVG(cpu_usage) as avg_cpu,
+    MAX(cpu_usage) as max_cpu,
+    MIN(cpu_usage) as min_cpu,
+    PERCENTILE_CONT(0.95) WITHIN GROUP (ORDER BY cpu_usage) as p95_cpu,
+
+    AVG(memory_usage) as avg_memory,
+    MAX(memory_usage) as max_memory,
+    MIN(memory_usage) as min_memory,
+    PERCENTILE_CONT(0.95) WITHIN GROUP (ORDER BY memory_usage) as p95_memory,
+
+    AVG(disk_usage) as avg_disk,
+    MAX(disk_usage) as max_disk,
+    MIN(disk_usage) as min_disk,
+
+    -- Network totals
+    AVG(network_usage) as avg_network,
+    MAX(network_usage) as max_network,
+    SUM(network_usage) as total_network,
+
+    -- Temperature statistics
+    AVG(cpu_temperature) as avg_cpu_temp,
+    MAX(cpu_temperature) as max_cpu_temp,
+    AVG(highest_temperature) as avg_highest_temp,
+    MAX(highest_temperature) as max_highest_temp,
+    PERCENTILE_CONT(0.95) WITHIN GROUP (ORDER BY highest_temperature) as p95_temp,
+
+    -- Load averages
+    AVG(load_avg_1m) as avg_load_1m,
+    MAX(load_avg_1m) as max_load_1m,
+    AVG(load_avg_5m) as avg_load_5m,
+    MAX(load_avg_5m) as max_load_5m,
+    AVG(load_avg_15m) as avg_load_15m,
+    MAX(load_avg_15m) as max_load_15m,
+
+    -- Memory utilization (calculated from memory fields)
+    AVG(CASE WHEN memory_total_gb > 0 THEN (memory_used_gb / memory_total_gb) * 100 ELSE 0 END) as avg_memory_util,
+    MAX(CASE WHEN memory_total_gb > 0 THEN (memory_used_gb / memory_total_gb) * 100 ELSE 0 END) as max_memory_util,
+    MIN(CASE WHEN memory_total_gb > 0 THEN (memory_used_gb / memory_total_gb) * 100 ELSE 0 END) as min_memory_util,
+
+    -- Process count (using processes_total)
+    AVG(processes_total) as avg_process_count,
+    MAX(processes_total) as max_process_count,
+
+    -- Uptime (using uptime_seconds)
+
MAX(uptime_seconds) as max_uptime, + + -- Sample statistics + COUNT(*) as sample_count, + MIN(time) as first_seen, + MAX(time) as last_seen +FROM server_metrics +GROUP BY bucket, server_id, hostname, os_info; + +-- Level 4.5: 2-hour metrics (1-7 days) - optimized for weekly trends +CREATE MATERIALIZED VIEW metrics_2h_avg WITH (timescaledb.continuous) AS +SELECT + time_bucket('2 hours', time) AS bucket, + server_id, + hostname, + os_info, + -- Core metrics + AVG(cpu_usage) as avg_cpu, + MAX(cpu_usage) as max_cpu, + MIN(cpu_usage) as min_cpu, + PERCENTILE_CONT(0.95) WITHIN GROUP (ORDER BY cpu_usage) as p95_cpu, + + AVG(memory_usage) as avg_memory, + MAX(memory_usage) as max_memory, + MIN(memory_usage) as min_memory, + PERCENTILE_CONT(0.95) WITHIN GROUP (ORDER BY memory_usage) as p95_memory, + + AVG(disk_usage) as avg_disk, + MAX(disk_usage) as max_disk, + MIN(disk_usage) as min_disk, + + -- Network totals + AVG(network_usage) as avg_network, + MAX(network_usage) as max_network, + SUM(network_usage) as total_network, + + -- Temperature statistics + AVG(cpu_temperature) as avg_cpu_temp, + MAX(cpu_temperature) as max_cpu_temp, + AVG(highest_temperature) as avg_highest_temp, + MAX(highest_temperature) as max_highest_temp, + PERCENTILE_CONT(0.95) WITHIN GROUP (ORDER BY highest_temperature) as p95_temp, + + -- Load averages + AVG(load_avg_1m) as avg_load_1m, + MAX(load_avg_1m) as max_load_1m, + AVG(load_avg_5m) as avg_load_5m, + MAX(load_avg_5m) as max_load_5m, + AVG(load_avg_15m) as avg_load_15m, + MAX(load_avg_15m) as max_load_15m, + + -- Memory utilization (calculated from memory fields) + AVG(CASE WHEN memory_total_gb > 0 THEN (memory_used_gb / memory_total_gb) * 100 ELSE 0 END) as avg_memory_util, + MAX(CASE WHEN memory_total_gb > 0 THEN (memory_used_gb / memory_total_gb) * 100 ELSE 0 END) as max_memory_util, + MIN(CASE WHEN memory_total_gb > 0 THEN (memory_used_gb / memory_total_gb) * 100 ELSE 0 END) as min_memory_util, + + -- Process count (using processes_total) + AVG(processes_total) as avg_process_count, + MAX(processes_total) as max_process_count, + + -- Uptime (using uptime_seconds) + MAX(uptime_seconds) as max_uptime, + + -- Sample statistics + COUNT(*) as sample_count, + MIN(time) as first_seen, + MAX(time) as last_seen +FROM server_metrics +GROUP BY bucket, server_id, hostname, os_info; + +-- Level 5: 6-hour metrics (7-30+ days) - optimized for monthly analysis +CREATE MATERIALIZED VIEW metrics_6h_avg WITH (timescaledb.continuous) AS +SELECT + time_bucket('6 hours', time) AS bucket, + server_id, + hostname, + os_info, + -- Core metrics + AVG(cpu_usage) as avg_cpu, + MAX(cpu_usage) as max_cpu, + MIN(cpu_usage) as min_cpu, + PERCENTILE_CONT(0.95) WITHIN GROUP (ORDER BY cpu_usage) as p95_cpu, + + AVG(memory_usage) as avg_memory, + MAX(memory_usage) as max_memory, + MIN(memory_usage) as min_memory, + PERCENTILE_CONT(0.95) WITHIN GROUP (ORDER BY memory_usage) as p95_memory, + + AVG(disk_usage) as avg_disk, + MAX(disk_usage) as max_disk, + MIN(disk_usage) as min_disk, + + -- Network totals + AVG(network_usage) as avg_network, + MAX(network_usage) as max_network, + SUM(network_usage) as total_network, + + -- Temperature statistics + AVG(cpu_temperature) as avg_cpu_temp, + MAX(cpu_temperature) as max_cpu_temp, + AVG(highest_temperature) as avg_highest_temp, + MAX(highest_temperature) as max_highest_temp, + PERCENTILE_CONT(0.95) WITHIN GROUP (ORDER BY highest_temperature) as p95_temp, + + -- Load averages + AVG(load_avg_1m) as avg_load_1m, + MAX(load_avg_1m) as max_load_1m, + 
AVG(load_avg_5m) as avg_load_5m,
+    MAX(load_avg_5m) as max_load_5m,
+    AVG(load_avg_15m) as avg_load_15m,
+    MAX(load_avg_15m) as max_load_15m,
+
+    -- Memory utilization (calculated from memory fields)
+    AVG(CASE WHEN memory_total_gb > 0 THEN (memory_used_gb / memory_total_gb) * 100 ELSE 0 END) as avg_memory_util,
+    MAX(CASE WHEN memory_total_gb > 0 THEN (memory_used_gb / memory_total_gb) * 100 ELSE 0 END) as max_memory_util,
+    MIN(CASE WHEN memory_total_gb > 0 THEN (memory_used_gb / memory_total_gb) * 100 ELSE 0 END) as min_memory_util,
+
+    -- Process count (using processes_total)
+    AVG(processes_total) as avg_process_count,
+    MAX(processes_total) as max_process_count,
+
+    -- Uptime (using uptime_seconds)
+    MAX(uptime_seconds) as max_uptime,
+
+    -- Sample statistics
+    COUNT(*) as sample_count,
+    MIN(time) as first_seen,
+    MAX(time) as last_seen
+FROM server_metrics
+GROUP BY bucket, server_id, hostname, os_info;
+
+-- Create optimized indexes for new views
+CREATE INDEX ON metrics_30m_avg (server_id, bucket DESC);
+CREATE INDEX ON metrics_2h_avg (server_id, bucket DESC);
+CREATE INDEX ON metrics_6h_avg (server_id, bucket DESC);
+
+-- Partial indexes for alert conditions on new views
+CREATE INDEX ON metrics_30m_avg (server_id, bucket DESC) WHERE max_cpu > 80 OR max_memory > 85;
+CREATE INDEX ON metrics_2h_avg (server_id, bucket DESC) WHERE max_cpu > 80 OR max_memory > 85;
+CREATE INDEX ON metrics_6h_avg (server_id, bucket DESC) WHERE max_cpu > 80 OR max_memory > 85;
+
+-- Refresh policies for new optimized views
+-- 30-minute view: Refresh every 5 minutes
+SELECT add_continuous_aggregate_policy('metrics_30m_avg',
+    start_offset => INTERVAL '2 hours',
+    end_offset => INTERVAL '5 minutes',
+    schedule_interval => INTERVAL '5 minutes'
+);
+
+-- 2-hour view: Refresh every 15 minutes
+SELECT add_continuous_aggregate_policy('metrics_2h_avg',
+    start_offset => INTERVAL '6 hours',
+    end_offset => INTERVAL '15 minutes',
+    schedule_interval => INTERVAL '15 minutes'
+);
+
+-- 6-hour view: Refresh every 30 minutes
+SELECT add_continuous_aggregate_policy('metrics_6h_avg',
+    start_offset => INTERVAL '12 hours',
+    end_offset => INTERVAL '30 minutes',
+    schedule_interval => INTERVAL '30 minutes'
+);
+
+-- Enable compression on the new views; add_compression_policy fails on a
+-- continuous aggregate unless compression has been enabled first
+ALTER MATERIALIZED VIEW metrics_30m_avg SET (timescaledb.compress = true);
+ALTER MATERIALIZED VIEW metrics_2h_avg SET (timescaledb.compress = true);
+ALTER MATERIALIZED VIEW metrics_6h_avg SET (timescaledb.compress = true);
+
+-- Add compression policies for new views
+SELECT add_compression_policy('metrics_30m_avg', INTERVAL '6 hours');
+SELECT add_compression_policy('metrics_2h_avg', INTERVAL '2 days');
+SELECT add_compression_policy('metrics_6h_avg', INTERVAL '5 days');
+
+-- Retention policies for optimized views
+-- Keep 30-minute data for 2 days
+SELECT add_retention_policy('metrics_30m_avg', INTERVAL '2 days');
+
+-- Keep 2-hour data for 14 days
+SELECT add_retention_policy('metrics_2h_avg', INTERVAL '14 days');
+
+-- Keep 6-hour data for 120 days (extended retention)
+SELECT add_retention_policy('metrics_6h_avg', INTERVAL '120 days');
+
+-- Grant permissions
+GRANT SELECT ON metrics_30m_avg TO server_eye_read;
+GRANT SELECT ON metrics_2h_avg TO server_eye_read;
+GRANT SELECT ON metrics_6h_avg TO server_eye_read;
+
+-- Add comments for documentation
+COMMENT ON MATERIALIZED VIEW metrics_30m_avg IS '30-minute aggregated metrics optimized for 6-24 hour dashboard visualization (48 points max)';
+COMMENT ON MATERIALIZED VIEW metrics_2h_avg IS '2-hour aggregated metrics optimized for 1-7 day trend analysis (84 points max)';
+COMMENT ON MATERIALIZED VIEW metrics_6h_avg IS '6-hour aggregated metrics optimized for 7-30+ day analysis (120 points max for 30 days)';
diff --git a/docker-compose.yml b/docker-compose.yml
index
2cfe5f3..bf404eb 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -23,7 +23,7 @@ services: build: context: . dockerfile: Dockerfile - container_name: ServereyeAPI-api + container_name: ServerEyeAPI-api command: ["./servereye"] restart: unless-stopped ports: @@ -31,9 +31,9 @@ services: environment: - HOST=0.0.0.0 - PORT=8080 - - DATABASE_URL=postgres://servereye:password@api-postgres:5432/servereye?sslmode=disable - - KEYS_DATABASE_URL=postgres://servereye:password@api-postgres:5432/servereye?sslmode=disable - - TIMESCALEDB_URL=postgres://servereye:password@api-timescaledb:5432/servereye?sslmode=disable + - DATABASE_URL=postgres://postgres:password@api-postgres:5432/servereye?sslmode=disable + - KEYS_DATABASE_URL=postgres://postgres:password@api-postgres:5432/servereye?sslmode=disable + - TIMESCALEDB_URL=postgres://postgres:password@api-timescaledb:5432/servereye?sslmode=disable - STATIC_DATA_URL=postgres://servereye:password@api-postgres-static:5432/servereye?sslmode=disable - JWT_SECRET=your-super-secret-jwt-key-change-in-production - WEBHOOK_SECRET=your-webhook-secret-change-in-production @@ -78,7 +78,7 @@ services: # PostgreSQL for metadata api-postgres: image: postgres:15-alpine - container_name: ServereyeAPI-postgres + container_name: ServerEyeAPI-postgres restart: unless-stopped environment: POSTGRES_DB: servereye @@ -106,7 +106,7 @@ services: # TimescaleDB for time-series data api-timescaledb: image: timescale/timescaledb:2.15.0-pg15 - container_name: ServereyeAPI-timescaledb + container_name: ServerEyeAPI-timescaledb restart: unless-stopped environment: POSTGRES_DB: servereye @@ -114,7 +114,8 @@ services: POSTGRES_PASSWORD: password volumes: - servereye_timescaledb_data:/var/lib/postgresql/data - - ./deployments/timescaledb-init.sql:/docker-entrypoint-initdb.d/init-timescaledb.sql + - ./deployments/timescaledb/timescaledb-init.sql:/docker-entrypoint-initdb.d/init-timescaledb.sql + - ./deployments/timescaledb/timescaledb-multi-tier.sql:/docker-entrypoint-initdb.d/init-multi-tier.sql - ./deployments:/migrations:ro networks: - servereye-network @@ -149,7 +150,7 @@ services: # PostgreSQL for static/persistent server data api-postgres-static: image: postgres:15-alpine - container_name: ServereyeAPI-postgres-static + container_name: ServerEyeAPI-postgres-static restart: unless-stopped environment: POSTGRES_DB: servereye @@ -157,7 +158,7 @@ services: POSTGRES_PASSWORD: password volumes: - servereye_postgres_static_data:/var/lib/postgresql/data - - ./deployments/migration-005-static-data.sql:/docker-entrypoint-initdb.d/init-static-data.sql + - ./deployments/static-postgres/migration-005-static-data.sql:/docker-entrypoint-initdb.d/init-static-data.sql networks: - servereye-network healthcheck: @@ -184,8 +185,4 @@ volumes: networks: servereye-network: - name: servereye-network - driver: bridge - ipam: - config: - - subnet: 172.20.0.0/16 + external: true diff --git a/go.mod b/go.mod index 51bc9e6..dafa0e3 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/godofphonk/ServerEyeAPI -go 1.25.0 +go 1.26.0 require ( github.com/caarlos0/env/v10 v10.0.0 diff --git a/go.sum b/go.sum index dae8e8a..3cf5e99 100644 --- a/go.sum +++ b/go.sum @@ -6,6 +6,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/golang-jwt/jwt/v5 v5.3.1 h1:kYf81DTWFe7t+1VvL7eS+jKFVWaUnK9cB1qbwn63YCY= github.com/golang-jwt/jwt/v5 v5.3.1/go.mod 
h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= +github.com/google/subcommands v1.2.0 h1:vWQspBTo2nEqTUFita5/KeEWlUL8kQObDFbub/EN9oE= +github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/wire v0.7.0 h1:JxUKI6+CVBgCO2WToKy/nQk0sS+amI9z9EjVmdaocj4= @@ -45,12 +47,16 @@ github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts= golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos= +golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c= +golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU= golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k= golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk= golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA= +golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc= +golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/internal/api/routes.go b/internal/api/routes.go index 5e0af3f..62704d9 100644 --- a/internal/api/routes.go +++ b/internal/api/routes.go @@ -35,6 +35,7 @@ func SetupRoutes( healthHandler *handlers.HealthHandler, metricsHandler *handlers.MetricsHandler, tieredMetricsHandler *handlers.TieredMetricsHandler, + unifiedServerHandler *handlers.UnifiedServerHandler, serversHandler *handlers.ServersHandler, serverSourcesHandler *handlers.ServerSourcesHandler, commandsHandler *handlers.CommandsHandler, @@ -63,6 +64,10 @@ func SetupRoutes( // Metrics endpoint by key (public for TG bot) router.HandleFunc("/api/servers/by-key/{server_key}/metrics", metricsHandler.GetServerMetricsByKey).Methods("GET") + // Server status endpoints (public) + router.HandleFunc("/api/servers/{server_id}/status", metricsHandler.GetServerStatus).Methods("GET") + router.HandleFunc("/api/servers/by-key/{server_key}/status", metricsHandler.GetServerStatusByKey).Methods("GET") + // Server sources endpoints (public for TG bot and web) router.HandleFunc("/api/servers/{server_id}/sources", serverSourcesHandler.AddServerSource).Methods("POST") router.HandleFunc("/api/servers/{server_id}/sources", serverSourcesHandler.GetServerSources).Methods("GET") @@ -72,6 +77,20 @@ func SetupRoutes( router.HandleFunc("/api/servers/by-key/{server_key}/sources", serverSourcesHandler.AddServerSourceByKey).Methods("POST") router.HandleFunc("/api/servers/by-key/{server_key}/sources", serverSourcesHandler.GetServerSourcesByKey).Methods("GET") router.HandleFunc("/api/servers/by-key/{server_key}/sources/{source}", 
serverSourcesHandler.RemoveServerSourceByKey).Methods("DELETE") + router.HandleFunc("/api/servers/by-key/{server_key}/sources/{source_type}/identifiers", serverSourcesHandler.RemoveServerSourceIdentifiersByKey).Methods("DELETE") + + // Server source identifiers endpoints (public for TG bot and web) + router.HandleFunc("/api/servers/{server_id}/sources/identifiers", serverSourcesHandler.AddServerSourceIdentifiers).Methods("POST") + router.HandleFunc("/api/servers/{server_id}/sources/identifiers", serverSourcesHandler.GetServerSourceIdentifiers).Methods("GET") + router.HandleFunc("/api/servers/{server_id}/sources/{source_type}/identifiers", serverSourcesHandler.RemoveServerSourceIdentifiers).Methods("DELETE") + router.HandleFunc("/api/servers/{server_id}/sources/{source_type}/identifiers/{identifier}/telegram-id", serverSourcesHandler.UpdateTelegramID).Methods("PUT") + + // Server source identifiers by key endpoints (public for TG bot) + router.HandleFunc("/api/servers/by-key/{server_key}/sources/identifiers", serverSourcesHandler.AddServerSourceIdentifiersByKey).Methods("POST") + router.HandleFunc("/api/servers/by-key/{server_key}/sources/identifiers", serverSourcesHandler.GetServerSourceIdentifiersByKey).Methods("GET") + + // Get servers by Telegram ID (public for TG bot) + router.HandleFunc("/api/servers/by-telegram/{telegramId}", serverSourcesHandler.GetServersByTelegramID).Methods("GET") // API Key management routes (admin only) - TODO: Add middleware protection router.HandleFunc("/api/admin/keys", apiKeyHandler.CreateAPIKey).Methods("POST") @@ -81,6 +100,10 @@ func SetupRoutes( // Unified metrics endpoint (public) router.HandleFunc("/api/servers/{server_id}/metrics/tiered", tieredMetricsHandler.GetMetrics).Methods("GET") + router.HandleFunc("/api/servers/by-key/{server_key}/metrics/tiered", tieredMetricsHandler.GetMetricsByKey).Methods("GET") + + // Unified server data endpoint (public) - combines metrics, status, and static info + router.HandleFunc("/api/servers/by-key/{server_key}/unified", unifiedServerHandler.GetUnifiedServerData).Methods("GET") // Static server information endpoints (public) router.HandleFunc("/api/servers/{server_id}/static-info", staticInfoHandler.UpsertStaticInfo).Methods("POST", "PUT") diff --git a/internal/api/server.go b/internal/api/server.go index af8f022..1c4ca60 100644 --- a/internal/api/server.go +++ b/internal/api/server.go @@ -101,13 +101,14 @@ func New(cfg *config.Config, logger *logrus.Logger) (*Server, error) { // Initialize repositories alertRepo := timescaledbRepo.NewAlertRepository(timescaleDBClient.GetPool(), logger) + identifierRepo := postgresRepo.NewServerSourceIdentifierRepository(pgClient.DB(), logger) // Initialize services with repositories - authService := services.NewAuthService(keyRepo, serverRepo, logger) - serverService := services.NewServerService(serverRepo, keyRepo, logger) + authService := services.NewAuthService(keyRepo, serverRepo, identifierRepo, logger) + serverService := services.NewServerService(serverRepo, keyRepo, identifierRepo, logger) alertService := services.NewAlertService(alertRepo, logger) metricsService := services.NewMetricsService(keyRepo, storageImpl, alertService, logger) - tieredMetricsService := services.NewTieredMetricsService(timescaleDBClient, logger) + tieredMetricsService := services.NewTieredMetricsService(timescaleDBClient, pgClient.DB(), logger) commandsService := services.NewCommandsService(keyRepo, logger) metricsCommandsService := services.NewMetricsCommandsService(timescaleDBClient, logger) @@ 
-122,6 +123,7 @@ func New(cfg *config.Config, logger *logrus.Logger) (*Server, error) { healthHandler := handlers.NewHealthHandler(storageImpl, logger) metricsHandler := handlers.NewMetricsHandler(metricsService, logger) tieredMetricsHandler := handlers.NewTieredMetricsHandler(tieredMetricsService, logger) + unifiedServerHandler := handlers.NewUnifiedServerHandler(metricsService, tieredMetricsService, staticDataStorage, logger) serversHandler := handlers.NewServersHandler(storageImpl, logger) serverSourcesHandler := handlers.NewServerSourcesHandler(serverService, logger) commandsHandler := handlers.NewCommandsHandler(commandsService, logger) @@ -140,6 +142,7 @@ func New(cfg *config.Config, logger *logrus.Logger) (*Server, error) { healthHandler, metricsHandler, tieredMetricsHandler, + unifiedServerHandler, serversHandler, serverSourcesHandler, commandsHandler, diff --git a/internal/handlers/metrics.go b/internal/handlers/metrics.go index c2b4149..6be6058 100644 --- a/internal/handlers/metrics.go +++ b/internal/handlers/metrics.go @@ -96,6 +96,57 @@ func (h *MetricsHandler) GetServerMetricsByKey(w http.ResponseWriter, r *http.Re h.writeJSON(w, http.StatusOK, responseMap) } +// GetServerStatus handles GET /api/servers/{server_id}/status +func (h *MetricsHandler) GetServerStatus(w http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + serverID := vars["server_id"] + + if serverID == "" { + h.writeError(w, "server_id is required", http.StatusBadRequest) + return + } + + response, err := h.metricsService.GetServerStatus(r.Context(), serverID) + if err != nil { + h.logger.WithError(err).WithField("server_id", serverID).Error("Failed to get server status") + h.writeError(w, "Failed to get server status", http.StatusInternalServerError) + return + } + + h.writeJSON(w, http.StatusOK, response) +} + +// GetServerStatusByKey handles GET /api/servers/by-key/{server_key}/status +func (h *MetricsHandler) GetServerStatusByKey(w http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + serverKey := vars["server_key"] + + if serverKey == "" { + h.writeError(w, "server_key is required", http.StatusBadRequest) + return + } + + // Get server info by key first + serverInfo, err := h.metricsService.GetServerByKey(r.Context(), serverKey) + if err != nil { + h.logger.WithError(err).WithField("server_key", serverKey).Error("Failed to get server by key") + h.writeError(w, "Server not found", http.StatusNotFound) + return + } + + response, err := h.metricsService.GetServerStatus(r.Context(), serverInfo.ServerID) + if err != nil { + h.logger.WithError(err).WithField("server_id", serverInfo.ServerID).Error("Failed to get server status") + h.writeError(w, "Failed to get server status", http.StatusInternalServerError) + return + } + + // Add server_key to response + responseMap := response + responseMap["server_key"] = serverKey + h.writeJSON(w, http.StatusOK, responseMap) +} + // writeJSON writes JSON response func (h *MetricsHandler) writeJSON(w http.ResponseWriter, status int, data interface{}) { w.Header().Set("Content-Type", "application/json") diff --git a/internal/handlers/metrics_push.go b/internal/handlers/metrics_push.go index a49c289..7885265 100644 --- a/internal/handlers/metrics_push.go +++ b/internal/handlers/metrics_push.go @@ -3,6 +3,7 @@ package handlers import ( "context" "encoding/json" + "io" "net/http" "time" @@ -41,36 +42,79 @@ func (h *MetricsPushHandler) PushMetrics(w http.ResponseWriter, r *http.Request) return } + // Try to parse as V2 format first + var bodyBytes []byte + 
bodyBytes, err = io.ReadAll(r.Body) + if err != nil { + h.logger.WithError(err).Error("Failed to read request body") + http.Error(w, "Failed to read request", http.StatusBadRequest) + return + } + + // Try V2 format + var v2Msg struct { + Metrics models.MetricsV2 `json:"metrics"` + } + if err := json.Unmarshal(bodyBytes, &v2Msg); err == nil && !v2Msg.Metrics.Timestamp.IsZero() { + h.logger.WithFields(logrus.Fields{ + "server_id": serverInfo.ServerID, + "format": "v2", + "cpu_total": v2Msg.Metrics.CPUUsage.UsageTotal, + "memory_used": v2Msg.Metrics.Memory.UsedPercent, + "temperature": v2Msg.Metrics.Temperature.Highest, + }).Info("📊 HTTP: Received V2 metrics format") + + // Convert V2 to old format + oldMetrics := h.convertV2ToOldFormat(&v2Msg.Metrics) + oldMetrics.Time = v2Msg.Metrics.Timestamp + + if err := h.storage.StoreMetric(r.Context(), serverInfo.ServerID, oldMetrics); err != nil { + h.logger.WithError(err).WithField("server_id", serverInfo.ServerID).Error("Failed to store V2 metrics") + http.Error(w, "Failed to store metrics", http.StatusInternalServerError) + return + } + + h.logger.WithFields(logrus.Fields{ + "server_id": serverInfo.ServerID, + "cpu": oldMetrics.CPU, + "memory": oldMetrics.Memory, + "temperature": oldMetrics.TemperatureDetails.HighestTemperature, + }).Info("✅ V2 metrics stored successfully via HTTP") + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(map[string]interface{}{ + "success": true, + "server_id": serverInfo.ServerID, + "timestamp": time.Now().Unix(), + }) + return + } + + // Fallback to V1 format var metricsMsg models.MetricsMessage - if err := json.NewDecoder(r.Body).Decode(&metricsMsg); err != nil { - h.logger.WithError(err).Error("Failed to decode metrics message") + if err := json.Unmarshal(bodyBytes, &metricsMsg); err != nil { + h.logger.WithError(err).Error("Failed to decode metrics message (V1 or V2)") http.Error(w, "Invalid JSON format", http.StatusBadRequest) return } metricsMsg.ServerID = serverInfo.ServerID - - // Always set timestamp to current time metricsMsg.Metrics.Time = time.Now() h.logger.WithFields(logrus.Fields{ "server_id": serverInfo.ServerID, - "timestamp": metricsMsg.Metrics.Time, - "is_zero": metricsMsg.Metrics.Time.IsZero(), - }).Info("About to store metrics with timestamp") + "format": "v1", + "cpu": metricsMsg.Metrics.CPU, + "memory": metricsMsg.Metrics.Memory, + }).Info("📊 HTTP: Received V1 metrics format") if err := h.storage.StoreMetric(r.Context(), serverInfo.ServerID, &metricsMsg.Metrics); err != nil { - h.logger.WithError(err).WithField("server_id", serverInfo.ServerID).Error("Failed to store metrics") + h.logger.WithError(err).WithField("server_id", serverInfo.ServerID).Error("Failed to store V1 metrics") http.Error(w, "Failed to store metrics", http.StatusInternalServerError) return } - h.logger.WithFields(logrus.Fields{ - "server_id": serverInfo.ServerID, - "hostname": serverInfo.Hostname, - "cpu": metricsMsg.Metrics.CPU, - "memory": metricsMsg.Metrics.Memory, - }).Info("Metrics stored successfully via HTTP") + h.logger.WithField("server_id", serverInfo.ServerID).Info("✅ V1 metrics stored successfully via HTTP") w.Header().Set("Content-Type", "application/json") json.NewEncoder(w).Encode(map[string]interface{}{ diff --git a/internal/handlers/metrics_push_convert.go b/internal/handlers/metrics_push_convert.go new file mode 100644 index 0000000..4425f28 --- /dev/null +++ b/internal/handlers/metrics_push_convert.go @@ -0,0 +1,119 @@ +package handlers + +import 
"github.com/godofphonk/ServerEyeAPI/internal/models" + +// convertV2ToOldFormat converts new MetricsV2 format to old ServerMetrics format +func (h *MetricsPushHandler) convertV2ToOldFormat(v2 *models.MetricsV2) *models.ServerMetrics { + old := &models.ServerMetrics{} + + // Aggregated values for backward compatibility + old.CPU = v2.CPUUsage.UsageTotal + old.Memory = v2.Memory.UsedPercent + + // Calculate average disk usage + if len(v2.Disks) > 0 { + var totalDiskUsage float64 + for _, disk := range v2.Disks { + totalDiskUsage += disk.UsedPercent + } + old.Disk = totalDiskUsage / float64(len(v2.Disks)) + } + + // Calculate total network traffic in MB + var totalRxMB, totalTxMB float64 + for _, iface := range v2.Network.Interfaces { + totalRxMB += float64(iface.RxBytes) / 1024 / 1024 + totalTxMB += float64(iface.TxBytes) / 1024 / 1024 + } + old.Network = totalRxMB + totalTxMB + + // CPU detailed metrics + old.CPUUsage.UsageTotal = v2.CPUUsage.UsageTotal + old.CPUUsage.UsageUser = v2.CPUUsage.UsageUser + old.CPUUsage.UsageSystem = v2.CPUUsage.UsageSystem + old.CPUUsage.UsageIdle = v2.CPUUsage.UsageIdle + old.CPUUsage.LoadAverage.Load1 = v2.CPUUsage.LoadAverage.Load1Min + old.CPUUsage.LoadAverage.Load5 = v2.CPUUsage.LoadAverage.Load5Min + old.CPUUsage.LoadAverage.Load15 = v2.CPUUsage.LoadAverage.Load15Min + old.CPUUsage.Frequency = v2.CPUUsage.FrequencyMHz + + // Memory detailed metrics + old.MemoryDetails.TotalGB = v2.Memory.TotalGB + old.MemoryDetails.UsedGB = v2.Memory.UsedGB + old.MemoryDetails.AvailableGB = v2.Memory.AvailableGB + old.MemoryDetails.FreeGB = v2.Memory.FreeGB + old.MemoryDetails.BuffersGB = v2.Memory.BuffersGB + old.MemoryDetails.CachedGB = v2.Memory.CachedGB + old.MemoryDetails.UsedPercent = v2.Memory.UsedPercent + + // Disk detailed metrics + if len(v2.Disks) > 0 { + old.DiskDetails = make([]struct { + Path string `json:"path"` + TotalGB float64 `json:"total_gb"` + UsedGB float64 `json:"used_gb"` + FreeGB float64 `json:"free_gb"` + UsedPercent float64 `json:"used_percent"` + Filesystem string `json:"filesystem"` + }, len(v2.Disks)) + for i, disk := range v2.Disks { + old.DiskDetails[i].Path = disk.MountPoint + old.DiskDetails[i].UsedGB = disk.UsedGB + old.DiskDetails[i].FreeGB = disk.FreeGB + old.DiskDetails[i].UsedPercent = disk.UsedPercent + old.DiskDetails[i].TotalGB = disk.UsedGB + disk.FreeGB + } + } + + // Network detailed metrics + if len(v2.Network.Interfaces) > 0 { + old.NetworkDetails.Interfaces = make([]struct { + Name string `json:"name"` + RxBytes int64 `json:"rx_bytes"` + TxBytes int64 `json:"tx_bytes"` + RxPackets int64 `json:"rx_packets"` + TxPackets int64 `json:"tx_packets"` + RxSpeedMbps float64 `json:"rx_speed_mbps"` + TxSpeedMbps float64 `json:"tx_speed_mbps"` + Status string `json:"status"` + }, len(v2.Network.Interfaces)) + for i, iface := range v2.Network.Interfaces { + old.NetworkDetails.Interfaces[i].Name = iface.Name + old.NetworkDetails.Interfaces[i].RxBytes = iface.RxBytes + old.NetworkDetails.Interfaces[i].TxBytes = iface.TxBytes + old.NetworkDetails.Interfaces[i].RxPackets = iface.RxPackets + old.NetworkDetails.Interfaces[i].TxPackets = iface.TxPackets + old.NetworkDetails.Interfaces[i].RxSpeedMbps = iface.RxSpeedMbps + old.NetworkDetails.Interfaces[i].TxSpeedMbps = iface.TxSpeedMbps + old.NetworkDetails.Interfaces[i].Status = iface.Status + } + old.NetworkDetails.TotalRxMbps = v2.Network.TotalRxMbps + old.NetworkDetails.TotalTxMbps = v2.Network.TotalTxMbps + } + + // Temperature detailed metrics + old.TemperatureDetails.CPUTemperature = 
v2.Temperature.CPU + old.TemperatureDetails.GPUTemperature = v2.Temperature.GPU + old.TemperatureDetails.HighestTemperature = v2.Temperature.Highest + + if len(v2.Temperature.Storage) > 0 { + old.TemperatureDetails.StorageTemperatures = make([]struct { + Device string `json:"device"` + Type string `json:"type"` + Temperature float64 `json:"temperature"` + }, len(v2.Temperature.Storage)) + + for i, storage := range v2.Temperature.Storage { + old.TemperatureDetails.StorageTemperatures[i].Device = storage.Device + old.TemperatureDetails.StorageTemperatures[i].Temperature = storage.Temperature + } + } + + // System detailed metrics + old.SystemDetails.ProcessesTotal = v2.System.ProcessesTotal + old.SystemDetails.ProcessesRunning = v2.System.ProcessesRunning + old.SystemDetails.ProcessesSleeping = v2.System.ProcessesSleeping + old.SystemDetails.UptimeSeconds = v2.System.UptimeSeconds + + return old +} diff --git a/internal/handlers/server_sources.go b/internal/handlers/server_sources.go index 76a5a85..6e22039 100644 --- a/internal/handlers/server_sources.go +++ b/internal/handlers/server_sources.go @@ -24,6 +24,7 @@ import ( "encoding/json" "net/http" + "github.com/godofphonk/ServerEyeAPI/internal/models" "github.com/godofphonk/ServerEyeAPI/internal/services" "github.com/gorilla/mux" "github.com/sirupsen/logrus" @@ -208,6 +209,223 @@ func (h *ServerSourcesHandler) AddServerSourceByKey(w http.ResponseWriter, r *ht }) } +// AddServerSourceIdentifiers handles POST /api/servers/{server_id}/sources/identifiers +func (h *ServerSourcesHandler) AddServerSourceIdentifiers(w http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + serverID := vars["server_id"] + + if serverID == "" { + h.writeError(w, "server_id is required", http.StatusBadRequest) + return + } + + var req models.SourceIdentifierRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + h.writeError(w, "Invalid request body", http.StatusBadRequest) + return + } + + err := h.serverService.AddServerSourceIdentifiers(r.Context(), serverID, &req) + if err != nil { + h.logger.WithError(err).WithFields(logrus.Fields{ + "server_id": serverID, + "source_type": req.SourceType, + "identifiers": len(req.Identifiers), + "identifier_type": req.IdentifierType, + }).Error("Failed to add server source identifiers") + h.writeError(w, err.Error(), http.StatusBadRequest) + return + } + + h.writeJSON(w, http.StatusOK, map[string]interface{}{ + "message": "Identifiers added successfully", + "server_id": serverID, + "source_type": req.SourceType, + "identifiers": req.Identifiers, + "identifier_type": req.IdentifierType, + }) +} + +// AddServerSourceIdentifiersByKey handles POST /api/servers/by-key/{server_key}/sources/identifiers +func (h *ServerSourcesHandler) AddServerSourceIdentifiersByKey(w http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + serverKey := vars["server_key"] + + if serverKey == "" { + h.writeError(w, "server_key is required", http.StatusBadRequest) + return + } + + var req models.SourceIdentifierRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + h.writeError(w, "Invalid request body", http.StatusBadRequest) + return + } + + // Get server ID from key + serverInfo, err := h.serverService.GetServerByKey(r.Context(), serverKey) + if err != nil { + h.writeError(w, "Server not found", http.StatusNotFound) + return + } + + err = h.serverService.AddServerSourceIdentifiers(r.Context(), serverInfo.ServerID, &req) + if err != nil { + h.logger.WithError(err).WithFields(logrus.Fields{ + 
"server_key": serverKey, + "source_type": req.SourceType, + "identifiers": len(req.Identifiers), + "identifier_type": req.IdentifierType, + "telegram_id": req.TelegramID, + }).Error("Failed to add server source identifiers by key") + h.writeError(w, err.Error(), http.StatusBadRequest) + return + } + + response := map[string]interface{}{ + "message": "Identifiers added successfully", + "server_id": serverInfo.ServerID, + "server_key": serverKey, + "source_type": req.SourceType, + "identifiers": req.Identifiers, + "identifier_type": req.IdentifierType, + } + + // Add telegram_id to response if present + if req.TelegramID != nil { + response["telegram_id"] = *req.TelegramID + } + + h.writeJSON(w, http.StatusOK, response) +} + +// GetServerSourceIdentifiers handles GET /api/servers/{server_id}/sources/identifiers +func (h *ServerSourcesHandler) GetServerSourceIdentifiers(w http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + serverID := vars["server_id"] + + if serverID == "" { + h.writeError(w, "server_id is required", http.StatusBadRequest) + return + } + + response, err := h.serverService.GetServerSourceIdentifiers(r.Context(), serverID) + if err != nil { + h.logger.WithError(err).WithField("server_id", serverID).Error("Failed to get server source identifiers") + h.writeError(w, "Failed to get identifiers", http.StatusInternalServerError) + return + } + + h.writeJSON(w, http.StatusOK, response) +} + +// GetServerSourceIdentifiersByKey handles GET /api/servers/by-key/{server_key}/sources/identifiers +func (h *ServerSourcesHandler) GetServerSourceIdentifiersByKey(w http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + serverKey := vars["server_key"] + + if serverKey == "" { + h.writeError(w, "server_key is required", http.StatusBadRequest) + return + } + + // Get server ID from key + serverInfo, err := h.serverService.GetServerByKey(r.Context(), serverKey) + if err != nil { + h.writeError(w, "Server not found", http.StatusNotFound) + return + } + + response, err := h.serverService.GetServerSourceIdentifiers(r.Context(), serverInfo.ServerID) + if err != nil { + h.logger.WithError(err).WithField("server_key", serverKey).Error("Failed to get server source identifiers by key") + h.writeError(w, "Failed to get identifiers", http.StatusInternalServerError) + return + } + + h.writeJSON(w, http.StatusOK, response) +} + +// RemoveServerSourceIdentifiers handles DELETE /api/servers/{server_id}/sources/{source_type}/identifiers +func (h *ServerSourcesHandler) RemoveServerSourceIdentifiers(w http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + serverID := vars["server_id"] + sourceType := vars["source_type"] + + if serverID == "" { + h.writeError(w, "server_id is required", http.StatusBadRequest) + return + } + + if sourceType == "" { + h.writeError(w, "source_type is required", http.StatusBadRequest) + return + } + + var req struct { + Identifiers []string `json:"identifiers"` + } + + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + h.writeError(w, "Invalid request body", http.StatusBadRequest) + return + } + + if len(req.Identifiers) == 0 { + h.writeError(w, "At least one identifier is required", http.StatusBadRequest) + return + } + + // Log the request for debugging + h.logger.WithFields(logrus.Fields{ + "server_id": serverID, + "source_type": sourceType, + "identifiers": req.Identifiers, + }).Info("RemoveServerSourceIdentifiers request received") + + err := h.serverService.RemoveServerSourceIdentifiers(r.Context(), serverID, sourceType, req.Identifiers) 
+ if err != nil { + h.logger.WithError(err).WithFields(logrus.Fields{ + "server_id": serverID, + "source_type": sourceType, + "identifiers": len(req.Identifiers), + }).Error("Failed to remove server source identifiers") + h.writeError(w, err.Error(), http.StatusBadRequest) + return + } + + h.writeJSON(w, http.StatusOK, map[string]interface{}{ + "message": "Identifiers removed successfully", + "server_id": serverID, + "source_type": sourceType, + "identifiers": req.Identifiers, + }) +} + +// GetServersByTelegramID handles GET /api/servers/by-telegram/{telegramId} +func (h *ServerSourcesHandler) GetServersByTelegramID(w http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + telegramID := vars["telegramId"] + + if telegramID == "" { + h.writeError(w, "telegramId is required", http.StatusBadRequest) + return + } + + servers, err := h.serverService.GetServersByTelegramID(r.Context(), telegramID) + if err != nil { + h.logger.WithError(err).WithField("telegramId", telegramID).Error("Failed to get servers by Telegram ID") + h.writeError(w, err.Error(), http.StatusBadRequest) + return + } + + h.writeJSON(w, http.StatusOK, map[string]interface{}{ + "telegramId": telegramID, + "servers_count": len(servers), + "servers": servers, + }) +} + // GetServerSourcesByKey handles GET /api/servers/by-key/{server_key}/sources func (h *ServerSourcesHandler) GetServerSourcesByKey(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) @@ -282,6 +500,71 @@ func (h *ServerSourcesHandler) RemoveServerSourceByKey(w http.ResponseWriter, r }) } +// RemoveServerSourceIdentifiersByKey handles DELETE /api/servers/by-key/{server_key}/sources/{source_type}/identifiers +func (h *ServerSourcesHandler) RemoveServerSourceIdentifiersByKey(w http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + serverKey := vars["server_key"] + sourceType := vars["source_type"] + + if serverKey == "" { + h.writeError(w, "server_key is required", http.StatusBadRequest) + return + } + + if sourceType == "" { + h.writeError(w, "source_type is required", http.StatusBadRequest) + return + } + + var req struct { + Identifiers []string `json:"identifiers"` + } + + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + h.writeError(w, "Invalid request body", http.StatusBadRequest) + return + } + + if len(req.Identifiers) == 0 { + h.writeError(w, "At least one identifier is required", http.StatusBadRequest) + return + } + + // Log the request for debugging + h.logger.WithFields(logrus.Fields{ + "server_key": serverKey, + "source_type": sourceType, + "identifiers": req.Identifiers, + }).Info("RemoveServerSourceIdentifiersByKey request received") + + // Get server ID from key + serverInfo, err := h.serverService.GetServerByKey(r.Context(), serverKey) + if err != nil { + h.writeError(w, "Server not found", http.StatusNotFound) + return + } + + err = h.serverService.RemoveServerSourceIdentifiers(r.Context(), serverInfo.ServerID, sourceType, req.Identifiers) + if err != nil { + h.logger.WithError(err).WithFields(logrus.Fields{ + "server_id": serverInfo.ServerID, + "server_key": serverKey, + "source_type": sourceType, + "identifiers": len(req.Identifiers), + }).Error("Failed to remove server source identifiers by key") + h.writeError(w, err.Error(), http.StatusBadRequest) + return + } + + h.writeJSON(w, http.StatusOK, map[string]interface{}{ + "message": "Identifiers removed successfully", + "server_id": serverInfo.ServerID, + "server_key": serverKey, + "source_type": sourceType, + "identifiers": req.Identifiers, + }) +} + // 
writeJSON writes JSON response func (h *ServerSourcesHandler) writeJSON(w http.ResponseWriter, status int, data interface{}) { w.Header().Set("Content-Type", "application/json") @@ -289,6 +572,56 @@ func (h *ServerSourcesHandler) writeJSON(w http.ResponseWriter, status int, data json.NewEncoder(w).Encode(data) } +// UpdateTelegramID handles PUT /api/servers/{server_id}/sources/{source_type}/identifiers/{identifier}/telegram-id +func (h *ServerSourcesHandler) UpdateTelegramID(w http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + serverID := vars["server_id"] + sourceType := vars["source_type"] + identifier := vars["identifier"] + + if serverID == "" || sourceType == "" || identifier == "" { + h.writeError(w, "server_id, source_type, and identifier are required", http.StatusBadRequest) + return + } + + var req struct { + TelegramID int64 `json:"telegram_id" validate:"required"` + } + + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + h.writeError(w, "Invalid request body", http.StatusBadRequest) + return + } + + // Update telegram_id + err := h.serverService.UpdateTelegramID(r.Context(), serverID, sourceType, identifier, req.TelegramID) + if err != nil { + h.logger.WithError(err).WithFields(logrus.Fields{ + "server_id": serverID, + "source_type": sourceType, + "identifier": identifier, + "telegram_id": req.TelegramID, + }).Error("Failed to update telegram_id") + h.writeError(w, "Failed to update telegram_id", http.StatusInternalServerError) + return + } + + h.logger.WithFields(logrus.Fields{ + "server_id": serverID, + "source_type": sourceType, + "identifier": identifier, + "telegram_id": req.TelegramID, + }).Info("Telegram ID updated successfully") + + h.writeJSON(w, http.StatusOK, map[string]interface{}{ + "message": "Telegram ID updated successfully", + "server_id": serverID, + "source_type": sourceType, + "identifier": identifier, + "telegram_id": req.TelegramID, + }) +} + // writeError writes error response func (h *ServerSourcesHandler) writeError(w http.ResponseWriter, message string, status int) { h.writeJSON(w, status, map[string]string{"error": message}) diff --git a/internal/handlers/static_info.go b/internal/handlers/static_info.go index 05b0a25..22b738a 100644 --- a/internal/handlers/static_info.go +++ b/internal/handlers/static_info.go @@ -25,6 +25,16 @@ func NewStaticInfoHandler(staticStorage storage.StaticDataStorage, logger *logru } } +// checkStaticStorage verifies that static storage is available +func (h *StaticInfoHandler) checkStaticStorage(w http.ResponseWriter) bool { + if h.staticStorage == nil { + h.logger.Error("Static data storage not available") + http.Error(w, "Static data storage not available", http.StatusServiceUnavailable) + return false + } + return true +} + // UpsertStaticInfo handles POST/PUT requests to update static server information func (h *StaticInfoHandler) UpsertStaticInfo(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) @@ -35,6 +45,10 @@ func (h *StaticInfoHandler) UpsertStaticInfo(w http.ResponseWriter, r *http.Requ return } + if !h.checkStaticStorage(w) { + return + } + var info storage.CompleteStaticInfo if err := json.NewDecoder(r.Body).Decode(&info); err != nil { h.logger.WithError(err).Error("Failed to decode static info request") @@ -68,6 +82,10 @@ func (h *StaticInfoHandler) GetStaticInfo(w http.ResponseWriter, r *http.Request return } + if !h.checkStaticStorage(w) { + return + } + info, err := h.staticStorage.GetCompleteStaticInfo(r.Context(), serverID) if err != nil { 
h.logger.WithError(err).WithField("server_id", serverID).Error("Failed to get static info") @@ -206,14 +224,46 @@ func (h *StaticInfoHandler) UpsertStaticInfoByKey(w http.ResponseWriter, r *http // Convert server_key to server_id (TODO: implement proper conversion) serverID := "srv_" + serverKey[4:] // Simple conversion for now + // Log incoming request from agent + h.logger.WithFields(logrus.Fields{ + "server_key": serverKey, + "server_id": serverID, + "user_agent": r.Header.Get("User-Agent"), + "method": r.Method, + }).Info("🔄 Received static info update request from agent") + // Read request body var info storage.CompleteStaticInfo if err := json.NewDecoder(r.Body).Decode(&info); err != nil { - h.logger.WithError(err).Error("Failed to decode static info request") + h.logger.WithError(err).WithFields(logrus.Fields{ + "server_key": serverKey, + "server_id": serverID, + }).Error("Failed to decode static info request") http.Error(w, "Invalid request body", http.StatusBadRequest) return } + // Log what data is being sent + dataSections := []string{} + if info.ServerInfo != nil { + dataSections = append(dataSections, "server_info") + } + if info.HardwareInfo != nil { + dataSections = append(dataSections, "hardware_info") + } + if len(info.NetworkInterfaces) > 0 { + dataSections = append(dataSections, fmt.Sprintf("network_interfaces(%d)", len(info.NetworkInterfaces))) + } + if len(info.DiskInfo) > 0 { + dataSections = append(dataSections, fmt.Sprintf("disk_info(%d)", len(info.DiskInfo))) + } + + h.logger.WithFields(logrus.Fields{ + "server_key": serverKey, + "server_id": serverID, + "data_sections": dataSections, + }).Info("📊 Processing static info data sections") + if err := h.staticStorage.UpsertCompleteStaticInfo(r.Context(), serverID, &info); err != nil { h.logger.WithError(err).WithField("server_id", serverID).Error("Failed to upsert static info") http.Error(w, "Failed to update static information", http.StatusInternalServerError) @@ -240,6 +290,10 @@ func (h *StaticInfoHandler) GetStaticInfoByKey(w http.ResponseWriter, r *http.Re return } + if !h.checkStaticStorage(w) { + return + } + // Convert server_key to server_id (TODO: implement proper conversion) serverID := "srv_" + serverKey[4:] // Simple conversion for now diff --git a/internal/handlers/tiered_metrics.go b/internal/handlers/tiered_metrics.go index 9b9d4d7..7ae55c0 100644 --- a/internal/handlers/tiered_metrics.go +++ b/internal/handlers/tiered_metrics.go @@ -86,6 +86,62 @@ func (h *TieredMetricsHandler) GetMetrics(w http.ResponseWriter, r *http.Request h.writeJSON(w, http.StatusOK, response) } +// GetMetricsByKey retrieves metrics using server_key instead of server_id +func (h *TieredMetricsHandler) GetMetricsByKey(w http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + serverKey := vars["server_key"] + if serverKey == "" { + h.writeJSON(w, http.StatusBadRequest, ErrorResponse{Error: "server_key is required"}) + return + } + + // Convert server_key to server_id + serverID, err := h.service.GetServerIDByKey(r.Context(), serverKey) + if err != nil { + h.writeJSON(w, http.StatusNotFound, ErrorResponse{Error: "Server not found for the provided key"}) + return + } + + startStr := r.URL.Query().Get("start") + endStr := r.URL.Query().Get("end") + if startStr == "" || endStr == "" { + h.writeJSON(w, http.StatusBadRequest, ErrorResponse{Error: "start and end query parameters are required"}) + return + } + + startTime, err := time.Parse(time.RFC3339, startStr) + if err != nil { + h.writeJSON(w, http.StatusBadRequest, 
ErrorResponse{Error: "invalid start time format, use RFC3339"}) + return + } + + endTime, err := time.Parse(time.RFC3339, endStr) + if err != nil { + h.writeJSON(w, http.StatusBadRequest, ErrorResponse{Error: "invalid end time format, use RFC3339"}) + return + } + + if endTime.Before(startTime) { + h.writeJSON(w, http.StatusBadRequest, ErrorResponse{Error: "end time must be after start time"}) + return + } + + // Limit time range to maximum 30 days + if endTime.Sub(startTime) > 30*24*time.Hour { + h.writeJSON(w, http.StatusBadRequest, ErrorResponse{Error: "time range cannot exceed 30 days"}) + return + } + + response, err := h.service.GetMetricsWithAutoGranularity(r.Context(), serverID, startTime, endTime) + if err != nil { + h.logger.WithError(err).WithField("server_key", serverKey).Error("Failed to get tiered metrics by key") + h.writeJSON(w, http.StatusInternalServerError, ErrorResponse{Error: "Failed to retrieve metrics"}) + return + } + + h.writeJSON(w, http.StatusOK, response) +} + // GetRealTimeMetrics gets real-time metrics (last hour with 1-minute granularity) func (h *TieredMetricsHandler) GetRealTimeMetrics(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) diff --git a/internal/handlers/unified_server.go b/internal/handlers/unified_server.go new file mode 100644 index 0000000..9544d2d --- /dev/null +++ b/internal/handlers/unified_server.go @@ -0,0 +1,229 @@ +package handlers + +import ( + "encoding/json" + "net/http" + "sync" + "time" + + "github.com/gorilla/mux" + "github.com/sirupsen/logrus" + + "github.com/godofphonk/ServerEyeAPI/internal/services" + "github.com/godofphonk/ServerEyeAPI/internal/storage" +) + +// UnifiedServerHandler handles unified server data requests +type UnifiedServerHandler struct { + metricsService *services.MetricsService + tieredService *services.TieredMetricsService + staticStorage storage.StaticDataStorage + logger *logrus.Logger +} + +// NewUnifiedServerHandler creates a new unified server handler +func NewUnifiedServerHandler( + metricsService *services.MetricsService, + tieredService *services.TieredMetricsService, + staticStorage storage.StaticDataStorage, + logger *logrus.Logger, +) *UnifiedServerHandler { + return &UnifiedServerHandler{ + metricsService: metricsService, + tieredService: tieredService, + staticStorage: staticStorage, + logger: logger, + } +} + +// UnifiedResponse combines all server data in one response +type UnifiedResponse struct { + ServerID string `json:"server_id"` + ServerKey string `json:"server_key,omitempty"` + Timestamp string `json:"timestamp"` + + // Components + Metrics interface{} `json:"metrics,omitempty"` + Status interface{} `json:"status,omitempty"` + StaticInfo interface{} `json:"static_info,omitempty"` + + // Performance metadata + ResponseMeta ResponseMeta `json:"response_meta"` +} + +type ResponseMeta struct { + TotalResponseTimeMs int64 `json:"total_response_time_ms"` + ComponentsStatus map[string]CompStatus `json:"components_status"` +} + +type CompStatus struct { + Available bool `json:"available"` + ResponseTime int64 `json:"response_time_ms"` + Error string `json:"error,omitempty"` +} + +// GetUnifiedServerData handles GET /api/servers/by-key/{server_key}/unified +func (h *UnifiedServerHandler) GetUnifiedServerData(w http.ResponseWriter, r *http.Request) { + requestStart := time.Now() + vars := mux.Vars(r) + serverKey := vars["server_key"] + + if serverKey == "" { + h.writeError(w, "server_key is required", http.StatusBadRequest) + return + } + + // Parse optional query parameters + 
includeMetrics := r.URL.Query().Get("include_metrics") != "false" + includeStatus := r.URL.Query().Get("include_status") != "false" + includeStatic := r.URL.Query().Get("include_static") != "false" + + // Get server info by key first + serverInfo, err := h.metricsService.GetServerByKey(r.Context(), serverKey) + if err != nil { + h.logger.WithError(err).WithField("server_key", serverKey).Error("Failed to get server by key") + h.writeError(w, "Server not found", http.StatusNotFound) + return + } + + // Create unified response + response := UnifiedResponse{ + ServerID: serverInfo.ServerID, + ServerKey: serverKey, + Timestamp: time.Now().Format(time.RFC3339), + ResponseMeta: ResponseMeta{ + ComponentsStatus: make(map[string]CompStatus), + }, + } + + var wg sync.WaitGroup + var mu sync.Mutex + + // Fetch metrics component + if includeMetrics { + wg.Add(1) + go func() { + defer wg.Done() + componentStart := time.Now() + + // Use regular metrics (not tiered) for unified endpoint + metrics, err := h.metricsService.GetServerMetricsWithStatus( + r.Context(), + serverInfo.ServerID, + ) + + componentDuration := time.Since(componentStart).Milliseconds() + + mu.Lock() + if err != nil { + response.ResponseMeta.ComponentsStatus["metrics"] = CompStatus{ + Available: false, + ResponseTime: componentDuration, + Error: err.Error(), + } + } else { + response.ResponseMeta.ComponentsStatus["metrics"] = CompStatus{ + Available: true, + ResponseTime: componentDuration, + } + response.Metrics = metrics + } + mu.Unlock() + }() + } + + // Fetch status component + if includeStatus { + wg.Add(1) + go func() { + defer wg.Done() + componentStart := time.Now() + + status, err := h.metricsService.GetServerStatus(r.Context(), serverInfo.ServerID) + + componentDuration := time.Since(componentStart).Milliseconds() + + mu.Lock() + if err != nil { + response.ResponseMeta.ComponentsStatus["status"] = CompStatus{ + Available: false, + ResponseTime: componentDuration, + Error: err.Error(), + } + } else { + response.ResponseMeta.ComponentsStatus["status"] = CompStatus{ + Available: true, + ResponseTime: componentDuration, + } + response.Status = status + } + mu.Unlock() + }() + } + + // Fetch static info component + if includeStatic { + wg.Add(1) + go func() { + defer wg.Done() + componentStart := time.Now() + + staticInfo, err := h.staticStorage.GetCompleteStaticInfo(r.Context(), serverInfo.ServerID) + + componentDuration := time.Since(componentStart).Milliseconds() + + mu.Lock() + if err != nil { + response.ResponseMeta.ComponentsStatus["static_info"] = CompStatus{ + Available: false, + ResponseTime: componentDuration, + Error: err.Error(), + } + } else if staticInfo.ServerInfo == nil { + response.ResponseMeta.ComponentsStatus["static_info"] = CompStatus{ + Available: false, + ResponseTime: componentDuration, + Error: "Static info not found", + } + } else { + response.ResponseMeta.ComponentsStatus["static_info"] = CompStatus{ + Available: true, + ResponseTime: componentDuration, + } + response.StaticInfo = staticInfo + } + mu.Unlock() + }() + } + + // Wait for all components to complete + wg.Wait() + + // Calculate total response time + response.ResponseMeta.TotalResponseTimeMs = time.Since(requestStart).Milliseconds() + + // Log performance metrics + h.logger.WithFields(logrus.Fields{ + "server_id": serverInfo.ServerID, + "server_key": serverKey, + "total_time_ms": response.ResponseMeta.TotalResponseTimeMs, + "components": len(response.ResponseMeta.ComponentsStatus), + "include_metrics": includeMetrics, + "include_status": 
includeStatus, + "include_static": includeStatic, + }).Info("Unified server data request completed") + + h.writeJSON(w, http.StatusOK, response) +} + +// writeJSON writes JSON response +func (h *UnifiedServerHandler) writeJSON(w http.ResponseWriter, status int, data interface{}) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(status) + json.NewEncoder(w).Encode(data) +} + +// writeError writes error response +func (h *UnifiedServerHandler) writeError(w http.ResponseWriter, message string, status int) { + h.writeJSON(w, status, map[string]string{"error": message}) +} diff --git a/internal/models/dynamic_metrics.go b/internal/models/dynamic_metrics.go new file mode 100644 index 0000000..dad816a --- /dev/null +++ b/internal/models/dynamic_metrics.go @@ -0,0 +1,46 @@ +package models + +import "time" + +// DynamicMetrics represents real-time server performance metrics +// These are time-series data that change frequently (ONLY dynamic data, NO static info) +type DynamicMetrics struct { + // Core performance metrics (percentages) + CPUPercent float64 `json:"cpu_percent"` + MemoryPercent float64 `json:"memory_percent"` + DiskPercent float64 `json:"disk_percent"` + NetworkMbps float64 `json:"network_mbps"` + + // Load averages (uses existing LoadAverage from metrics_v2.go) + LoadAverage LoadAverage `json:"load_average"` + + // Temperature monitoring (uses existing TemperatureMetrics from metrics_v2.go) + TemperatureCelsius float64 `json:"temperature_celsius"` + Temperatures TemperatureMetrics `json:"temperatures,omitempty"` + + // Process information + ProcessesTotal int `json:"processes_total"` + ProcessesRunning int `json:"processes_running"` + ProcessesSleeping int `json:"processes_sleeping"` + + // System uptime (dynamic - changes every second) + UptimeSeconds int64 `json:"uptime_seconds"` + + // Memory details (dynamic usage - uses existing MemoryMetrics from metrics_v2.go) + MemoryDetails MemoryMetrics `json:"memory_details,omitempty"` + + // Disk details (dynamic usage - uses existing DiskMetrics from metrics_v2.go) + DiskDetails []DiskMetrics `json:"disk_details,omitempty"` + + // Network details (dynamic traffic - uses existing NetworkMetrics from metrics_v2.go) + NetworkDetails NetworkMetrics `json:"network_details,omitempty"` + + // Timestamp + Timestamp time.Time `json:"timestamp"` +} + +// DynamicMetricsResponse represents the API response for metrics endpoint +type DynamicMetricsResponse struct { + ServerID string `json:"server_id"` + Metrics DynamicMetrics `json:"metrics"` +} diff --git a/internal/models/metrics_v2.go b/internal/models/metrics_v2.go new file mode 100644 index 0000000..67540f1 --- /dev/null +++ b/internal/models/metrics_v2.go @@ -0,0 +1,120 @@ +// Copyright (c) 2026 godofphonk +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package models + +import "time" + +// MetricsV2 represents the new simplified metrics structure +type MetricsV2 struct { + CPUUsage CPUUsageMetrics `json:"cpu_usage"` + Memory MemoryMetrics `json:"memory"` + Disks []DiskMetrics `json:"disks"` + Network NetworkMetrics `json:"network"` + Temperature TemperatureMetrics `json:"temperature"` + System SystemMetrics `json:"system"` + Timestamp time.Time `json:"timestamp"` +} + +// CPUUsageMetrics represents CPU usage statistics +type CPUUsageMetrics struct { + UsageTotal float64 `json:"usage_total"` + UsageUser float64 `json:"usage_user"` + UsageSystem float64 `json:"usage_system"` + UsageIdle float64 `json:"usage_idle"` + LoadAverage LoadAverage `json:"load_average"` + FrequencyMHz float64 `json:"frequency_mhz"` +} + +// LoadAverage represents system load averages +type LoadAverage struct { + Load1Min float64 `json:"load_1min"` + Load5Min float64 `json:"load_5min"` + Load15Min float64 `json:"load_15min"` +} + +// MemoryMetrics represents memory usage statistics +type MemoryMetrics struct { + TotalGB float64 `json:"total_gb"` + UsedGB float64 `json:"used_gb"` + AvailableGB float64 `json:"available_gb"` + FreeGB float64 `json:"free_gb"` + BuffersGB float64 `json:"buffers_gb"` + CachedGB float64 `json:"cached_gb"` + UsedPercent float64 `json:"used_percent"` +} + +// DiskMetrics represents disk usage for a single mount point +type DiskMetrics struct { + MountPoint string `json:"mount_point"` + DeviceName string `json:"device_name"` + UsedGB float64 `json:"used_gb"` + FreeGB float64 `json:"free_gb"` + UsedPercent float64 `json:"used_percent"` +} + +// NetworkMetrics represents network statistics +type NetworkMetrics struct { + Interfaces []NetworkInterface `json:"interfaces"` + TotalRxMbps float64 `json:"total_rx_mbps"` + TotalTxMbps float64 `json:"total_tx_mbps"` +} + +// NetworkInterface represents a single network interface statistics +type NetworkInterface struct { + Name string `json:"name"` + RxBytes int64 `json:"rx_bytes"` + TxBytes int64 `json:"tx_bytes"` + RxPackets int64 `json:"rx_packets"` + TxPackets int64 `json:"tx_packets"` + RxSpeedMbps float64 `json:"rx_speed_mbps"` + TxSpeedMbps float64 `json:"tx_speed_mbps"` + Status string `json:"status"` +} + +// TemperatureMetrics represents temperature readings +type TemperatureMetrics struct { + CPU float64 `json:"cpu"` + GPU float64 `json:"gpu"` + Storage []StorageTemperature `json:"storage"` + Highest float64 `json:"highest"` +} + +// StorageTemperature represents temperature of a storage device +type StorageTemperature struct { + Device string `json:"device"` + Temperature float64 `json:"temperature"` +} + +// SystemMetrics represents system-level metrics +type SystemMetrics struct { + ProcessesTotal int `json:"processes_total"` + ProcessesRunning int `json:"processes_running"` + ProcessesSleeping int `json:"processes_sleeping"` + UptimeSeconds int64 `json:"uptime_seconds"` +} + +// MetricsMessageV2 represents the complete metrics message from agent (new format) +type MetricsMessageV2 struct { + Type 
string `json:"type"` + ServerID string `json:"server_id"` + Data MetricsV2 `json:"data"` + Timestamp int64 `json:"timestamp"` +} diff --git a/internal/models/server_source_identifiers.go b/internal/models/server_source_identifiers.go new file mode 100644 index 0000000..502511f --- /dev/null +++ b/internal/models/server_source_identifiers.go @@ -0,0 +1,59 @@ +// Copyright (c) 2026 godofphonk +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package models + +import "time" + +// ServerSourceIdentifier represents an identifier for a server source +type ServerSourceIdentifier struct { + ID int64 `json:"id" db:"id"` + ServerID string `json:"server_id" db:"server_id"` + SourceType string `json:"source_type" db:"source_type"` // TGBot, Web, Email, etc. 
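+	// For illustration, one row of this model serialized to JSON; all
+	// values below are hypothetical, not taken from this change:
+	//
+	//	{
+	//	  "id": 1,
+	//	  "server_id": "srv_abc123",
+	//	  "source_type": "TGBot",
+	//	  "identifier": "123456789",
+	//	  "identifier_type": "telegram_id",
+	//	  "telegram_id": 123456789,
+	//	  "metadata": {"linked_from": "bot"},
+	//	  "created_at": "2026-01-01T00:00:00Z",
+	//	  "updated_at": "2026-01-01T00:00:00Z"
+	//	}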
+ Identifier string `json:"identifier" db:"identifier"` // TG ID, user ID, email + IdentifierType string `json:"identifier_type" db:"identifier_type"` // telegram_id, user_id, email + TelegramID *int64 `json:"telegram_id,omitempty" db:"telegram_id"` // Optional Telegram ID for account linking + Metadata map[string]interface{} `json:"metadata" db:"metadata"` // additional info + CreatedAt time.Time `json:"created_at" db:"created_at"` + UpdatedAt time.Time `json:"updated_at" db:"updated_at"` +} + +// SourceIdentifierRequest represents a request to add/update source identifiers +type SourceIdentifierRequest struct { + SourceType string `json:"source_type" validate:"required,oneof=TGBot Web Email"` // TGBot, Web, Email + Identifiers []string `json:"identifiers" validate:"required,min=1"` // TG IDs, user IDs, emails + IdentifierType string `json:"identifier_type" validate:"required,oneof=telegram_id user_id email"` // telegram_id, user_id, email + TelegramID *int64 `json:"telegram_id,omitempty"` // Optional Telegram ID for account linking + Metadata map[string]interface{} `json:"metadata,omitempty"` // optional metadata +} + +// SourceIdentifierResponse represents response with server source identifiers +type SourceIdentifierResponse struct { + ServerID string `json:"server_id"` + SourceType string `json:"source_type"` + Identifiers []ServerSourceIdentifier `json:"identifiers"` +} + +// ServerSourcesResponse represents combined response with all server sources and identifiers +type ServerSourcesResponse struct { + ServerID string `json:"server_id"` + Sources []string `json:"sources"` // TGBot, Web (legacy) + Identifiers map[string][]ServerSourceIdentifier `json:"identifiers"` // source_type -> identifiers +} diff --git a/internal/services/auth.go b/internal/services/auth.go index 15ee533..9e2a8b0 100644 --- a/internal/services/auth.go +++ b/internal/services/auth.go @@ -31,24 +31,26 @@ import ( // AuthService handles authentication operations using repositories directly type AuthService struct { - keyRepo interfaces.GeneratedKeyRepository - serverRepo interfaces.ServerRepository - logger *logrus.Logger + keyRepo interfaces.GeneratedKeyRepository + serverRepo interfaces.ServerRepository + identifierRepo interfaces.ServerSourceIdentifierRepository + logger *logrus.Logger } // NewAuthService creates a new auth service -func NewAuthService(keyRepo interfaces.GeneratedKeyRepository, serverRepo interfaces.ServerRepository, logger *logrus.Logger) *AuthService { +func NewAuthService(keyRepo interfaces.GeneratedKeyRepository, serverRepo interfaces.ServerRepository, identifierRepo interfaces.ServerSourceIdentifierRepository, logger *logrus.Logger) *AuthService { return &AuthService{ - keyRepo: keyRepo, - serverRepo: serverRepo, - logger: logger, + keyRepo: keyRepo, + serverRepo: serverRepo, + identifierRepo: identifierRepo, + logger: logger, } } // RegisterKey registers a new server key func (s *AuthService) RegisterKey(ctx context.Context, req *models.RegisterKeyRequest) (*models.RegisterKeyResponse, error) { // Use ServerService for registration - serverService := NewServerService(s.serverRepo, s.keyRepo, s.logger) + serverService := NewServerService(s.serverRepo, s.keyRepo, s.identifierRepo, s.logger) serverReq := &RegisterServerRequest{ Hostname: req.Hostname, @@ -71,7 +73,7 @@ func (s *AuthService) RegisterKey(ctx context.Context, req *models.RegisterKeyRe // AuthenticateServer authenticates a server using ServerService func (s *AuthService) AuthenticateServer(ctx context.Context, serverID, 
serverKey string) (*models.GeneratedKey, error) { // Use ServerService for authentication - serverService := NewServerService(s.serverRepo, s.keyRepo, s.logger) + serverService := NewServerService(s.serverRepo, s.keyRepo, s.identifierRepo, s.logger) server, err := serverService.AuthenticateWebSocket(ctx, serverID, serverKey) if err != nil { @@ -104,13 +106,13 @@ func (s *AuthService) GetServerByID(ctx context.Context, serverID string) (*mode // UpdateServerStatus updates server status func (s *AuthService) UpdateServerStatus(ctx context.Context, serverID, status string) error { - serverService := NewServerService(s.serverRepo, s.keyRepo, s.logger) + serverService := NewServerService(s.serverRepo, s.keyRepo, s.identifierRepo, s.logger) return serverService.UpdateServerStatus(ctx, serverID, status) } // ListServers retrieves all servers func (s *AuthService) ListServers(ctx context.Context) ([]*models.Server, error) { - serverService := NewServerService(s.serverRepo, s.keyRepo, s.logger) + serverService := NewServerService(s.serverRepo, s.keyRepo, s.identifierRepo, s.logger) return serverService.ListServers(ctx, "") } diff --git a/internal/services/metrics.go b/internal/services/metrics.go index bdd2a16..61137ab 100644 --- a/internal/services/metrics.go +++ b/internal/services/metrics.go @@ -191,50 +191,151 @@ func (s *MetricsService) GetAllServerMetrics(ctx context.Context, serverID strin return metrics, nil } -// GetServerMetricsWithStatus retrieves both metrics and status for a server +// GetServerMetricsWithStatus retrieves ONLY dynamic metrics for a server (NO static info, NO status) func (s *MetricsService) GetServerMetricsWithStatus(ctx context.Context, serverID string) (map[string]interface{}, error) { // Verify server exists - key, err := s.keyRepo.GetByServerID(ctx, serverID) + _, err := s.keyRepo.GetByServerID(ctx, serverID) if err != nil { return nil, fmt.Errorf("server not found: %w", err) } - // Get metrics + // Get latest metrics metrics, err := s.storage.GetMetric(ctx, serverID) if err != nil { s.logger.WithFields(logrus.Fields{ "server_id": serverID, "error": err.Error(), - }).Warn("Failed to retrieve metrics, returning status only") + }).Info("No current metrics available") - // Return status only if metrics not available + // Return empty response with current timestamp instead of error + // This prevents frontend from seeing zeros and allows proper "no data" handling return map[string]interface{}{ "server_id": serverID, - "timestamp": time.Now(), - "status": map[string]interface{}{ - "online": false, - "last_seen": key.CreatedAt, - "os_info": key.OSInfo, - "agent_version": key.AgentVersion, - "hostname": key.Hostname, + "metrics": map[string]interface{}{ + "cpu_percent": 0, + "memory_percent": 0, + "disk_percent": 0, + "network_mbps": 0, + "timestamp": time.Now(), + "status": "no_data", }, - "metrics": nil, - "alerts": []string{"Metrics not available"}, }, nil } - // Combine metrics and status + // Build ONLY dynamic metrics response (NO static data) + cleanMetrics := map[string]interface{}{ + "timestamp": metrics.Time, + } + + // Core performance metrics (percentages) + cleanMetrics["cpu_percent"] = metrics.CPU + cleanMetrics["memory_percent"] = metrics.Memory + cleanMetrics["disk_percent"] = metrics.Disk + cleanMetrics["network_mbps"] = metrics.Network + + // Load averages (dynamic) + if metrics.CPUUsage.LoadAverage.Load1 > 0 || metrics.CPUUsage.LoadAverage.Load5 > 0 { + cleanMetrics["load_average"] = map[string]interface{}{ + "1m": metrics.CPUUsage.LoadAverage.Load1, + 
"5m": metrics.CPUUsage.LoadAverage.Load5, + "15m": metrics.CPUUsage.LoadAverage.Load15, + } + } + + // Temperature (dynamic) + if metrics.TemperatureDetails.HighestTemperature > 0 { + cleanMetrics["temperature_celsius"] = metrics.TemperatureDetails.HighestTemperature + cleanMetrics["temperatures"] = map[string]interface{}{ + "cpu": metrics.TemperatureDetails.CPUTemperature, + "gpu": metrics.TemperatureDetails.GPUTemperature, + "storage": metrics.TemperatureDetails.StorageTemperatures, + "highest": metrics.TemperatureDetails.HighestTemperature, + } + } + + // Process information (dynamic) + if metrics.SystemDetails.ProcessesTotal > 0 { + cleanMetrics["processes_total"] = metrics.SystemDetails.ProcessesTotal + cleanMetrics["processes_running"] = metrics.SystemDetails.ProcessesRunning + cleanMetrics["processes_sleeping"] = metrics.SystemDetails.ProcessesSleeping + } + + // Uptime (dynamic) + if metrics.SystemDetails.UptimeSeconds > 0 { + cleanMetrics["uptime_seconds"] = metrics.SystemDetails.UptimeSeconds + } + + // Memory details (dynamic usage, NOT total) + if metrics.MemoryDetails.UsedGB > 0 { + cleanMetrics["memory_details"] = map[string]interface{}{ + "used_gb": metrics.MemoryDetails.UsedGB, + "available_gb": metrics.MemoryDetails.AvailableGB, + "free_gb": metrics.MemoryDetails.FreeGB, + "buffers_gb": metrics.MemoryDetails.BuffersGB, + "cached_gb": metrics.MemoryDetails.CachedGB, + } + } + + // Disk details (dynamic usage) + if len(metrics.DiskDetails) > 0 { + diskDetails := make([]map[string]interface{}, 0, len(metrics.DiskDetails)) + for _, disk := range metrics.DiskDetails { + diskDetails = append(diskDetails, map[string]interface{}{ + "path": disk.Path, + "used_gb": disk.UsedGB, + "free_gb": disk.FreeGB, + "used_percent": disk.UsedPercent, + }) + } + cleanMetrics["disk_details"] = diskDetails + } + + // Network details (dynamic traffic) + if len(metrics.NetworkDetails.Interfaces) > 0 { + cleanMetrics["network_details"] = map[string]interface{}{ + "total_rx_mbps": metrics.NetworkDetails.TotalRxMbps, + "total_tx_mbps": metrics.NetworkDetails.TotalTxMbps, + } + } + response := map[string]interface{}{ "server_id": serverID, - "timestamp": metrics.Time, - "status": map[string]interface{}{ - "online": true, - "last_seen": metrics.Time, - "os_info": key.OSInfo, - "agent_version": key.AgentVersion, - "hostname": key.Hostname, - }, - "metrics": metrics, + "metrics": cleanMetrics, + } + + return response, nil +} + +// GetServerStatus retrieves ONLY server status (online, last_seen, agent_version) +func (s *MetricsService) GetServerStatus(ctx context.Context, serverID string) (map[string]interface{}, error) { + // Verify server exists + key, err := s.keyRepo.GetByServerID(ctx, serverID) + if err != nil { + return nil, fmt.Errorf("server not found: %w", err) + } + + // Get latest metrics to determine last_seen + metrics, err := s.storage.GetMetric(ctx, serverID) + + var lastSeen time.Time + var online bool + + if err != nil { + // No metrics available - server offline + lastSeen = key.CreatedAt + online = false + } else { + // Metrics available - check if recent + lastSeen = metrics.Time + // Consider online if last seen within 5 minutes + online = time.Since(lastSeen) < 5*time.Minute + } + + response := map[string]interface{}{ + "server_id": serverID, + "online": online, + "last_seen": lastSeen, + "agent_version": key.AgentVersion, } return response, nil diff --git a/internal/services/server_service.go b/internal/services/server_service.go index fc46e39..9bd4eec 100644 --- 
a/internal/services/server_service.go +++ b/internal/services/server_service.go @@ -23,6 +23,7 @@ package services import ( "context" "fmt" + "strconv" "strings" "time" @@ -35,17 +36,19 @@ import ( // ServerService handles server-related business logic type ServerService struct { - serverRepo interfaces.ServerRepository - keyRepo interfaces.GeneratedKeyRepository - logger *logrus.Logger + serverRepo interfaces.ServerRepository + keyRepo interfaces.GeneratedKeyRepository + identifierRepo interfaces.ServerSourceIdentifierRepository + logger *logrus.Logger } // NewServerService creates a new server service -func NewServerService(serverRepo interfaces.ServerRepository, keyRepo interfaces.GeneratedKeyRepository, logger *logrus.Logger) *ServerService { +func NewServerService(serverRepo interfaces.ServerRepository, keyRepo interfaces.GeneratedKeyRepository, identifierRepo interfaces.ServerSourceIdentifierRepository, logger *logrus.Logger) *ServerService { return &ServerService{ - serverRepo: serverRepo, - keyRepo: keyRepo, - logger: logger, + serverRepo: serverRepo, + keyRepo: keyRepo, + identifierRepo: identifierRepo, + logger: logger, } } @@ -312,7 +315,8 @@ func (s *ServerService) AddServerSource(ctx context.Context, serverID, source st sources := strings.Split(currentSources, ",") for _, src := range sources { if strings.TrimSpace(src) == source { - return fmt.Errorf("source %s already exists for server", source) + // Source already exists, no action needed + return nil } } // Add new source @@ -342,7 +346,7 @@ func (s *ServerService) GetServerSources(ctx context.Context, serverID string) ( return sources, nil } -// RemoveServerSource removes a source from a server +// RemoveServerSource removes a source from a server and its identifiers func (s *ServerService) RemoveServerSource(ctx context.Context, serverID, source string) error { // Get current server info server, err := s.serverRepo.GetByID(ctx, serverID) @@ -354,7 +358,16 @@ func (s *ServerService) RemoveServerSource(ctx context.Context, serverID, source return fmt.Errorf("no sources to remove") } - // Parse and remove source + // Delete all identifiers for this source type + err = s.identifierRepo.DeleteByServerIDAndSourceType(ctx, serverID, source) + if err != nil { + s.logger.WithError(err).WithFields(logrus.Fields{ + "server_id": serverID, + "source_type": source, + }).Warn("Failed to delete identifiers for source") + } + + // Parse and remove source from legacy field sources := strings.Split(server.Sources, ",") var newSources []string found := false @@ -372,11 +385,341 @@ func (s *ServerService) RemoveServerSource(ctx context.Context, serverID, source return fmt.Errorf("source %s not found for server", source) } - // Update server sources + // Update server sources legacy field var newSourcesStr string if len(newSources) > 0 { newSourcesStr = strings.Join(newSources, ",") } + // Update server sources return s.serverRepo.UpdateSources(ctx, serverID, newSourcesStr) } + +// RemoveServerSourceByIdentifier removes a specific identifier and optionally the source if no identifiers remain +func (s *ServerService) RemoveServerSourceByIdentifier(ctx context.Context, serverID, sourceType, identifier string) error { + // Delete the specific identifier + err := s.identifierRepo.DeleteByServerIDSourceTypeAndIdentifier(ctx, serverID, sourceType, identifier) + if err != nil { + return fmt.Errorf("failed to delete identifier: %w", err) + } + + // Check if there are any remaining identifiers for this source type + remaining, err := 
s.identifierRepo.GetByServerIDAndSourceType(ctx, serverID, sourceType) + if err != nil { + s.logger.WithError(err).Warn("Failed to check remaining identifiers") + return nil // Don't fail the operation + } + + // If no identifiers remain, remove the source from legacy field + if len(remaining) == 0 { + return s.RemoveServerSource(ctx, serverID, sourceType) + } + + s.logger.WithFields(logrus.Fields{ + "server_id": serverID, + "source_type": sourceType, + "identifier": identifier, + "remaining": len(remaining), + }).Info("Identifier removed, source remains due to other identifiers") + + return nil +} + +// AddServerSourceIdentifiers adds multiple identifiers for a server source +func (s *ServerService) AddServerSourceIdentifiers(ctx context.Context, serverID string, req *models.SourceIdentifierRequest) error { + // Validate request + if err := s.validateSourceIdentifierRequest(req); err != nil { + return err + } + + // Get current server info to ensure server exists + _, err := s.serverRepo.GetByID(ctx, serverID) + if err != nil { + return fmt.Errorf("server not found: %w", err) + } + + // Create identifiers + identifiers := make([]*models.ServerSourceIdentifier, 0, len(req.Identifiers)) + for _, id := range req.Identifiers { + // Check if identifier already exists + existing, err := s.identifierRepo.GetByServerIDAndIdentifier(ctx, serverID, req.SourceType, id) + if err == nil && existing != nil { + continue // Skip existing identifier + } + + identifier := &models.ServerSourceIdentifier{ + ServerID: serverID, + SourceType: req.SourceType, + Identifier: id, + IdentifierType: req.IdentifierType, + TelegramID: req.TelegramID, + Metadata: req.Metadata, + } + identifiers = append(identifiers, identifier) + } + + if len(identifiers) == 0 { + // All identifiers already exist, but that's ok - just ensure source exists + if err := s.AddServerSource(ctx, serverID, req.SourceType); err != nil { + s.logger.WithError(err).Warn("Failed to update legacy sources field") + } + return nil + } + + // Create batch + if err := s.identifierRepo.CreateBatch(ctx, identifiers); err != nil { + return fmt.Errorf("failed to create identifiers: %w", err) + } + + // Also update legacy sources field if needed + if err := s.AddServerSource(ctx, serverID, req.SourceType); err != nil { + s.logger.WithError(err).Warn("Failed to update legacy sources field") + } + + s.logger.WithFields(logrus.Fields{ + "server_id": serverID, + "source_type": req.SourceType, + "identifiers": len(identifiers), + "identifier_type": req.IdentifierType, + }).Info("Server source identifiers added successfully") + + return nil +} + +// GetServerSourceIdentifiers gets all identifiers for a server +func (s *ServerService) GetServerSourceIdentifiers(ctx context.Context, serverID string) (*models.ServerSourcesResponse, error) { + // Get server info + server, err := s.serverRepo.GetByID(ctx, serverID) + if err != nil { + return nil, fmt.Errorf("server not found: %w", err) + } + + // Get all identifiers + identifiersMap, err := s.identifierRepo.GetAllByServerID(ctx, serverID) + if err != nil { + return nil, fmt.Errorf("failed to get identifiers: %w", err) + } + + // Convert map to response format + identifiers := make(map[string][]models.ServerSourceIdentifier) + for sourceType, ids := range identifiersMap { + identifiers[sourceType] = make([]models.ServerSourceIdentifier, len(ids)) + for i, id := range ids { + identifiers[sourceType][i] = *id + } + } + + // Parse legacy sources + sources := []string{} + if server.Sources != "" { + sources = 
strings.Split(server.Sources, ",") + for i, src := range sources { + sources[i] = strings.TrimSpace(src) + } + } + + return &models.ServerSourcesResponse{ + ServerID: serverID, + Sources: sources, + Identifiers: identifiers, + }, nil +} + +// RemoveServerSourceIdentifiers removes identifiers for a server source +func (s *ServerService) RemoveServerSourceIdentifiers(ctx context.Context, serverID, sourceType string, identifiers []string) error { + // Validate source type + if sourceType != "TGBot" && sourceType != "Web" && sourceType != "Email" { + return fmt.Errorf("invalid source type: %s", sourceType) + } + + // Get server info + _, err := s.serverRepo.GetByID(ctx, serverID) + if err != nil { + return fmt.Errorf("server not found: %w", err) + } + + // Delete identifiers + for _, identifier := range identifiers { + err := s.identifierRepo.DeleteByServerIDSourceTypeAndIdentifier(ctx, serverID, sourceType, identifier) + if err != nil { + s.logger.WithError(err).WithFields(logrus.Fields{ + "server_id": serverID, + "source_type": sourceType, + "identifier": identifier, + }).Warn("Failed to delete identifier") + } + } + + // Check if there are any remaining identifiers for this source type + remaining, err := s.identifierRepo.GetByServerIDAndSourceType(ctx, serverID, sourceType) + if err != nil { + s.logger.WithError(err).Warn("Failed to check remaining identifiers") + } else if len(remaining) == 0 { + // Remove from legacy sources field + err := s.RemoveServerSource(ctx, serverID, sourceType) + if err != nil { + s.logger.WithError(err).Warn("Failed to remove from legacy sources field") + } + } + + s.logger.WithFields(logrus.Fields{ + "server_id": serverID, + "source_type": sourceType, + "identifiers": len(identifiers), + }).Info("Server source identifiers removed successfully") + + return nil +} + +// validateSourceIdentifierRequest validates source identifier request +func (s *ServerService) validateSourceIdentifierRequest(req *models.SourceIdentifierRequest) error { + if req.SourceType == "" { + return fmt.Errorf("source_type is required") + } + if req.IdentifierType == "" { + return fmt.Errorf("identifier_type is required") + } + if len(req.Identifiers) == 0 { + return fmt.Errorf("at least one identifier is required") + } + + // Validate source type + validSourceTypes := []string{"TGBot", "Web", "Email"} + isValidSourceType := false + for _, st := range validSourceTypes { + if req.SourceType == st { + isValidSourceType = true + break + } + } + if !isValidSourceType { + return fmt.Errorf("invalid source_type: %s", req.SourceType) + } + + // Validate identifier type + validIdentifierTypes := []string{"telegram_id", "user_id", "email"} + isValidIdentifierType := false + for _, it := range validIdentifierTypes { + if req.IdentifierType == it { + isValidIdentifierType = true + break + } + } + if !isValidIdentifierType { + return fmt.Errorf("invalid identifier_type: %s", req.IdentifierType) + } + + // Validate identifiers for type + for _, id := range req.Identifiers { + if id == "" { + return fmt.Errorf("identifier cannot be empty") + } + + // Additional validation based on type + switch req.IdentifierType { + case "telegram_id": + // Telegram ID should be numeric + if !s.isNumeric(id) { + return fmt.Errorf("telegram_id must be numeric: %s", id) + } + case "email": + // Basic email validation + if !strings.Contains(id, "@") { + return fmt.Errorf("invalid email format: %s", id) + } + } + } + + return nil +} + +// isNumeric checks if string is numeric +func (s *ServerService) isNumeric(str string) 
bool {
+	// Guard against the empty string, which would otherwise fall through
+	// the loop untouched and be reported as numeric.
+	if str == "" {
+		return false
+	}
+	for _, c := range str {
+		if c < '0' || c > '9' {
+			return false
+		}
+	}
+	return true
+}
+
+// GetServersByTelegramID finds all servers associated with a Telegram ID
+func (s *ServerService) GetServersByTelegramID(ctx context.Context, telegramID string) ([]*models.Server, error) {
+	// Validate telegram ID
+	if !s.isNumeric(telegramID) {
+		return nil, fmt.Errorf("telegram_id must be numeric: %s", telegramID)
+	}
+
+	// Convert to int64
+	telegramIDInt, err := strconv.ParseInt(telegramID, 10, 64)
+	if err != nil {
+		return nil, fmt.Errorf("invalid telegram_id format: %w", err)
+	}
+
+	// Get all identifiers with this telegram ID OR identifier (for backward compatibility)
+	identifiers, err := s.identifierRepo.GetByTelegramIDOrIdentifier(ctx, telegramIDInt, telegramID)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get identifiers: %w", err)
+	}
+
+	if len(identifiers) == 0 {
+		return []*models.Server{}, nil
+	}
+
+	// Get unique server IDs
+	serverIDMap := make(map[string]bool)
+	for _, identifier := range identifiers {
+		serverIDMap[identifier.ServerID] = true
+	}
+
+	// Fetch all servers
+	servers := make([]*models.Server, 0, len(serverIDMap))
+	for serverID := range serverIDMap {
+		server, err := s.serverRepo.GetByID(ctx, serverID)
+		if err != nil {
+			s.logger.WithError(err).WithField("server_id", serverID).Warn("Failed to get server by ID")
+			continue
+		}
+		servers = append(servers, server)
+	}
+
+	s.logger.WithFields(logrus.Fields{
+		"telegram_id":   telegramID,
+		"servers_count": len(servers),
+	}).Info("Servers retrieved by Telegram ID")
+
+	return servers, nil
+}
+
+// UpdateTelegramID updates telegram_id for an existing server source identifier
+func (s *ServerService) UpdateTelegramID(ctx context.Context, serverID, sourceType, identifier string, telegramID int64) error {
+	// Get existing identifier
+	existing, err := s.identifierRepo.GetByServerIDAndIdentifier(ctx, serverID, sourceType, identifier)
+	if err != nil {
+		return fmt.Errorf("identifier not found: %w", err)
+	}
+
+	// Update telegram_id
+	existing.TelegramID = &telegramID
+	existing.UpdatedAt = time.Now()
+
+	// Add update metadata
+	if existing.Metadata == nil {
+		existing.Metadata = make(map[string]interface{})
+	}
+	existing.Metadata["telegram_linked_at"] = time.Now().Format(time.RFC3339)
+
+	// Save changes
+	if err := s.identifierRepo.Update(ctx, existing); err != nil {
+		return fmt.Errorf("failed to update telegram_id: %w", err)
+	}
+
+	s.logger.WithFields(logrus.Fields{
+		"server_id":   serverID,
+		"source_type": sourceType,
+		"identifier":  identifier,
+		"telegram_id": telegramID,
+	}).Info("Telegram ID updated successfully")
+
+	return nil
+}
diff --git a/internal/services/server_service_test.go b/internal/services/server_service_test.go
index 4cf3d80..ddce5db 100644
--- a/internal/services/server_service_test.go
+++ b/internal/services/server_service_test.go
@@ -37,6 +37,14 @@ type MockServerRepo struct {
 	mock.Mock
 }
 
+type MockIdentifierRepo struct {
+	mock.Mock
+}
+
+type MockKeyRepo struct {
+	mock.Mock
+}
+
 func (m *MockServerRepo) Create(ctx context.Context, server *models.Server) error {
 	args := m.Called(ctx, server)
 	return args.Error(0)
@@ -97,8 +105,85 @@ func (m *MockServerRepo) UpdateLastSeen(ctx context.Context, serverID string, lastSeen time.Time) error {
 	return args.Error(0)
 }
 
-type MockKeyRepo struct {
-	mock.Mock
+// MockIdentifierRepo methods
+func (m *MockIdentifierRepo) Create(ctx context.Context, identifier *models.ServerSourceIdentifier) error {
+	args := m.Called(ctx, identifier)
+	return args.Error(0)
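+	// The mock methods in this file all follow the same testify pattern as
+	// above: record the call via m.Called, then replay whatever the test
+	// registered. A usage sketch (expectation values are illustrative, not
+	// taken from this repo's tests):
+	//
+	//	repo := &MockIdentifierRepo{}
+	//	repo.On("Create", mock.Anything, mock.Anything).Return(nil)
+	//	err := repo.Create(context.Background(), &models.ServerSourceIdentifier{ServerID: "srv-1"})
+	//	repo.AssertExpectations(t) // passes; err is nil per the stub
+	//
+	// For methods that return pointers or slices, registering a nil result
+	// makes the unchecked assertion args.Get(0).(*models.ServerSourceIdentifier)
+	// panic; a defensive variant checks args.Get(0) == nil first.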
+} + +func (m *MockIdentifierRepo) GetByID(ctx context.Context, id int64) (*models.ServerSourceIdentifier, error) { + args := m.Called(ctx, id) + return args.Get(0).(*models.ServerSourceIdentifier), args.Error(1) +} + +func (m *MockIdentifierRepo) GetByServerID(ctx context.Context, serverID string) ([]*models.ServerSourceIdentifier, error) { + args := m.Called(ctx, serverID) + return args.Get(0).([]*models.ServerSourceIdentifier), args.Error(1) +} + +func (m *MockIdentifierRepo) GetByServerIDAndSourceType(ctx context.Context, serverID, sourceType string) ([]*models.ServerSourceIdentifier, error) { + args := m.Called(ctx, serverID, sourceType) + return args.Get(0).([]*models.ServerSourceIdentifier), args.Error(1) +} + +func (m *MockIdentifierRepo) GetByServerIDAndIdentifier(ctx context.Context, serverID, sourceType, identifier string) (*models.ServerSourceIdentifier, error) { + args := m.Called(ctx, serverID, sourceType, identifier) + return args.Get(0).(*models.ServerSourceIdentifier), args.Error(1) +} + +func (m *MockIdentifierRepo) Update(ctx context.Context, identifier *models.ServerSourceIdentifier) error { + args := m.Called(ctx, identifier) + return args.Error(0) +} + +func (m *MockIdentifierRepo) Delete(ctx context.Context, id int64) error { + args := m.Called(ctx, id) + return args.Error(0) +} + +func (m *MockIdentifierRepo) DeleteByServerIDAndSourceType(ctx context.Context, serverID, sourceType string) error { + args := m.Called(ctx, serverID, sourceType) + return args.Error(0) +} + +func (m *MockIdentifierRepo) DeleteByServerIDSourceTypeAndIdentifier(ctx context.Context, serverID, sourceType, identifier string) error { + args := m.Called(ctx, serverID, sourceType, identifier) + return args.Error(0) +} + +func (m *MockIdentifierRepo) CreateBatch(ctx context.Context, identifiers []*models.ServerSourceIdentifier) error { + args := m.Called(ctx, identifiers) + return args.Error(0) +} + +func (m *MockIdentifierRepo) DeleteBatch(ctx context.Context, ids []int64) error { + args := m.Called(ctx, ids) + return args.Error(0) +} + +func (m *MockIdentifierRepo) GetAllByServerID(ctx context.Context, serverID string) (map[string][]*models.ServerSourceIdentifier, error) { + args := m.Called(ctx, serverID) + return args.Get(0).(map[string][]*models.ServerSourceIdentifier), args.Error(1) +} + +func (m *MockIdentifierRepo) GetByIdentifier(ctx context.Context, identifierType, identifier string) ([]*models.ServerSourceIdentifier, error) { + args := m.Called(ctx, identifierType, identifier) + return args.Get(0).([]*models.ServerSourceIdentifier), args.Error(1) +} + +func (m *MockIdentifierRepo) GetByTelegramID(ctx context.Context, telegramID int64) ([]*models.ServerSourceIdentifier, error) { + args := m.Called(ctx, telegramID) + return args.Get(0).([]*models.ServerSourceIdentifier), args.Error(1) +} + +func (m *MockIdentifierRepo) GetByTelegramIDOrIdentifier(ctx context.Context, telegramID int64, identifier string) ([]*models.ServerSourceIdentifier, error) { + args := m.Called(ctx, telegramID, identifier) + return args.Get(0).([]*models.ServerSourceIdentifier), args.Error(1) +} + +func (m *MockIdentifierRepo) Ping(ctx context.Context) error { + args := m.Called(ctx) + return args.Error(0) } func (m *MockKeyRepo) Create(ctx context.Context, key *models.GeneratedKey) error { @@ -144,9 +229,10 @@ func (m *MockKeyRepo) ListByStatus(ctx context.Context, status string) ([]*model func TestNewServerService(t *testing.T) { mockServerRepo := &MockServerRepo{} mockKeyRepo := &MockKeyRepo{} + mockIdentifierRepo 
:= &MockIdentifierRepo{} logger := logrus.New() - service := NewServerService(mockServerRepo, mockKeyRepo, logger) + service := NewServerService(mockServerRepo, mockKeyRepo, mockIdentifierRepo, logger) assert.NotNil(t, service) } diff --git a/internal/services/tiered_metrics.go b/internal/services/tiered_metrics.go index 287ed9f..0904fec 100644 --- a/internal/services/tiered_metrics.go +++ b/internal/services/tiered_metrics.go @@ -2,6 +2,7 @@ package services import ( "context" + "database/sql" "fmt" "time" @@ -12,13 +13,15 @@ import ( // TieredMetricsService handles tiered metrics with automatic granularity selection type TieredMetricsService struct { timescaleDB *timescaledb.Client + pgDB *sql.DB logger *logrus.Logger } // NewTieredMetricsService creates a new tiered metrics service -func NewTieredMetricsService(timescaleDB *timescaledb.Client, logger *logrus.Logger) *TieredMetricsService { +func NewTieredMetricsService(timescaleDB *timescaledb.Client, pgDB *sql.DB, logger *logrus.Logger) *TieredMetricsService { return &TieredMetricsService{ timescaleDB: timescaleDB, + pgDB: pgDB, logger: logger, } } @@ -364,6 +367,22 @@ func (s *TieredMetricsService) calculateAverageSlice(points []timescaledb.Tiered } } +// GetServerIDByKey converts server_key to server_id +func (s *TieredMetricsService) GetServerIDByKey(ctx context.Context, serverKey string) (string, error) { + var serverID string + query := `SELECT server_id FROM generated_keys WHERE server_key = $1` + + err := s.pgDB.QueryRowContext(ctx, query, serverKey).Scan(&serverID) + if err != nil { + if err == sql.ErrNoRows { + return "", fmt.Errorf("server key not found: %s", serverKey) + } + return "", fmt.Errorf("failed to query server_id by key: %w", err) + } + + return serverID, nil +} + func (s *TieredMetricsService) calculateAverages(points []timescaledb.TieredMetricsPoint) *MetricAverages { return s.calculateAverageSlice(points) } diff --git a/internal/storage/interfaces/server_source_identifiers.go b/internal/storage/interfaces/server_source_identifiers.go new file mode 100644 index 0000000..66cba35 --- /dev/null +++ b/internal/storage/interfaces/server_source_identifiers.go @@ -0,0 +1,92 @@ +// Copyright (c) 2026 godofphonk +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
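+
+// Usage sketch for the repository interface defined below. The wiring is
+// hypothetical; the concrete PostgreSQL implementation appears later in this
+// change as postgres.NewServerSourceIdentifierRepository(db, logger):
+//
+//	var repo interfaces.ServerSourceIdentifierRepository = postgres.NewServerSourceIdentifierRepository(db, logger)
+//	ids, err := repo.GetByServerIDAndSourceType(ctx, "srv_abc123", "TGBot")
+//	if err != nil {
+//		return err
+//	}
+//	for _, id := range ids {
+//		fmt.Println(id.Identifier, id.IdentifierType)
+//	}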
+ +package interfaces + +import ( + "context" + + "github.com/godofphonk/ServerEyeAPI/internal/models" +) + +// ServerSourceIdentifierRepository defines operations for server source identifiers +type ServerSourceIdentifierRepository interface { + // Basic CRUD operations + Create(ctx context.Context, identifier *models.ServerSourceIdentifier) error + GetByID(ctx context.Context, id int64) (*models.ServerSourceIdentifier, error) + GetByServerID(ctx context.Context, serverID string) ([]*models.ServerSourceIdentifier, error) + GetByServerIDAndSourceType(ctx context.Context, serverID, sourceType string) ([]*models.ServerSourceIdentifier, error) + GetByServerIDAndIdentifier(ctx context.Context, serverID, sourceType, identifier string) (*models.ServerSourceIdentifier, error) + Update(ctx context.Context, identifier *models.ServerSourceIdentifier) error + Delete(ctx context.Context, id int64) error + DeleteByServerIDAndSourceType(ctx context.Context, serverID, sourceType string) error + DeleteByServerIDSourceTypeAndIdentifier(ctx context.Context, serverID, sourceType, identifier string) error + + // Batch operations + CreateBatch(ctx context.Context, identifiers []*models.ServerSourceIdentifier) error + DeleteBatch(ctx context.Context, ids []int64) error + + // Query operations + GetAllByServerID(ctx context.Context, serverID string) (map[string][]*models.ServerSourceIdentifier, error) + GetByIdentifier(ctx context.Context, identifierType, identifier string) ([]*models.ServerSourceIdentifier, error) + GetByTelegramID(ctx context.Context, telegramID int64) ([]*models.ServerSourceIdentifier, error) + GetByTelegramIDOrIdentifier(ctx context.Context, telegramID int64, identifier string) ([]*models.ServerSourceIdentifier, error) + + // Health check + Ping(ctx context.Context) error +} + +// SourceIdentifierListOption defines options for list operations +type SourceIdentifierListOption func(*SourceIdentifierListOptions) + +type SourceIdentifierListOptions struct { + SourceType string + IdentifierType string + Limit int + Offset int +} + +// WithSourceType filters by source type +func WithSourceType(sourceType string) SourceIdentifierListOption { + return func(opts *SourceIdentifierListOptions) { + opts.SourceType = sourceType + } +} + +// WithIdentifierType filters by identifier type +func WithIdentifierType(identifierType string) SourceIdentifierListOption { + return func(opts *SourceIdentifierListOptions) { + opts.IdentifierType = identifierType + } +} + +// WithIdentifierLimit sets the limit for list operations +func WithIdentifierLimit(limit int) SourceIdentifierListOption { + return func(opts *SourceIdentifierListOptions) { + opts.Limit = limit + } +} + +// WithIdentifierOffset sets the offset for list operations +func WithIdentifierOffset(offset int) SourceIdentifierListOption { + return func(opts *SourceIdentifierListOptions) { + opts.Offset = offset + } +} diff --git a/internal/storage/repositories/postgres/server_source_identifiers_repository.go b/internal/storage/repositories/postgres/server_source_identifiers_repository.go new file mode 100644 index 0000000..1a04902 --- /dev/null +++ b/internal/storage/repositories/postgres/server_source_identifiers_repository.go @@ -0,0 +1,528 @@ +// Copyright (c) 2026 godofphonk +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, 
publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package postgres + +import ( + "context" + "database/sql" + "encoding/json" + "fmt" + "time" + + "github.com/godofphonk/ServerEyeAPI/internal/models" + "github.com/godofphonk/ServerEyeAPI/internal/storage/interfaces" + "github.com/lib/pq" + "github.com/sirupsen/logrus" +) + +// ServerSourceIdentifierRepository implements interfaces.ServerSourceIdentifierRepository for PostgreSQL +type ServerSourceIdentifierRepository struct { + db *sql.DB + logger *logrus.Logger +} + +// NewServerSourceIdentifierRepository creates a new PostgreSQL server source identifier repository +func NewServerSourceIdentifierRepository(db *sql.DB, logger *logrus.Logger) interfaces.ServerSourceIdentifierRepository { + return &ServerSourceIdentifierRepository{ + db: db, + logger: logger, + } +} + +// Create creates a new server source identifier +func (r *ServerSourceIdentifierRepository) Create(ctx context.Context, identifier *models.ServerSourceIdentifier) error { + query := ` + INSERT INTO server_source_identifiers (server_id, source_type, identifier, identifier_type, telegram_id, metadata, created_at, updated_at) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8) + RETURNING id + ` + + var metadataJSON []byte + var err error + if identifier.Metadata != nil { + metadataJSON, err = json.Marshal(identifier.Metadata) + if err != nil { + return fmt.Errorf("failed to marshal metadata: %w", err) + } + } else { + metadataJSON = []byte("{}") + } + + var telegramIDValue interface{} + if identifier.TelegramID != nil { + telegramIDValue = *identifier.TelegramID + } else { + telegramIDValue = nil + } + + err = r.db.QueryRowContext(ctx, query, + identifier.ServerID, + identifier.SourceType, + identifier.Identifier, + identifier.IdentifierType, + telegramIDValue, + metadataJSON, + time.Now(), + time.Now(), + ).Scan(&identifier.ID) + + if err != nil { + return fmt.Errorf("failed to create server source identifier: %w", err) + } + + r.logger.WithFields(logrus.Fields{ + "id": identifier.ID, + "server_id": identifier.ServerID, + "source_type": identifier.SourceType, + "identifier": identifier.Identifier, + "identifier_type": identifier.IdentifierType, + }).Info("Server source identifier created successfully") + + return nil +} + +// GetByID retrieves a server source identifier by ID +func (r *ServerSourceIdentifierRepository) GetByID(ctx context.Context, id int64) (*models.ServerSourceIdentifier, error) { + query := ` + SELECT id, server_id, source_type, identifier, identifier_type, telegram_id, metadata, created_at, updated_at + FROM server_source_identifiers + WHERE id = $1 + ` + + var identifier models.ServerSourceIdentifier + var metadataJSON []byte + + err := r.db.QueryRowContext(ctx, query, id).Scan( + &identifier.ID, + 
&identifier.ServerID, + &identifier.SourceType, + &identifier.Identifier, + &identifier.IdentifierType, + &identifier.TelegramID, + &metadataJSON, + &identifier.CreatedAt, + &identifier.UpdatedAt, + ) + + if err != nil { + if err == sql.ErrNoRows { + return nil, fmt.Errorf("server source identifier not found for id: %d", id) + } + return nil, fmt.Errorf("failed to get server source identifier: %w", err) + } + + if len(metadataJSON) > 0 && string(metadataJSON) != "null" { + if err := json.Unmarshal(metadataJSON, &identifier.Metadata); err != nil { + return nil, fmt.Errorf("failed to unmarshal metadata: %w", err) + } + } else { + identifier.Metadata = make(map[string]interface{}) + } + + return &identifier, nil +} + +// GetByServerID retrieves all server source identifiers for a server +func (r *ServerSourceIdentifierRepository) GetByServerID(ctx context.Context, serverID string) ([]*models.ServerSourceIdentifier, error) { + query := ` + SELECT id, server_id, source_type, identifier, identifier_type, telegram_id, metadata, created_at, updated_at + FROM server_source_identifiers + WHERE server_id = $1 + ORDER BY created_at DESC + ` + + return r.scanIdentifiers(ctx, query, serverID) +} + +// GetByServerIDAndSourceType retrieves identifiers for a server and source type +func (r *ServerSourceIdentifierRepository) GetByServerIDAndSourceType(ctx context.Context, serverID, sourceType string) ([]*models.ServerSourceIdentifier, error) { + query := ` + SELECT id, server_id, source_type, identifier, identifier_type, telegram_id, metadata, created_at, updated_at + FROM server_source_identifiers + WHERE server_id = $1 AND source_type = $2 + ORDER BY created_at DESC + ` + + return r.scanIdentifiers(ctx, query, serverID, sourceType) +} + +// GetByServerIDAndIdentifier retrieves a specific identifier +func (r *ServerSourceIdentifierRepository) GetByServerIDAndIdentifier(ctx context.Context, serverID, sourceType, identifier string) (*models.ServerSourceIdentifier, error) { + query := ` + SELECT id, server_id, source_type, identifier, identifier_type, telegram_id, metadata, created_at, updated_at + FROM server_source_identifiers + WHERE server_id = $1 AND source_type = $2 AND identifier = $3 + ` + + var ident models.ServerSourceIdentifier + var metadataJSON []byte + + err := r.db.QueryRowContext(ctx, query, serverID, sourceType, identifier).Scan( + &ident.ID, + &ident.ServerID, + &ident.SourceType, + &ident.Identifier, + &ident.IdentifierType, + &ident.TelegramID, + &metadataJSON, + &ident.CreatedAt, + &ident.UpdatedAt, + ) + + if err != nil { + if err == sql.ErrNoRows { + return nil, fmt.Errorf("server source identifier not found") + } + return nil, fmt.Errorf("failed to get server source identifier: %w", err) + } + + if len(metadataJSON) > 0 && string(metadataJSON) != "null" { + if err := json.Unmarshal(metadataJSON, &ident.Metadata); err != nil { + return nil, fmt.Errorf("failed to unmarshal metadata: %w", err) + } + } else { + ident.Metadata = make(map[string]interface{}) + } + + return &ident, nil +} + +// Update updates a server source identifier +func (r *ServerSourceIdentifierRepository) Update(ctx context.Context, identifier *models.ServerSourceIdentifier) error { + query := ` + UPDATE server_source_identifiers + SET identifier_type = $2, telegram_id = $3, metadata = $4, updated_at = $5 + WHERE id = $1 + ` + + var metadataJSON []byte + var err error + if identifier.Metadata != nil { + metadataJSON, err = json.Marshal(identifier.Metadata) + if err != nil { + return fmt.Errorf("failed to marshal 
metadata: %w", err) + } + } else { + metadataJSON = []byte("{}") + } + + var telegramIDValue interface{} + if identifier.TelegramID != nil { + telegramIDValue = *identifier.TelegramID + } else { + telegramIDValue = nil + } + + result, err := r.db.ExecContext(ctx, query, + identifier.ID, + identifier.IdentifierType, + telegramIDValue, + metadataJSON, + time.Now(), + ) + + if err != nil { + return fmt.Errorf("failed to update server source identifier: %w", err) + } + + rowsAffected, err := result.RowsAffected() + if err != nil { + return fmt.Errorf("failed to get rows affected: %w", err) + } + + if rowsAffected == 0 { + return fmt.Errorf("no rows affected when updating server source identifier with id: %d", identifier.ID) + } + + r.logger.WithFields(logrus.Fields{ + "id": identifier.ID, + }).Info("Server source identifier updated successfully") + + return nil +} + +// Delete deletes a server source identifier by ID +func (r *ServerSourceIdentifierRepository) Delete(ctx context.Context, id int64) error { + query := `DELETE FROM server_source_identifiers WHERE id = $1` + + result, err := r.db.ExecContext(ctx, query, id) + if err != nil { + return fmt.Errorf("failed to delete server source identifier: %w", err) + } + + rowsAffected, err := result.RowsAffected() + if err != nil { + return fmt.Errorf("failed to get rows affected: %w", err) + } + + if rowsAffected == 0 { + return fmt.Errorf("no rows affected when deleting server source identifier with id: %d", id) + } + + r.logger.WithFields(logrus.Fields{ + "id": id, + }).Info("Server source identifier deleted successfully") + + return nil +} + +// DeleteByServerIDAndSourceType deletes all identifiers for a server and source type +func (r *ServerSourceIdentifierRepository) DeleteByServerIDAndSourceType(ctx context.Context, serverID, sourceType string) error { + query := `DELETE FROM server_source_identifiers WHERE server_id = $1 AND source_type = $2` + + result, err := r.db.ExecContext(ctx, query, serverID, sourceType) + if err != nil { + return fmt.Errorf("failed to delete server source identifiers: %w", err) + } + + rowsAffected, err := result.RowsAffected() + if err != nil { + return fmt.Errorf("failed to get rows affected: %w", err) + } + + r.logger.WithFields(logrus.Fields{ + "server_id": serverID, + "source_type": sourceType, + "rows_count": rowsAffected, + }).Info("Server source identifiers deleted successfully") + + return nil +} + +// DeleteByServerIDSourceTypeAndIdentifier deletes a specific identifier +func (r *ServerSourceIdentifierRepository) DeleteByServerIDSourceTypeAndIdentifier(ctx context.Context, serverID, sourceType, identifier string) error { + query := `DELETE FROM server_source_identifiers WHERE server_id = $1 AND source_type = $2 AND identifier = $3` + + result, err := r.db.ExecContext(ctx, query, serverID, sourceType, identifier) + if err != nil { + return fmt.Errorf("failed to delete server source identifier: %w", err) + } + + rowsAffected, err := result.RowsAffected() + if err != nil { + return fmt.Errorf("failed to get rows affected: %w", err) + } + + if rowsAffected == 0 { + return fmt.Errorf("no rows affected when deleting server source identifier") + } + + r.logger.WithFields(logrus.Fields{ + "server_id": serverID, + "source_type": sourceType, + "identifier": identifier, + }).Info("Server source identifier deleted successfully") + + return nil +} + +// CreateBatch creates multiple identifiers in a single transaction +func (r *ServerSourceIdentifierRepository) CreateBatch(ctx context.Context, identifiers 
[]*models.ServerSourceIdentifier) error { + tx, err := r.db.BeginTx(ctx, nil) + if err != nil { + return fmt.Errorf("failed to begin transaction: %w", err) + } + defer tx.Rollback() + + query := ` + INSERT INTO server_source_identifiers (server_id, source_type, identifier, identifier_type, telegram_id, metadata, created_at, updated_at) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8) + RETURNING id + ` + + for _, identifier := range identifiers { + var metadataJSON []byte + if identifier.Metadata != nil { + metadataJSON, err = json.Marshal(identifier.Metadata) + if err != nil { + return fmt.Errorf("failed to marshal metadata: %w", err) + } + } else { + metadataJSON = []byte("{}") + } + + var telegramIDValue interface{} + if identifier.TelegramID != nil { + telegramIDValue = *identifier.TelegramID + } else { + telegramIDValue = nil + } + + err = tx.QueryRowContext(ctx, query, + identifier.ServerID, + identifier.SourceType, + identifier.Identifier, + identifier.IdentifierType, + telegramIDValue, + metadataJSON, + time.Now(), + time.Now(), + ).Scan(&identifier.ID) + + if err != nil { + return fmt.Errorf("failed to create server source identifier: %w", err) + } + } + + if err := tx.Commit(); err != nil { + return fmt.Errorf("failed to commit transaction: %w", err) + } + + r.logger.WithFields(logrus.Fields{ + "count": len(identifiers), + }).Info("Server source identifiers created successfully in batch") + + return nil +} + +// DeleteBatch deletes multiple identifiers in a single transaction +func (r *ServerSourceIdentifierRepository) DeleteBatch(ctx context.Context, ids []int64) error { + if len(ids) == 0 { + return nil + } + + tx, err := r.db.BeginTx(ctx, nil) + if err != nil { + return fmt.Errorf("failed to begin transaction: %w", err) + } + defer tx.Rollback() + + query := `DELETE FROM server_source_identifiers WHERE id = ANY($1)` + + result, err := tx.ExecContext(ctx, query, pq.Array(ids)) + if err != nil { + return fmt.Errorf("failed to delete server source identifiers: %w", err) + } + + rowsAffected, err := result.RowsAffected() + if err != nil { + return fmt.Errorf("failed to get rows affected: %w", err) + } + + if err := tx.Commit(); err != nil { + return fmt.Errorf("failed to commit transaction: %w", err) + } + + r.logger.WithFields(logrus.Fields{ + "count": len(ids), + "rows_count": rowsAffected, + }).Info("Server source identifiers deleted successfully in batch") + + return nil +} + +// GetAllByServerID retrieves all identifiers grouped by source type +func (r *ServerSourceIdentifierRepository) GetAllByServerID(ctx context.Context, serverID string) (map[string][]*models.ServerSourceIdentifier, error) { + identifiers, err := r.GetByServerID(ctx, serverID) + if err != nil { + return nil, err + } + + result := make(map[string][]*models.ServerSourceIdentifier) + for _, identifier := range identifiers { + result[identifier.SourceType] = append(result[identifier.SourceType], identifier) + } + + return result, nil +} + +// GetByIdentifier finds all servers with a specific identifier +func (r *ServerSourceIdentifierRepository) GetByIdentifier(ctx context.Context, identifierType, identifier string) ([]*models.ServerSourceIdentifier, error) { + query := ` + SELECT id, server_id, source_type, identifier, identifier_type, telegram_id, metadata, created_at, updated_at + FROM server_source_identifiers + WHERE identifier_type = $1 AND identifier = $2 + ORDER BY created_at DESC + ` + + return r.scanIdentifiers(ctx, query, identifierType, identifier) +} + +// GetByTelegramID finds all servers with a 
specific telegram_id +func (r *ServerSourceIdentifierRepository) GetByTelegramID(ctx context.Context, telegramID int64) ([]*models.ServerSourceIdentifier, error) { + query := ` + SELECT id, server_id, source_type, identifier, identifier_type, telegram_id, metadata, created_at, updated_at + FROM server_source_identifiers + WHERE telegram_id = $1 + ORDER BY created_at DESC + ` + + return r.scanIdentifiers(ctx, query, telegramID) +} + +// GetByTelegramIDOrIdentifier finds all servers with telegram_id or identifier (for backward compatibility) +func (r *ServerSourceIdentifierRepository) GetByTelegramIDOrIdentifier(ctx context.Context, telegramID int64, identifier string) ([]*models.ServerSourceIdentifier, error) { + query := ` + SELECT id, server_id, source_type, identifier, identifier_type, telegram_id, metadata, created_at, updated_at + FROM server_source_identifiers + WHERE telegram_id = $1 OR identifier = $2 + ORDER BY created_at DESC + ` + + return r.scanIdentifiers(ctx, query, telegramID, identifier) +} + +// Ping checks database connectivity +func (r *ServerSourceIdentifierRepository) Ping(ctx context.Context) error { + return r.db.PingContext(ctx) +} + +// Helper method to scan identifiers from rows +func (r *ServerSourceIdentifierRepository) scanIdentifiers(ctx context.Context, query string, args ...interface{}) ([]*models.ServerSourceIdentifier, error) { + rows, err := r.db.QueryContext(ctx, query, args...) + if err != nil { + return nil, fmt.Errorf("failed to query server source identifiers: %w", err) + } + defer rows.Close() + + var identifiers []*models.ServerSourceIdentifier + for rows.Next() { + var identifier models.ServerSourceIdentifier + var metadataJSON []byte + + err := rows.Scan( + &identifier.ID, + &identifier.ServerID, + &identifier.SourceType, + &identifier.Identifier, + &identifier.IdentifierType, + &identifier.TelegramID, + &metadataJSON, + &identifier.CreatedAt, + &identifier.UpdatedAt, + ) + if err != nil { + return nil, fmt.Errorf("failed to scan server source identifier: %w", err) + } + + if len(metadataJSON) > 0 && string(metadataJSON) != "null" { + if err := json.Unmarshal(metadataJSON, &identifier.Metadata); err != nil { + return nil, fmt.Errorf("failed to unmarshal metadata: %w", err) + } + } else { + identifier.Metadata = make(map[string]interface{}) + } + + identifiers = append(identifiers, &identifier) + } + + return identifiers, nil +} diff --git a/internal/storage/static_data.go b/internal/storage/static_data.go index 67bdda8..ebe37f5 100644 --- a/internal/storage/static_data.go +++ b/internal/storage/static_data.go @@ -62,7 +62,7 @@ type HardwareInfo struct { CPUFrequencyMHz float64 `json:"cpu_frequency_mhz"` GPUModel string `json:"gpu_model"` GPUDriver string `json:"gpu_driver"` - GPUMemoryGB int `json:"gpu_memory_gb"` + GPUMemoryGB float64 `json:"gpu_memory_gb"` TotalMemoryGB float64 `json:"total_memory_gb"` CreatedAt time.Time `json:"created_at"` UpdatedAt time.Time `json:"updated_at"` @@ -253,7 +253,8 @@ func (s *PostgresStaticDataStorage) GetHardwareInfo(ctx context.Context, serverI info := &HardwareInfo{} var cpuModel, gpuModel, gpuDriver sql.NullString - var cpuCores, cpuThreads, gpuMemoryGB sql.NullInt64 + var cpuCores, cpuThreads sql.NullInt64 + var gpuMemoryGB sql.NullFloat64 var cpuFreq, totalMemory sql.NullFloat64 err := s.db.QueryRowContext(ctx, query, serverID).Scan( @@ -288,7 +289,7 @@ func (s *PostgresStaticDataStorage) GetHardwareInfo(ctx context.Context, serverI info.CPUFrequencyMHz = cpuFreq.Float64 } if gpuMemoryGB.Valid { - 
diff --git a/internal/storage/static_data.go b/internal/storage/static_data.go
index 67bdda8..ebe37f5 100644
--- a/internal/storage/static_data.go
+++ b/internal/storage/static_data.go
@@ -62,7 +62,7 @@ type HardwareInfo struct {
 	CPUFrequencyMHz float64   `json:"cpu_frequency_mhz"`
 	GPUModel        string    `json:"gpu_model"`
 	GPUDriver       string    `json:"gpu_driver"`
-	GPUMemoryGB     int       `json:"gpu_memory_gb"`
+	GPUMemoryGB     float64   `json:"gpu_memory_gb"`
 	TotalMemoryGB   float64   `json:"total_memory_gb"`
 	CreatedAt       time.Time `json:"created_at"`
 	UpdatedAt       time.Time `json:"updated_at"`
@@ -253,7 +253,8 @@ func (s *PostgresStaticDataStorage) GetHardwareInfo(ctx context.Context, serverI
 	info := &HardwareInfo{}
 
 	var cpuModel, gpuModel, gpuDriver sql.NullString
-	var cpuCores, cpuThreads, gpuMemoryGB sql.NullInt64
+	var cpuCores, cpuThreads sql.NullInt64
+	var gpuMemoryGB sql.NullFloat64
 	var cpuFreq, totalMemory sql.NullFloat64
 
 	err := s.db.QueryRowContext(ctx, query, serverID).Scan(
@@ -288,7 +289,7 @@ func (s *PostgresStaticDataStorage) GetHardwareInfo(ctx context.Context, serverI
 		info.CPUFrequencyMHz = cpuFreq.Float64
 	}
 	if gpuMemoryGB.Valid {
-		info.GPUMemoryGB = int(gpuMemoryGB.Int64)
+		info.GPUMemoryGB = gpuMemoryGB.Float64
 	}
 	if totalMemory.Valid {
 		info.TotalMemoryGB = totalMemory.Float64
diff --git a/internal/storage/timescaledb/multi_tier_metrics.go b/internal/storage/timescaledb/multi_tier_metrics.go
index 8ae3b65..39e6134 100644
--- a/internal/storage/timescaledb/multi_tier_metrics.go
+++ b/internal/storage/timescaledb/multi_tier_metrics.go
@@ -17,6 +17,9 @@ const (
 	Granularity1Min  MetricsGranularity = "1m"
 	Granularity5Min  MetricsGranularity = "5m"
 	Granularity10Min MetricsGranularity = "10m"
+	Granularity30Min MetricsGranularity = "30m"
+	Granularity2Hour MetricsGranularity = "2h"
+	Granularity6Hour MetricsGranularity = "6h"
 	Granularity1Hour MetricsGranularity = "1h"
 )
 
@@ -567,12 +570,14 @@ func (c *Client) determineGranularity(start, end time.Time) MetricsGranularity {
 	if duration <= time.Hour {
 		return Granularity1Min
-	} else if duration <= 3*time.Hour {
-		return Granularity5Min
-	} else if duration <= 24*time.Hour {
+	} else if duration <= 6*time.Hour {
 		return Granularity10Min
+	} else if duration <= 24*time.Hour {
+		return Granularity30Min
+	} else if duration <= 7*24*time.Hour {
+		return Granularity2Hour
 	} else {
-		return Granularity1Hour
+		return Granularity6Hour
 	}
 }
 
@@ -584,6 +589,12 @@ func (c *Client) getViewName(granularity MetricsGranularity) string {
 	case Granularity5Min:
 		return "metrics_5m_avg"
 	case Granularity10Min:
 		return "metrics_10m_avg"
+	case Granularity30Min:
+		return "metrics_30m_avg"
+	case Granularity2Hour:
+		return "metrics_2h_avg"
+	case Granularity6Hour:
+		return "metrics_6h_avg"
 	case Granularity1Hour:
 		return "metrics_1h_avg"
 	default:
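Restating the new window-to-aggregate mapping in one place (thresholds copied from determineGranularity above; the point counts are the approximate maxima the SQL tests at the end of this patch check for). pickGranularity is a hypothetical free-function restatement, not part of the patch:

package timescaledb

import "time"

// Reference table for determineGranularity's thresholds:
//   <= 1h  -> 1m  (<= 60 points)    <= 6h -> 10m (<= 36 points)
//   <= 24h -> 30m (<= 48 points)    <= 7d -> 2h  (<= 84 points)
//   else   -> 6h  (e.g. 30d -> <= 120 points)
func pickGranularity(d time.Duration) MetricsGranularity {
	switch {
	case d <= time.Hour:
		return Granularity1Min
	case d <= 6*time.Hour:
		return Granularity10Min
	case d <= 24*time.Hour:
		return Granularity30Min
	case d <= 7*24*time.Hour:
		return Granularity2Hour
	default:
		return Granularity6Hour
	}
}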
diff --git a/internal/websocket/server.go b/internal/websocket/server.go
index 11dc257..59c120e 100644
--- a/internal/websocket/server.go
+++ b/internal/websocket/server.go
@@ -411,14 +411,70 @@ func (s *Server) handleMetrics(ctx context.Context, client *Client, msg models.W
 		"data_keys": len(msg.Data),
 	}).Info("Metrics message has data, parsing...")
 
-	// Parse metrics message
-	var metricsMsg models.MetricsMessage
 	dataBytes, err := json.Marshal(msg.Data)
 	if err != nil {
 		s.logger.WithError(err).WithField("server_id", client.ServerID).Error("Failed to marshal metrics data")
 		return
 	}
 
+	// Try to parse as new format first (MetricsV2)
+	// Agent sends: {"type": "metrics", "server_id": "...", "data": {"metrics": {"metrics": {...}}}}
+	var nestedMsg struct {
+		Metrics models.MetricsV2 `json:"metrics"`
+	}
+
+	parseErr := json.Unmarshal(dataBytes, &nestedMsg)
+	var newMetricsMsg struct {
+		Metrics models.MetricsV2 `json:"metrics"`
+	}
+
+	if parseErr == nil && !nestedMsg.Metrics.Timestamp.IsZero() {
+		// Successfully parsed nested structure
+		newMetricsMsg.Metrics = nestedMsg.Metrics
+		parseErr = nil
+	} else {
+		// Fallback: try direct structure (data.metrics)
+		var directMsg struct {
+			Metrics models.MetricsV2 `json:"metrics"`
+		}
+		if directErr := json.Unmarshal(dataBytes, &directMsg); directErr == nil && !directMsg.Metrics.Timestamp.IsZero() {
+			newMetricsMsg = directMsg
+			parseErr = nil
+		}
+	}
+
+	if parseErr == nil && !newMetricsMsg.Metrics.Timestamp.IsZero() {
+		s.logger.WithFields(logrus.Fields{
+			"server_id":   client.ServerID,
+			"format":      "v2",
+			"cpu_total":   newMetricsMsg.Metrics.CPUUsage.UsageTotal,
+			"memory_used": newMetricsMsg.Metrics.Memory.UsedPercent,
+			"temperature": newMetricsMsg.Metrics.Temperature.Highest,
+		}).Info("📊 Using new metrics format (V2)")
+
+		// Convert V2 to old format for storage compatibility
+		oldMetrics := s.convertV2ToOldFormat(&newMetricsMsg.Metrics)
+		oldMetrics.Time = newMetricsMsg.Metrics.Timestamp
+
+		s.logger.WithFields(logrus.Fields{
+			"server_id":   client.ServerID,
+			"cpu":         oldMetrics.CPU,
+			"memory":      oldMetrics.Memory,
+			"temperature": oldMetrics.TemperatureDetails.HighestTemperature,
+		}).Info("Storing V2 metrics")
+
+		// Store converted metrics
+		if err := s.storage.StoreMetric(ctx, client.ServerID, oldMetrics); err != nil {
+			s.logger.WithError(err).WithField("server_id", client.ServerID).Error("Failed to store V2 metrics")
+			return
+		}
+
+		s.logger.WithField("server_id", client.ServerID).Info("✅ Successfully stored V2 metrics")
+		return
+	}
+
+	// Fallback to old format
+	var metricsMsg models.MetricsMessage
 	if err := json.Unmarshal(dataBytes, &metricsMsg); err != nil {
 		s.logger.WithError(err).WithField("server_id", client.ServerID).Error("Invalid metrics message format")
 		return
@@ -429,15 +485,16 @@ func (s *Server) handleMetrics(ctx context.Context, client *Client, msg models.W
 		"cpu":    metricsMsg.Metrics.CPU,
 		"memory": metricsMsg.Metrics.Memory,
 		"disk":   metricsMsg.Metrics.Disk,
-	}).Info("Parsed metrics message, storing in Redis")
+		"format": "v1",
+	}).Info("📊 Using old metrics format (V1)")
 
-	// Store metrics in Redis
+	// Store metrics
 	if err := s.storage.StoreMetric(ctx, client.ServerID, &metricsMsg.Metrics); err != nil {
 		s.logger.WithError(err).WithField("server_id", client.ServerID).Error("Failed to store metrics")
 		return
 	}
 
-	s.logger.WithField("server_id", client.ServerID).Info("✅ Successfully stored metrics from WebSocket")
+	s.logger.WithField("server_id", client.ServerID).Info("✅ Successfully stored V1 metrics")
 }
 
 // handleHeartbeat handles heartbeat messages
@@ -487,3 +544,120 @@ func (s *Server) SendToClient(serverID string, msg models.WSMessage) bool {
 
 	return client.SendMessage(msg)
 }
+
+// convertV2ToOldFormat converts new MetricsV2 format to old ServerMetrics format
+func (s *Server) convertV2ToOldFormat(v2 *models.MetricsV2) *models.ServerMetrics {
+	old := &models.ServerMetrics{}
+
+	// Aggregated values for backward compatibility
+	old.CPU = v2.CPUUsage.UsageTotal
+	old.Memory = v2.Memory.UsedPercent
+
+	// Calculate average disk usage
+	if len(v2.Disks) > 0 {
+		var totalDiskUsage float64
+		for _, disk := range v2.Disks {
+			totalDiskUsage += disk.UsedPercent
+		}
+		old.Disk = totalDiskUsage / float64(len(v2.Disks))
+	}
+
+	// Calculate total network traffic in MB
+	var totalRxMB, totalTxMB float64
+	for _, iface := range v2.Network.Interfaces {
+		totalRxMB += float64(iface.RxBytes) / 1024 / 1024
+		totalTxMB += float64(iface.TxBytes) / 1024 / 1024
+	}
+	old.Network = totalRxMB + totalTxMB
+
+	// CPU detailed metrics
+	old.CPUUsage.UsageTotal = v2.CPUUsage.UsageTotal
+	old.CPUUsage.UsageUser = v2.CPUUsage.UsageUser
+	old.CPUUsage.UsageSystem = v2.CPUUsage.UsageSystem
+	old.CPUUsage.UsageIdle = v2.CPUUsage.UsageIdle
+	old.CPUUsage.LoadAverage.Load1 = v2.CPUUsage.LoadAverage.Load1Min
+	old.CPUUsage.LoadAverage.Load5 = v2.CPUUsage.LoadAverage.Load5Min
+	old.CPUUsage.LoadAverage.Load15 = v2.CPUUsage.LoadAverage.Load15Min
+	old.CPUUsage.Frequency = v2.CPUUsage.FrequencyMHz
+
+	// Memory detailed metrics
+	old.MemoryDetails.TotalGB = v2.Memory.TotalGB
+	old.MemoryDetails.UsedGB = v2.Memory.UsedGB
+	old.MemoryDetails.AvailableGB = v2.Memory.AvailableGB
+	old.MemoryDetails.FreeGB = v2.Memory.FreeGB
+	old.MemoryDetails.BuffersGB = v2.Memory.BuffersGB
+	old.MemoryDetails.CachedGB = v2.Memory.CachedGB
+	old.MemoryDetails.UsedPercent = v2.Memory.UsedPercent
+
+	// Disk detailed metrics
+	if len(v2.Disks) > 0 {
+		old.DiskDetails = make([]struct {
+			Path        string  `json:"path"`
+			TotalGB     float64 `json:"total_gb"`
+			UsedGB      float64 `json:"used_gb"`
+			FreeGB      float64 `json:"free_gb"`
+			UsedPercent float64 `json:"used_percent"`
+			Filesystem  string  `json:"filesystem"`
+		}, len(v2.Disks))
+		for i, disk := range v2.Disks {
+			old.DiskDetails[i].Path = disk.MountPoint
+			old.DiskDetails[i].UsedGB = disk.UsedGB
+			old.DiskDetails[i].FreeGB = disk.FreeGB
+			old.DiskDetails[i].UsedPercent = disk.UsedPercent
+			old.DiskDetails[i].TotalGB = disk.UsedGB + disk.FreeGB
+		}
+	}
+
+	// Network detailed metrics
+	if len(v2.Network.Interfaces) > 0 {
+		old.NetworkDetails.Interfaces = make([]struct {
+			Name        string  `json:"name"`
+			RxBytes     int64   `json:"rx_bytes"`
+			TxBytes     int64   `json:"tx_bytes"`
+			RxPackets   int64   `json:"rx_packets"`
+			TxPackets   int64   `json:"tx_packets"`
+			RxSpeedMbps float64 `json:"rx_speed_mbps"`
+			TxSpeedMbps float64 `json:"tx_speed_mbps"`
+			Status      string  `json:"status"`
+		}, len(v2.Network.Interfaces))
+		for i, iface := range v2.Network.Interfaces {
+			old.NetworkDetails.Interfaces[i].Name = iface.Name
+			old.NetworkDetails.Interfaces[i].RxBytes = iface.RxBytes
+			old.NetworkDetails.Interfaces[i].TxBytes = iface.TxBytes
+			old.NetworkDetails.Interfaces[i].RxPackets = iface.RxPackets
+			old.NetworkDetails.Interfaces[i].TxPackets = iface.TxPackets
+			old.NetworkDetails.Interfaces[i].RxSpeedMbps = iface.RxSpeedMbps
+			old.NetworkDetails.Interfaces[i].TxSpeedMbps = iface.TxSpeedMbps
+			old.NetworkDetails.Interfaces[i].Status = iface.Status
+		}
+		old.NetworkDetails.TotalRxMbps = v2.Network.TotalRxMbps
+		old.NetworkDetails.TotalTxMbps = v2.Network.TotalTxMbps
+		old.Network = v2.Network.TotalRxMbps + v2.Network.TotalTxMbps // Use total as aggregate
+	}
+
+	// Temperature metrics
+	old.TemperatureDetails.CPUTemperature = v2.Temperature.CPU
+	old.TemperatureDetails.GPUTemperature = v2.Temperature.GPU
+	old.TemperatureDetails.HighestTemperature = v2.Temperature.Highest
+
+	if len(v2.Temperature.Storage) > 0 {
+		old.TemperatureDetails.StorageTemperatures = make([]struct {
+			Device      string  `json:"device"`
+			Type        string  `json:"type"`
+			Temperature float64 `json:"temperature"`
+		}, len(v2.Temperature.Storage))
+
+		for i, storage := range v2.Temperature.Storage {
+			old.TemperatureDetails.StorageTemperatures[i].Device = storage.Device
+			old.TemperatureDetails.StorageTemperatures[i].Temperature = storage.Temperature
+		}
+	}
+
+	// System metrics
+	old.SystemDetails.ProcessesTotal = v2.System.ProcessesTotal
+	old.SystemDetails.ProcessesRunning = v2.System.ProcessesRunning
+	old.SystemDetails.ProcessesSleeping = v2.System.ProcessesSleeping
+	old.SystemDetails.UptimeSeconds = v2.System.UptimeSeconds
+
+	return old
+}
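The detection rule above boils down to: unmarshal the data payload into a V2 envelope and treat a non-zero timestamp as "V2 detected". A condensed sketch (the envelope struct is copied from the diff; the surrounding handler context is assumed):

// Condensed sketch of the format sniffing in handleMetrics.
var env struct {
	Metrics models.MetricsV2 `json:"metrics"`
}
if err := json.Unmarshal(dataBytes, &env); err == nil && !env.Metrics.Timestamp.IsZero() {
	// New format: convert V2 -> legacy ServerMetrics and store.
	old := s.convertV2ToOldFormat(&env.Metrics)
	old.Time = env.Metrics.Timestamp
	if err := s.storage.StoreMetric(ctx, client.ServerID, old); err != nil {
		return
	}
	return
}
// ...otherwise fall back to decoding models.MetricsMessage (V1).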
diff --git a/internal/wire/wire.go b/internal/wire/wire.go
index 990997a..e44af19 100644
--- a/internal/wire/wire.go
+++ b/internal/wire/wire.go
@@ -24,6 +24,7 @@ package wire
 
 import (
+	"database/sql"
 	"fmt"
 
 	"github.com/godofphonk/ServerEyeAPI/internal/api"
@@ -49,6 +50,7 @@ var ProviderSet = wire.NewSet(
 	NewPostgresClient,
 	NewTimescaleDBClient,
 	NewTimescaleDBStorageAdapter,
+	NewPostgresDB, // Add raw DB access
 
 	// Repository layer
 	postgresRepo.NewGeneratedKeyRepository,
@@ -123,3 +125,8 @@ func NewTimescaleDBStorageAdapter(
 ) *storage.TimescaleDBStorageAdapter {
 	return storage.NewTimescaleDBStorageAdapter(keyRepo, serverRepo, timescaleDB, logger, cfg)
 }
+
+// NewPostgresDB extracts raw DB from PostgreSQL client
+func NewPostgresDB(client *postgresStorage.Client) *sql.DB {
+	return client.DB()
+}
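With *sql.DB in the provider set, constructors that need raw database access can simply declare it as a parameter. A hypothetical provider shape follows — storage.NewAPIKeyStorage does appear in the generated set below, but its actual signature and fields are not shown in this diff:

// Hypothetical constructor that Wire can now satisfy via NewPostgresDB.
// The APIKeyStorage struct layout here is an assumption.
func NewAPIKeyStorage(db *sql.DB, logger *logrus.Logger) *APIKeyStorage {
	return &APIKeyStorage{db: db, logger: logger}
}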
diff --git a/internal/wire/wire_gen.go b/internal/wire/wire_gen.go
index 933ca6f..c524f57 100644
--- a/internal/wire/wire_gen.go
+++ b/internal/wire/wire_gen.go
@@ -7,6 +7,7 @@ package wire
 
 import (
+	"database/sql"
 	"fmt"
 
 	"github.com/godofphonk/ServerEyeAPI/internal/api"
 	"github.com/godofphonk/ServerEyeAPI/internal/config"
@@ -43,7 +44,8 @@ var ProviderSet = wire.NewSet(
 	NewPostgresClient,
 	NewTimescaleDBClient,
-	NewTimescaleDBStorageAdapter, postgres.NewGeneratedKeyRepository, postgres.NewServerRepository, services.NewServerService, services.NewMetricsService, services.NewTieredMetricsService, services.NewCommandsService, services.NewMetricsCommandsService, services.NewAuthService, websocket.NewServer, handlers.NewAuthHandler, handlers.NewHealthHandler, handlers.NewMetricsHandler, handlers.NewTieredMetricsHandler, handlers.NewServersHandler, handlers.NewServerSourcesHandler, handlers.NewCommandsHandler, api.New,
+	NewTimescaleDBStorageAdapter,
+	NewPostgresDB, postgres.NewGeneratedKeyRepository, postgres.NewServerRepository, storage.NewAPIKeyStorage, handlers.NewAPIKeyHandler, services.NewServerService, services.NewMetricsService, services.NewTieredMetricsService, services.NewCommandsService, services.NewMetricsCommandsService, services.NewAuthService, websocket.NewServer, handlers.NewAuthHandler, handlers.NewHealthHandler, handlers.NewMetricsHandler, handlers.NewTieredMetricsHandler, handlers.NewServersHandler, handlers.NewServerSourcesHandler, handlers.NewCommandsHandler, api.New,
 )
 
 // NewLogger creates a new logger instance
@@ -80,3 +82,8 @@ func NewTimescaleDBStorageAdapter(
 ) *storage.TimescaleDBStorageAdapter {
 	return storage.NewTimescaleDBStorageAdapter(keyRepo, serverRepo, timescaleDB, logger, cfg)
 }
+
+// NewPostgresDB extracts raw DB from PostgreSQL client
+func NewPostgresDB(client *postgres2.Client) *sql.DB {
+	return client.DB()
+}
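For completeness, a sketch of the injector side — the function name and return type are hypothetical, and the real injector generated into wire_gen.go may differ; wire.Build resolves *sql.DB through NewPostgresDB automatically:

//go:build wireinject

// Hypothetical injector shape (sketch only).
func InitializeAPI(cfg *config.Config) (*api.API, error) {
	panic(wire.Build(ProviderSet))
}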
diff --git a/pkg/models/unified_response.go b/pkg/models/unified_response.go
new file mode 100644
index 0000000..e26e6cc
--- /dev/null
+++ b/pkg/models/unified_response.go
@@ -0,0 +1,248 @@
+package models
+
+// UnifiedServerResponse combines metrics, status, and static info in one response
+type UnifiedServerResponse struct {
+	ServerID  string `json:"server_id"`
+	ServerKey string `json:"server_key,omitempty"`
+	Timestamp string `json:"timestamp"`
+
+	// Metrics data (from /metrics endpoint - current server state)
+	Metrics interface{} `json:"metrics,omitempty"`
+
+	// Status data (from /status endpoint)
+	Status interface{} `json:"status,omitempty"`
+
+	// Static info data (from /static-info endpoint)
+	StaticInfo interface{} `json:"static_info,omitempty"`
+
+	// Performance metadata
+	ResponseMeta ResponseMetadata `json:"response_meta"`
+}
+
+// ResponseMetadata provides information about the unified response
+type ResponseMetadata struct {
+	TotalResponseTimeMs int64                      `json:"total_response_time_ms"`
+	ComponentsStatus    map[string]ComponentStatus `json:"components_status"`
+	DataPointsCount     map[string]int             `json:"data_points_count"`
+}
+
+// ComponentStatus indicates the status of each component
+type ComponentStatus struct {
+	Available    bool   `json:"available"`
+	ResponseTime int64  `json:"response_time_ms"`
+	Error        string `json:"error,omitempty"`
+}
+
+// ServerMetricsResponse represents the metrics component
+type ServerMetricsResponse struct {
+	ServerID       string      `json:"server_id"`
+	StartTime      string      `json:"start_time"`
+	EndTime        string      `json:"end_time"`
+	Granularity    string      `json:"granularity"`
+	DataPoints     []DataPoint `json:"data_points"`
+	TotalPoints    int         `json:"total_points"`
+	NetworkDetails NetworkInfo `json:"network_details"`
+	DiskDetails    DiskInfo    `json:"disk_details"`
+	TempDetails    TempInfo    `json:"temperature_details"`
+}
+
+// ServerStatusResponse represents the status component
+type ServerStatusResponse struct {
+	ServerID      string  `json:"server_id"`
+	Status        string  `json:"status"`
+	LastSeen      string  `json:"last_seen"`
+	CPUUsage      float64 `json:"cpu_usage"`
+	MemoryUsage   float64 `json:"memory_usage"`
+	DiskUsage     float64 `json:"disk_usage"`
+	NetworkStatus string  `json:"network_status"`
+	IsOnline      bool    `json:"is_online"`
+	Alerts        []Alert `json:"alerts,omitempty"`
+}
+
+// StaticInfoResponse represents the static info component
+type StaticInfoResponse struct {
+	ServerInfo *ServerStaticInfo  `json:"server_info"`
+	Hardware   *HardwareInfo      `json:"hardware"`
+	Network    *NetworkStaticInfo `json:"network"`
+	Storage    *StorageInfo       `json:"storage"`
+	System     *SystemInfo        `json:"system"`
+}
+
+// Reusing existing models from other files...
+type DataPoint struct {
+	Timestamp   string  `json:"timestamp"`
+	CPUAvg      float64 `json:"cpu_avg"`
+	CPUMax      float64 `json:"cpu_max"`
+	CPUMin      float64 `json:"cpu_min"`
+	MemoryAvg   float64 `json:"memory_avg"`
+	MemoryMax   float64 `json:"memory_max"`
+	MemoryMin   float64 `json:"memory_min"`
+	DiskAvg     float64 `json:"disk_avg"`
+	DiskMax     float64 `json:"disk_max"`
+	NetworkAvg  float64 `json:"network_avg"`
+	NetworkMax  float64 `json:"network_max"`
+	TempAvg     float64 `json:"temp_avg"`
+	TempMax     float64 `json:"temp_max"`
+	LoadAvg     float64 `json:"load_avg"`
+	LoadMax     float64 `json:"load_max"`
+	SampleCount int     `json:"sample_count"`
+}
+
+type NetworkInfo struct {
+	Interfaces  []NetworkInterfaceInfo `json:"interfaces"`
+	TotalRxMbps float64                `json:"total_rx_mbps"`
+	TotalTxMbps float64                `json:"total_tx_mbps"`
+}
+
+type DiskInfo struct {
+	Disks []DiskDriveInfo `json:"disks"`
+}
+
+type TempInfo struct {
+	CPUTemperature      float64            `json:"cpu_temperature"`
+	GPUTemperature      float64            `json:"gpu_temperature"`
+	SystemTemperature   float64            `json:"system_temperature"`
+	StorageTemperatures map[string]float64 `json:"storage_temperatures"`
+	HighestTemperature  float64            `json:"highest_temperature"`
+	TemperatureUnit     string             `json:"temperature_unit"`
+}
+
+type Alert struct {
+	ID         string `json:"id"`
+	Type       string `json:"type"`
+	Severity   string `json:"severity"`
+	Message    string `json:"message"`
+	CreatedAt  string `json:"created_at"`
+	ResolvedAt string `json:"resolved_at,omitempty"`
+}
+
+// Static info types (simplified versions)
+type ServerStaticInfo struct {
+	ServerID      string `json:"server_id"`
+	Hostname      string `json:"hostname"`
+	OSInfo        string `json:"os_info"`
+	Kernel        string `json:"kernel"`
+	Architecture  string `json:"architecture"`
+	UptimeSeconds int64  `json:"uptime_seconds"`
+	UptimeHuman   string `json:"uptime_human"`
+	BootTime      string `json:"boot_time"`
+	AgentVersion  string `json:"agent_version"`
+}
+
+type HardwareInfo struct {
+	CPU         CPUInfo         `json:"cpu"`
+	Memory      MemoryInfo      `json:"memory"`
+	Motherboard MotherboardInfo `json:"motherboard"`
+}
+
+type NetworkStaticInfo struct {
+	Interfaces []NetworkInterfaceStatic `json:"interfaces"`
+	Routes     []Route                  `json:"routes"`
+	DNS        []DNS                    `json:"dns"`
+}
+
+type StorageInfo struct {
+	Disks []DiskStatic `json:"disks"`
+	Raids []RAID       `json:"raids,omitempty"`
+}
+
+type SystemInfo struct {
+	OS        OSInfo      `json:"os"`
+	Processes ProcessInfo `json:"processes"`
+	Services  []Service   `json:"services"`
+}
+
+// Placeholder types for compilation
+type CPUInfo struct {
+	Model     string  `json:"model"`
+	Cores     int     `json:"cores"`
+	Threads   int     `json:"threads"`
+	Frequency float64 `json:"frequency"`
+}
+
+type MemoryInfo struct {
+	TotalGB     float64 `json:"total_gb"`
+	AvailableGB float64 `json:"available_gb"`
+	Type        string  `json:"type"`
+}
+
+type MotherboardInfo struct {
+	Manufacturer string `json:"manufacturer"`
+	Model        string `json:"model"`
+	Version      string `json:"version"`
+}
+
+type NetworkInterfaceStatic struct {
+	Name   string   `json:"name"`
+	Status string   `json:"status"`
+	MAC    string   `json:"mac"`
+	IPs    []string `json:"ips"`
+	Speed  int      `json:"speed"`
+	Duplex string   `json:"duplex"`
+}
+
+type Route struct {
+	Destination string `json:"destination"`
+	Gateway     string `json:"gateway"`
+	Interface   string `json:"interface"`
+}
+
+type DNS struct {
+	Server string `json:"server"`
+	Type   string `json:"type"`
+}
+
+type DiskStatic struct {
+	Path   string  `json:"path"`
+	Type   string  `json:"type"`
+	SizeGB float64 `json:"size_gb"`
+	Model  string  `json:"model"`
+	Serial string  `json:"serial"`
+}
+
+type RAID struct {
+	Name   string `json:"name"`
+	Level  string `json:"level"`
+	Status string `json:"status"`
+}
+
+type OSInfo struct {
+	Name     string `json:"name"`
+	Version  string `json:"version"`
+	Build    string `json:"build"`
+	Platform string `json:"platform"`
+}
+
+type ProcessInfo struct {
+	Total    int `json:"total"`
+	Running  int `json:"running"`
+	Sleeping int `json:"sleeping"`
+}
+
+type Service struct {
+	Name   string `json:"name"`
+	Status string `json:"status"`
+	State  string `json:"state"`
+}
+
+// Additional types needed for compilation
+type NetworkInterfaceInfo struct {
+	Name       string `json:"name"`
+	MAC        string `json:"mac"`
+	IP         string `json:"ip"`
+	Status     string `json:"status"`
+	SpeedMbps  int    `json:"speed_mbps"`
+	Type       string `json:"type"`
+	IsPhysical bool   `json:"is_physical"`
+}
+
+type DiskDriveInfo struct {
+	Name         string  `json:"name"`
+	Model        string  `json:"model"`
+	SerialNumber string  `json:"serial_number"`
+	SizeGB       float64 `json:"size_gb"`
+	Type         string  `json:"type"`
+	Interface    string  `json:"interface"`
+	MountPoint   string  `json:"mount_point"`
+	IsSystemDisk bool    `json:"is_system_disk"`
+}
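A sketch of how a handler might fill the envelope, with per-component timing feeding ResponseMetadata. Only the model types come from the file above; fetchStatus, serverID, and the surrounding handler are assumptions:

// Sketch: assembling a UnifiedServerResponse (fetchStatus is hypothetical).
resp := models.UnifiedServerResponse{
	ServerID:  serverID,
	Timestamp: time.Now().UTC().Format(time.RFC3339),
	ResponseMeta: models.ResponseMetadata{
		ComponentsStatus: map[string]models.ComponentStatus{},
		DataPointsCount:  map[string]int{},
	},
}

start := time.Now()
status, err := fetchStatus(ctx, serverID)
component := models.ComponentStatus{
	Available:    err == nil,
	ResponseTime: time.Since(start).Milliseconds(),
}
if err != nil {
	component.Error = err.Error() // a failed component degrades, not fails, the response
} else {
	resp.Status = status
}
resp.ResponseMeta.ComponentsStatus["status"] = component
resp.ResponseMeta.TotalResponseTimeMs = time.Since(start).Milliseconds()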
diff --git a/test_granularity.sql b/test_granularity.sql
new file mode 100644
index 0000000..34d4c31
--- /dev/null
+++ b/test_granularity.sql
@@ -0,0 +1,143 @@
+-- Test script for the new optimized granularity system
+-- Validates that the correct granularity is selected for different time ranges
+-- (COUNT(*) is grouped by the returned granularity so the bare column is legal)
+
+-- Test 1: 1 hour period - should use 1m granularity (max 60 points)
+SELECT
+    '1h test' as test_name,
+    COUNT(*) as points_returned,
+    '1m' as expected_granularity,
+    granularity as actual_granularity,
+    CASE WHEN COUNT(*) <= 60 THEN 'PASS' ELSE 'FAIL' END as result
+FROM get_metrics_by_granularity('test-server',
+    NOW() - INTERVAL '1 hour',
+    NOW()
+)
+GROUP BY granularity;
+
+-- Test 2: 6 hours period - should use 10m granularity (max 36 points)
+SELECT
+    '6h test' as test_name,
+    COUNT(*) as points_returned,
+    '10m' as expected_granularity,
+    granularity as actual_granularity,
+    CASE WHEN COUNT(*) <= 36 THEN 'PASS' ELSE 'FAIL' END as result
+FROM get_metrics_by_granularity('test-server',
+    NOW() - INTERVAL '6 hours',
+    NOW()
+)
+GROUP BY granularity;
+
+-- Test 3: 24 hours period - should use 30m granularity (max 48 points)
+SELECT
+    '24h test' as test_name,
+    COUNT(*) as points_returned,
+    '30m' as expected_granularity,
+    granularity as actual_granularity,
+    CASE WHEN COUNT(*) <= 48 THEN 'PASS' ELSE 'FAIL' END as result
+FROM get_metrics_by_granularity('test-server',
+    NOW() - INTERVAL '24 hours',
+    NOW()
+)
+GROUP BY granularity;
+
+-- Test 4: 7 days period - should use 2h granularity (max 84 points)
+SELECT
+    '7d test' as test_name,
+    COUNT(*) as points_returned,
+    '2h' as expected_granularity,
+    granularity as actual_granularity,
+    CASE WHEN COUNT(*) <= 84 THEN 'PASS' ELSE 'FAIL' END as result
+FROM get_metrics_by_granularity('test-server',
+    NOW() - INTERVAL '7 days',
+    NOW()
+)
+GROUP BY granularity;
+
+-- Test 5: 30 days period - should use 6h granularity (max 120 points)
+SELECT
+    '30d test' as test_name,
+    COUNT(*) as points_returned,
+    '6h' as expected_granularity,
+    granularity as actual_granularity,
+    CASE WHEN COUNT(*) <= 120 THEN 'PASS' ELSE 'FAIL' END as result
+FROM get_metrics_by_granularity('test-server',
+    NOW() - INTERVAL '30 days',
+    NOW()
+)
+GROUP BY granularity;
+
+-- Summary of all tests
+SELECT
+    test_name,
+    points_returned,
+    expected_granularity,
+    actual_granularity,
+    result
+FROM (
+    SELECT
+        '1h test' as test_name,
+        COUNT(*) as points_returned,
+        '1m' as expected_granularity,
+        granularity as actual_granularity,
+        CASE WHEN COUNT(*) <= 60 THEN 'PASS' ELSE 'FAIL' END as result
+    FROM get_metrics_by_granularity('test-server', NOW() - INTERVAL '1 hour', NOW())
+    GROUP BY granularity
+
+    UNION ALL
+
+    SELECT
+        '6h test' as test_name,
+        COUNT(*) as points_returned,
+        '10m' as expected_granularity,
+        granularity as actual_granularity,
+        CASE WHEN COUNT(*) <= 36 THEN 'PASS' ELSE 'FAIL' END as result
+    FROM get_metrics_by_granularity('test-server', NOW() - INTERVAL '6 hours', NOW())
+    GROUP BY granularity
+
+    UNION ALL
+
+    SELECT
+        '24h test' as test_name,
+        COUNT(*) as points_returned,
+        '30m' as expected_granularity,
+        granularity as actual_granularity,
+        CASE WHEN COUNT(*) <= 48 THEN 'PASS' ELSE 'FAIL' END as result
+    FROM get_metrics_by_granularity('test-server', NOW() - INTERVAL '24 hours', NOW())
+    GROUP BY granularity
+
+    UNION ALL
+
+    SELECT
+        '7d test' as test_name,
+        COUNT(*) as points_returned,
+        '2h' as expected_granularity,
+        granularity as actual_granularity,
+        CASE WHEN COUNT(*) <= 84 THEN 'PASS' ELSE 'FAIL' END as result
+    FROM get_metrics_by_granularity('test-server', NOW() - INTERVAL '7 days', NOW())
+    GROUP BY granularity
+
+    UNION ALL
+
+    SELECT
+        '30d test' as test_name,
+        COUNT(*) as points_returned,
+        '6h' as expected_granularity,
+        granularity as actual_granularity,
+        CASE WHEN COUNT(*) <= 120 THEN 'PASS' ELSE 'FAIL' END as result
+    FROM get_metrics_by_granularity('test-server', NOW() - INTERVAL '30 days', NOW())
+    GROUP BY granularity
+) tests;
+
+-- Performance test: inspect the plan and timing for the widest (30d) window
+EXPLAIN (ANALYZE, BUFFERS)
+SELECT COUNT(*) FROM get_metrics_by_granularity('test-server', NOW() - INTERVAL '30 days', NOW());
+
+-- Verify the continuous aggregates exist and are accessible
+-- (continuous aggregates are not listed in pg_tables, so use the TimescaleDB catalog)
+SELECT
+    view_schema,
+    view_name,
+    materialization_hypertable_name
+FROM timescaledb_information.continuous_aggregates
+WHERE view_name LIKE 'metrics_%_avg'
+ORDER BY view_name;
+
+-- Check continuous aggregate policies
+SELECT
+    view_name,
+    start_offset,
+    end_offset,
+    schedule_interval
+FROM timescaledb_information.continuous_aggregate_policies
+WHERE view_name IN ('metrics_30m_avg', 'metrics_2h_avg', 'metrics_6h_avg')
+ORDER BY view_name;
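The same checks can be driven from Go against the running database. A sketch, assuming db is an open *sql.DB pointed at the TimescaleDB instance (connection details omitted):

// Sketch: spot-checking get_metrics_by_granularity for a 24h window.
func checkDayWindow(ctx context.Context, db *sql.DB) error {
	rows, err := db.QueryContext(ctx, `
		SELECT granularity, COUNT(*)
		FROM get_metrics_by_granularity($1, NOW() - INTERVAL '24 hours', NOW())
		GROUP BY granularity`, "test-server")
	if err != nil {
		return err
	}
	defer rows.Close()
	for rows.Next() {
		var granularity string
		var points int
		if err := rows.Scan(&granularity, &points); err != nil {
			return err
		}
		// For a 24h window the tests above expect "30m" and at most 48 points.
		fmt.Printf("granularity=%s points=%d\n", granularity, points)
	}
	return rows.Err()
}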