diff --git a/.gitignore b/.gitignore
index dc24eb3e..11f635a7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -134,4 +134,18 @@ dist
 .idea
 k8s/output
 np-agent-manifest.yaml
-.minikube_mount_pid
\ No newline at end of file
+.minikube_mount_pid
+
+.DS_Store
+# Integration test runtime data
+frontend/deployment/tests/integration/volume/
+
+# Terraform/OpenTofu
+.terraform/
+.terraform.lock.hcl
+
+# Generated test certificates
+testing/docker/certs/
+
+# Claude Code
+.claude/
diff --git a/Makefile b/Makefile
new file mode 100644
index 00000000..e091370b
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,54 @@
.PHONY: test test-all test-unit test-tofu test-integration help

# Default test target - shows available options
test:
	@echo "Usage: make test-<level>"
	@echo ""
	@echo "Available test levels:"
	@echo "  make test-all            Run all tests"
	@echo "  make test-unit           Run BATS unit tests"
	@echo "  make test-tofu           Run OpenTofu tests"
	@echo "  make test-integration    Run integration tests"
	@echo ""
	@echo "You can also run tests for a specific module:"
	@echo "  make test-unit MODULE=frontend"

# Run all tests
test-all: test-unit test-tofu test-integration

# Run BATS unit tests
test-unit:
ifdef MODULE
	@./testing/run_bats_tests.sh $(MODULE)
else
	@./testing/run_bats_tests.sh
endif

# Run OpenTofu tests
test-tofu:
ifdef MODULE
	@./testing/run_tofu_tests.sh $(MODULE)
else
	@./testing/run_tofu_tests.sh
endif

# Run integration tests
test-integration:
ifdef MODULE
	@./testing/run_integration_tests.sh $(MODULE) $(if $(VERBOSE),-v)
else
	@./testing/run_integration_tests.sh $(if $(VERBOSE),-v)
endif

# Help
help:
	@echo "Test targets:"
	@echo "  test               Show available test options"
	@echo "  test-all           Run all tests"
	@echo "  test-unit          Run BATS unit tests"
	@echo "  test-tofu          Run OpenTofu tests"
	@echo "  test-integration   Run integration tests"
	@echo ""
	@echo "Options:"
	@echo "  MODULE=<name>      Run tests for a specific module (e.g., MODULE=frontend)"
	@echo "  VERBOSE=1          Show output of passing tests (integration tests only)"
diff --git a/TESTING.md b/TESTING.md
new file mode 100644
index 00000000..35b2e28c
--- /dev/null
+++ b/TESTING.md
@@ -0,0 +1,677 @@
# Testing Guide

This repository uses a comprehensive three-layer testing strategy to ensure reliability and correctness at every level of the infrastructure deployment pipeline.
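The `make` targets are thin wrappers over the runner scripts in `testing/`, with `MODULE` forwarded as a positional argument (see the `ifdef MODULE` branches in the Makefile above). For example, these two invocations are equivalent:

```bash
# Module-scoped unit tests via make...
make test-unit MODULE=frontend

# ...which the Makefile dispatches to
./testing/run_bats_tests.sh frontend
```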
+ +## Table of Contents + +- [Quick Start](#quick-start) +- [Test Layers Overview](#test-layers-overview) +- [Running Tests](#running-tests) +- [Unit Tests (BATS)](#unit-tests-bats) +- [Infrastructure Tests (OpenTofu)](#infrastructure-tests-opentofu) +- [Integration Tests](#integration-tests) +- [Test Helpers Reference](#test-helpers-reference) +- [Writing New Tests](#writing-new-tests) +- [Extending Test Helpers](#extending-test-helpers) + +--- + +## Quick Start + +```bash +# Run all tests +make test-all + +# Run specific test types +make test-unit # BATS unit tests +make test-tofu # OpenTofu infrastructure tests +make test-integration # End-to-end integration tests + +# Run tests for a specific module +make test-unit MODULE=frontend +make test-tofu MODULE=frontend +make test-integration MODULE=frontend +``` + +--- + +## Test Layers Overview + +Our testing strategy follows a pyramid approach with three distinct layers, each serving a specific purpose: + +``` + ┌─────────────────────┐ + │ Integration Tests │ Slow, Few + │ End-to-end flows │ + └──────────┬──────────┘ + │ + ┌───────────────┴───────────────┐ + │ OpenTofu Tests │ Medium + │ Infrastructure contracts │ + └───────────────┬───────────────┘ + │ + ┌───────────────────────────┴───────────────────────────┐ + │ Unit Tests │ Fast, Many + │ Script logic & behavior │ + └───────────────────────────────────────────────────────┘ +``` + +| Layer | Framework | Purpose | Speed | Coverage | +|-------|-----------|---------|-------|----------| +| **Unit** | BATS | Test bash scripts, setup logic, error handling | Fast (~seconds) | High | +| **Infrastructure** | OpenTofu | Validate Terraform/OpenTofu module contracts | Medium (~seconds) | Medium | +| **Integration** | BATS + Docker | End-to-end workflow validation with mocked services | Slow (~minutes) | Low | + +--- + +## Running Tests + +### Prerequisites + +| Tool | Required For | Installation | +|------|--------------|--------------| +| `bats` | Unit & Integration tests | `brew install bats-core` | +| `jq` | JSON processing | `brew install jq` | +| `tofu` | Infrastructure tests | `brew install opentofu` | +| `docker` | Integration tests | [Docker Desktop](https://docker.com) | + +### Makefile Commands + +```bash +# Show available test commands +make test + +# Run all test suites +make test-all + +# Run individual test suites +make test-unit +make test-tofu +make test-integration + +# Run tests for a specific module +make test-unit MODULE=frontend +make test-tofu MODULE=frontend +make test-integration MODULE=frontend + +# Run a single test file directly +bats frontend/deployment/tests/build_context_test.bats +tofu test # from within a modules directory +``` + +--- + +## Unit Tests (BATS) + +Unit tests validate the bash scripts that orchestrate the deployment pipeline. They test individual setup scripts, context building, error handling, and environment configuration. 
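The core BATS mechanics used throughout this guide are `run`, `$status`, and `$output`. A minimal, self-contained sketch (the command under test here is just `jq`, purely for illustration):

```bash
#!/usr/bin/env bats

@test "jq extracts the app name" {
  # `run` executes the command without failing the test on a non-zero exit,
  # capturing the exit code in $status and stdout/stderr in $output
  run jq -r '.name' <<< '{"name": "frontend"}'

  [ "$status" -eq 0 ]
  [ "$output" = "frontend" ]
}
```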
### What to Test

- **Setup scripts**: Validate environment variable handling, error cases, output format
- **Context builders**: Verify JSON structure, required fields, transformations
- **Error handling**: Ensure proper exit codes and error messages
- **Mock integrations**: Test script behavior with mocked CLI tools (aws, np)

### Architecture

```
┌─────────────────────────────────────────────────────────────────┐
│                        test_file.bats                           │
├─────────────────────────────────────────────────────────────────┤
│  setup()                                                        │
│    ├── source assertions.sh (shared test utilities)             │
│    ├── configure mock CLI tools (aws, np mocks)                 │
│    └── set environment variables                                │
│                                                                 │
│  @test "description" { ... }                                    │
│    ├── run script_under_test                                    │
│    └── assert results                                           │
│                                                                 │
│  teardown()                                                     │
│    └── cleanup                                                  │
└─────────────────────────────────────────────────────────────────┘
```

### Directory Structure

```
<module>/
├── <script>/
│   └── setup                    # Script under test
└── tests/
    ├── resources/
    │   ├── context.json         # Test fixtures
    │   ├── aws_mocks/           # Mock AWS CLI responses
    │   │   └── aws              # Mock aws executable
    │   └── np_mocks/            # Mock np CLI responses
    │       └── np               # Mock np executable
    └── <script>/
        └── setup_test.bats      # Test file
```

### File Naming Convention

| Pattern | Description |
|---------|-------------|
| `*_test.bats` | BATS test files |
| `resources/` | Test fixtures and mock data |
| `*_mocks/` | Mock CLI tool directories |

### Example Unit Test

```bash
#!/usr/bin/env bats
# =============================================================================
# Unit tests for provider/aws/setup script
# =============================================================================

# Setup - runs before each test
setup() {
  TEST_DIR="$(cd "$(dirname "$BATS_TEST_FILENAME")" && pwd)"
  PROJECT_ROOT="$(cd "$TEST_DIR/../../.." && pwd)"
  SCRIPT_PATH="$PROJECT_ROOT/provider/aws/setup"

  # Load shared test utilities
  source "$PROJECT_ROOT/testing/assertions.sh"

  # Initialize required environment variables
  export AWS_REGION="us-east-1"
  export TOFU_PROVIDER_BUCKET="my-terraform-state"
  export TOFU_LOCK_TABLE="terraform-locks"
}

# Teardown - runs after each test
teardown() {
  unset AWS_REGION TOFU_PROVIDER_BUCKET TOFU_LOCK_TABLE
}

# =============================================================================
# Tests
# =============================================================================

@test "fails when AWS_REGION is not set" {
  unset AWS_REGION

  run source "$SCRIPT_PATH"

  assert_equal "$status" "1"
  assert_contains "$output" "AWS_REGION is not set"
}

@test "exports correct TOFU_VARIABLES structure" {
  source "$SCRIPT_PATH"

  local region=$(echo "$TOFU_VARIABLES" | jq -r '.aws_provider.region')
  assert_equal "$region" "us-east-1"
}

@test "appends to existing MODULES_TO_USE" {
  export MODULES_TO_USE="existing/module"

  source "$SCRIPT_PATH"

  assert_contains "$MODULES_TO_USE" "existing/module"
  assert_contains "$MODULES_TO_USE" "provider/aws/modules"
}
```

---

## Infrastructure Tests (OpenTofu)

Infrastructure tests validate the OpenTofu/Terraform modules in isolation. They verify variable contracts, resource configurations, and module outputs without deploying real infrastructure.
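Negative tests are also possible: a `run` block can assert that invalid inputs are rejected. A sketch, assuming (hypothetically) that the module declares a `validation` block on `distribution_bucket_name`:

```hcl
# Hypothetical: requires a corresponding `validation` block in variables.tf
run "rejects_empty_bucket_name" {
  command = plan

  variables {
    distribution_bucket_name = ""
  }

  # The plan is expected to fail, and the failure must come from this variable
  expect_failures = [
    var.distribution_bucket_name,
  ]
}
```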
### What to Test

- **Variable validation**: Required variables, type constraints, default values
- **Resource configuration**: Correct resource attributes based on inputs
- **Module outputs**: Expected outputs are produced with correct values
- **Edge cases**: Empty values, special characters, boundary conditions

### Architecture

```
┌─────────────────────────────────────────────────────────────────┐
│                       module.tftest.hcl                         │
├─────────────────────────────────────────────────────────────────┤
│  mock_provider "aws" {}        (prevents real API calls)        │
│                                                                 │
│  variables { ... }             (test inputs)                    │
│         │                                                       │
│         ▼                                                       │
│  ┌─────────────────────┐                                        │
│  │  Terraform Module   │       (main.tf, variables.tf, etc.)    │
│  │     under test      │                                        │
│  └─────────┬───────────┘                                        │
│            │                                                    │
│            ▼                                                    │
│  run "test_name" {                                              │
│    command = plan                                               │
│    assert { condition = ... }  (validate outputs/resources)     │
│  }                                                              │
└─────────────────────────────────────────────────────────────────┘
```

### Directory Structure

```
<module>/
└── modules/
    ├── main.tf
    ├── variables.tf
    ├── outputs.tf
    └── <name>.tftest.hcl    # Test file lives alongside module
```

### File Naming Convention

| Pattern | Description |
|---------|-------------|
| `*.tftest.hcl` | OpenTofu test files |
| `mock_provider` | Provider mock declarations |

### Example Infrastructure Test

```hcl
# =============================================================================
# Unit tests for cloudfront module
# =============================================================================

mock_provider "aws" {}

variables {
  distribution_bucket_name = "my-assets-bucket"
  distribution_app_name    = "my-app-123"
  distribution_s3_prefix   = "/static"

  network_hosted_zone_id = "Z1234567890"
  network_domain         = "example.com"
  network_subdomain      = "app"

  distribution_resource_tags_json = {
    Environment = "test"
  }
}

# =============================================================================
# Test: CloudFront distribution is created with correct origin
# =============================================================================
run "cloudfront_has_correct_s3_origin" {
  command = plan

  assert {
    condition     = aws_cloudfront_distribution.static.origin[0].domain_name != ""
    error_message = "CloudFront distribution must have an S3 origin"
  }
}

# =============================================================================
# Test: Origin Access Control is configured
# =============================================================================
run "oac_is_configured" {
  command = plan

  assert {
    condition     = aws_cloudfront_origin_access_control.static.signing_behavior == "always"
    error_message = "OAC should always sign requests"
  }
}

# =============================================================================
# Test: Custom error responses for SPA routing
# =============================================================================
run "spa_error_responses_configured" {
  command = plan

  assert {
    condition     = length(aws_cloudfront_distribution.static.custom_error_response) > 0
    error_message = "SPA should have custom error responses for client-side routing"
  }
}
```

---

## Integration Tests

Integration tests validate the complete deployment workflow end-to-end. They run in a containerized environment with mocked cloud services, testing the entire pipeline from context building through infrastructure provisioning.
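The test code never talks to real cloud endpoints; helpers route CLI calls to the mocks instead. A minimal sketch of what an `aws_local`-style wrapper does (the real helper lives in `testing/integration_helpers.sh`; the hostname and dummy credentials here are assumptions, with the LocalStack port taken from the table below):

```bash
# Route AWS CLI calls to LocalStack instead of real AWS
aws_local() {
  AWS_ACCESS_KEY_ID="test" \
  AWS_SECRET_ACCESS_KEY="test" \
    aws --endpoint-url "http://localstack:4566" \
        --region "${AWS_REGION:-us-east-1}" "$@"
}
```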
### What to Test

- **Complete workflows**: Full deployment and destruction cycles
- **Service interactions**: AWS services, nullplatform API calls
- **Resource creation**: Verify infrastructure is created correctly
- **Cleanup**: Ensure resources are properly destroyed

### Architecture

```
┌─ Host Machine ──────────────────────────────────────────────────────────────┐
│                                                                             │
│  make test-integration                                                      │
│         │                                                                   │
│         ▼                                                                   │
│  run_integration_tests.sh ──► docker compose up                             │
│                                                                             │
└─────────────────────────────────┬───────────────────────────────────────────┘
                                  │
┌─ Docker Network ────────────────┴───────────────────────────────────────────┐
│                                                                             │
│  ┌─ Test Container ───────────────────────────────────────────────────────┐ │
│  │                                                                        │ │
│  │  BATS Tests ──► np CLI ──────────────────┐                             │ │
│  │       │                                  │                             │ │
│  │       ▼                                  ▼                             │ │
│  │   OpenTofu                         Nginx (HTTPS)                       │ │
│  │       │                                  │                             │ │
│  └───────┼──────────────────────────────────┼─────────────────────────────┘ │
│          │                                  │                               │
│          ▼                                  ▼                               │
│  ┌─ Mock Services ────────────────────────────────────────────────────────┐ │
│  │                                                                        │ │
│  │  LocalStack (4566)      Moto (5555)        Smocker (8081)              │ │
│  │  ├── S3                 └── CloudFront     └── nullplatform API        │ │
│  │  ├── Route53                                                           │ │
│  │  ├── DynamoDB                                                          │ │
│  │  ├── IAM                                                               │ │
│  │  └── STS                                                               │ │
│  │                                                                        │ │
│  └────────────────────────────────────────────────────────────────────────┘ │
│                                                                             │
└─────────────────────────────────────────────────────────────────────────────┘
```

### Service Components

| Service | Purpose | Port |
|---------|---------|------|
| **LocalStack** | AWS service emulation (S3, Route53, DynamoDB, IAM, STS, ACM) | 4566 |
| **Moto** | CloudFront emulation (not supported in LocalStack free tier) | 5555 |
| **Smocker** | nullplatform API mocking | 8080/8081 |
| **Nginx** | HTTPS reverse proxy for np CLI | 8443 |

### Directory Structure

```
<module>/
└── tests/
    └── integration/
        ├── cloudfront_lifecycle_test.bats   # Integration test
        ├── localstack/
        │   └── provider_override.tf         # LocalStack-compatible provider config
        └── mocks/
            └── <endpoint>/
                └── response.json            # Mock API responses
```

### File Naming Convention

| Pattern | Description |
|---------|-------------|
| `*_test.bats` | Integration test files |
| `localstack/` | LocalStack-compatible Terraform overrides |
| `mocks/` | API mock response files |

### Example Integration Test

```bash
#!/usr/bin/env bats
# =============================================================================
# Integration test: CloudFront Distribution Lifecycle
# =============================================================================

setup_file() {
  source "${PROJECT_ROOT}/testing/integration_helpers.sh"

  # Clear any existing mocks
  clear_mocks

  # Create AWS prerequisites in LocalStack
  aws_local s3api create-bucket --bucket assets-bucket
  aws_local s3api create-bucket --bucket tofu-state-bucket
  aws_local dynamodb create-table \
    --table-name tofu-locks \
    --attribute-definitions AttributeName=LockID,AttributeType=S \
    --key-schema AttributeName=LockID,KeyType=HASH \
    --billing-mode PAY_PER_REQUEST
  aws_local route53 create-hosted-zone \
    --name example.com \
    --caller-reference "test-$(date +%s)"
}

teardown_file() {
  source "${PROJECT_ROOT}/testing/integration_helpers.sh"
  clear_mocks
}

setup() {
  source "${PROJECT_ROOT}/testing/integration_helpers.sh"

  clear_mocks
  load_context "tests/resources/context.json"

  export TOFU_PROVIDER="aws"
  export TOFU_PROVIDER_BUCKET="tofu-state-bucket"
  export AWS_REGION="us-east-1"
}

# =============================================================================
# Test: Create Infrastructure
# =============================================================================
@test "create infrastructure deploys S3, CloudFront, and Route53 resources" {
  # Setup API mocks
  mock_request "GET" "/provider" "mocks/provider_success.json"

  # Run the deployment workflow
  run_workflow "deployment/workflows/initial.yaml"

  # Verify resources were created
  assert_s3_bucket_exists "assets-bucket"
  assert_cloudfront_exists "Distribution for my-app"
  assert_route53_record_exists "app.example.com" "A"
}

# =============================================================================
# Test: Destroy Infrastructure
# =============================================================================
@test "destroy infrastructure removes CloudFront and Route53 resources" {
  mock_request "GET" "/provider" "mocks/provider_success.json"

  run_workflow "deployment/workflows/delete.yaml"

  assert_cloudfront_not_exists "Distribution for my-app"
  assert_route53_record_not_exists "app.example.com" "A"
}
```

---

## Test Helpers Reference

### Viewing Available Helpers

Both helper libraries include a `test_help` function that displays all available utilities:

```bash
# View unit test helpers
source testing/assertions.sh && test_help

# View integration test helpers
source testing/integration_helpers.sh && test_help
```

### Unit Test Assertions (`testing/assertions.sh`)

| Function | Description |
|----------|-------------|
| `assert_equal "$actual" "$expected"` | Assert two values are equal |
| `assert_contains "$haystack" "$needle"` | Assert string contains substring |
| `assert_not_empty "$value" ["$name"]` | Assert value is not empty |
| `assert_empty "$value" ["$name"]` | Assert value is empty |
| `assert_file_exists "$path"` | Assert file exists |
| `assert_directory_exists "$path"` | Assert directory exists |
| `assert_json_equal "$actual" "$expected"` | Assert JSON structures are equal |

### Integration Test Helpers (`testing/integration_helpers.sh`)

#### AWS Commands

| Function | Description |
|----------|-------------|
| `aws_local <command>` | Execute AWS CLI against LocalStack |
| `aws_moto <command>` | Execute AWS CLI against Moto (CloudFront) |

#### Workflow Execution

| Function | Description |
|----------|-------------|
| `run_workflow "$path"` | Run a nullplatform workflow file |

#### Context Management

| Function | Description |
|----------|-------------|
| `load_context "$path"` | Load context JSON into `$CONTEXT` |
| `override_context "$key" "$value"` | Override a value in the current context |

#### API Mocking

| Function | Description |
|----------|-------------|
| `clear_mocks` | Clear all mocks, set up defaults |
| `mock_request "$method" "$path" "$file"` | Mock API request with a file response |
| `mock_request "$method" "$path" $status '$body'` | Mock API request inline |
| `assert_mock_called "$method" "$path"` | Assert mock was called |

#### AWS Assertions

| Function | Description |
|----------|-------------|
| `assert_s3_bucket_exists "$bucket"` | Assert S3 bucket exists |
| `assert_s3_bucket_not_exists "$bucket"` | Assert S3 bucket doesn't exist |
| `assert_cloudfront_exists "$comment"` | Assert CloudFront distribution exists |
| `assert_cloudfront_not_exists "$comment"` | Assert CloudFront distribution doesn't exist |
| `assert_route53_record_exists "$name" "$type"` | Assert Route53 record exists |
| `assert_route53_record_not_exists "$name" "$type"` | Assert Route53 record doesn't exist |
| `assert_dynamodb_table_exists "$table"` | Assert DynamoDB table exists |

---

## Writing New Tests

### Unit Test Checklist

1. Create test file: `<module>/tests/<script>/<script>_test.bats`
2. Add a `setup()` function that sources `testing/assertions.sh`
3. Set up required environment variables and mocks
4. Write tests using `@test "description" { ... }` syntax
5. Use `run` to capture command output and exit status
6. Assert with helper functions or standard bash conditionals

### Infrastructure Test Checklist

1. Create test file: `<module>/modules/<name>.tftest.hcl`
2. Add `mock_provider "aws" {}` to avoid real API calls
3. Define a `variables {}` block with test inputs
4. Write `run "test_name" { ... }` blocks with assertions
5. Use `command = plan` to validate without applying

### Integration Test Checklist

1. Create test file: `<module>/tests/integration/<name>_test.bats`
2. Add `setup_file()` to create prerequisites in LocalStack
3. Add `setup()` to configure mocks and context per test
4. Add `teardown_file()` to clean up
5. Create `localstack/provider_override.tf` for a LocalStack-compatible provider
6. Create mock response files in the `mocks/` directory
7. Use `run_workflow` to execute deployment workflows
8. Assert with the AWS assertion helpers

---

## Extending Test Helpers

### Adding New Assertions

1. **Add the function** to the appropriate helper file:
   - `testing/assertions.sh` for unit test helpers
   - `testing/integration_helpers.sh` for integration test helpers

2. **Follow the naming convention**: `assert_<name>` for assertions

3. **Update the `test_help` function** to document your new helper:

```bash
# Example: Adding a new assertion to assertions.sh

# Add the function
assert_file_contains() {
  local file="$1"
  local content="$2"
  if ! grep -q "$content" "$file" 2>/dev/null; then
    echo "Expected file '$file' to contain: $content"
    return 1
  fi
}

# Update test_help() - add to the appropriate section
test_help() {
  cat <<'EOF'
...
FILE SYSTEM ASSERTIONS
----------------------
  assert_file_exists "<path>"
    Assert a file exists.

  assert_file_contains "<file>" "<content>"   # <-- Add documentation
    Assert a file contains specific content.
...
EOF
}
```

4. **Test your new helper** before committing

### Helper Design Guidelines

- Return `0` on success, non-zero on failure
- Print descriptive error messages on failure
- Keep functions focused and single-purpose
- Use consistent naming conventions
- Document parameters and usage in `test_help()`

---

## Troubleshooting

### Common Issues

| Issue | Solution |
|-------|----------|
| `bats: command not found` | Install bats-core: `brew install bats-core` |
| `tofu: command not found` | Install OpenTofu: `brew install opentofu` |
| Integration tests hang | Check Docker is running, increase timeout |
| LocalStack services not ready | Wait for health checks, check Docker logs |
| Mock not being called | Verify mock path matches exactly, check Smocker logs |

### Debugging Integration Tests

```bash
# View LocalStack logs
docker logs integration-localstack

# View Smocker mock history
curl http://localhost:8081/history | jq

# Run tests with verbose output
bats --show-output-of-passing-tests frontend/deployment/tests/integration/*.bats
```

---

## Additional Resources

- [BATS Documentation](https://bats-core.readthedocs.io/)
- [OpenTofu Testing](https://opentofu.org/docs/cli/commands/test/)
- [LocalStack Documentation](https://docs.localstack.cloud/)
- [Smocker Documentation](https://smocker.dev/)
diff --git a/testing/assertions.sh b/testing/assertions.sh
new file mode 100644
index 00000000..ab36c582
--- /dev/null
+++ b/testing/assertions.sh
@@ -0,0 +1,324 @@
# =============================================================================
# Shared assertion functions for BATS tests
#
# Usage: Add this line at the top of your .bats file's setup() function:
#   source "$PROJECT_ROOT/testing/assertions.sh"
# =============================================================================

# =============================================================================
# Assertion functions
# =============================================================================
assert_equal() {
  local actual="$1"
  local expected="$2"
  if [ "$actual" != "$expected" ]; then
    echo "Expected: '$expected'"
    echo "Actual:   '$actual'"
    return 1
  fi
}

assert_contains() {
  local haystack="$1"
  local needle="$2"
  if [[ "$haystack" != *"$needle"* ]]; then
    echo "Expected string to contain: '$needle'"
    echo "Actual: '$haystack'"
    return 1
  fi
}

assert_not_empty() {
  local value="$1"
  local name="${2:-value}"
  if [ -z "$value" ]; then
    echo "Expected $name to be non-empty, but it was empty"
    return 1
  fi
}

assert_empty() {
  local value="$1"
  local name="${2:-value}"
  if [ -n "$value" ]; then
    echo "Expected $name to be empty"
    echo "Actual: '$value'"
    return 1
  fi
}

assert_true() {
  local value="$1"
  local name="${2:-value}"
  if [[ "$value" != "true" ]]; then
    echo "Expected $name to be true"
    echo "Actual: '$value'"
    return 1
  fi
}

assert_false() {
  local value="$1"
  local name="${2:-value}"
  if [[ "$value" != "false" ]]; then
    echo "Expected $name to be false"
    echo "Actual: '$value'"
    return 1
  fi
}

assert_greater_than() {
  local actual="$1"
  local expected="$2"
  local name="${3:-value}"
  if [[ ! "$actual" -gt "$expected" ]]; then
    echo "Expected $name to be greater than $expected"
    echo "Actual: '$actual'"
    return 1
  fi
}

assert_less_than() {
  local actual="$1"
  local expected="$2"
  local name="${3:-value}"
  if [[ ! "$actual" -lt "$expected" ]]; then
    echo "Expected $name to be less than $expected"
    echo "Actual: '$actual'"
    return 1
  fi
}

# Assert that commands appear in a specific order in a log file
# Usage: assert_command_order "<log_file>" "command1" "command2" ["command3" ...]
# Example: assert_command_order "$LOG_FILE" "init" "apply"
assert_command_order() {
  local log_file="$1"
  shift
  local commands=("$@")

  if [[ ${#commands[@]} -lt 2 ]]; then
    echo "assert_command_order requires at least 2 commands"
    return 1
  fi

  if [[ ! -f "$log_file" ]]; then
    echo "Log file not found: $log_file"
    return 1
  fi

  local prev_line=0
  local prev_cmd=""

  for cmd in "${commands[@]}"; do
    local line_num
    line_num=$(grep -n "$cmd" "$log_file" | head -1 | cut -d: -f1)

    if [[ -z "$line_num" ]]; then
      echo "Command '$cmd' not found in log file"
      return 1
    fi

    if [[ $prev_line -gt 0 ]] && [[ $line_num -le $prev_line ]]; then
      echo "Expected: '$cmd'"
      echo "To be executed after: '$prev_cmd'"

      echo "Actual execution order:"
      echo "  '$prev_cmd' at line $prev_line"
      echo "  '$cmd' at line $line_num"
      return 1
    fi

    prev_line=$line_num
    prev_cmd=$cmd
  done
}

assert_directory_exists() {
  local dir="$1"
  if [ ! -d "$dir" ]; then
    echo "Expected directory to exist: '$dir'"
    return 1
  fi
}

assert_file_exists() {
  local file="$1"
  if [ ! -f "$file" ]; then
    echo "Expected file to exist: '$file'"
    return 1
  fi
}

assert_file_not_exists() {
  local file="$1"
  if [ -f "$file" ]; then
    echo "Expected file to not exist: '$file'"
    return 1
  fi
}

assert_json_equal() {
  local actual="$1"
  local expected="$2"
  local name="${3:-JSON}"

  local actual_sorted=$(echo "$actual" | jq -S .)
  local expected_sorted=$(echo "$expected" | jq -S .)

  if [ "$actual_sorted" != "$expected_sorted" ]; then
    echo "$name does not match expected structure"
    echo ""
    echo "Diff:"
    diff <(echo "$expected_sorted") <(echo "$actual_sorted") || true
    echo ""
    echo "Expected:"
    echo "$expected_sorted"
    echo ""
    echo "Actual:"
    echo "$actual_sorted"
    echo ""
    return 1
  fi
}

# =============================================================================
# Mock helpers
# =============================================================================

# Set up a mock response for the np CLI
# Usage: set_np_mock "<mock_file>" [exit_code]
set_np_mock() {
  local mock_file="$1"
  local exit_code="${2:-0}"
  export NP_MOCK_RESPONSE="$mock_file"
  export NP_MOCK_EXIT_CODE="$exit_code"
}

# Set up a mock response for the aws CLI
# Usage: set_aws_mock "<mock_file>" [exit_code]
# Requires: AWS_MOCKS_DIR to be set in the test setup
set_aws_mock() {
  local mock_file="$1"
  local exit_code="${2:-0}"
  export AWS_MOCK_RESPONSE="$mock_file"
  export AWS_MOCK_EXIT_CODE="$exit_code"
}

# Set up a mock response for the az CLI
# Usage: set_az_mock "<mock_file>" [exit_code]
# Requires: AZURE_MOCKS_DIR to be set in the test setup
set_az_mock() {
  local mock_file="$1"
  local exit_code="${2:-0}"
  export AZ_MOCK_RESPONSE="$mock_file"
  export AZ_MOCK_EXIT_CODE="$exit_code"
}

# =============================================================================
# Help / Documentation
# =============================================================================

# Display help for all available unit test assertion utilities
test_help() {
  cat <<'EOF'
================================================================================
                      Unit Test Assertions Reference
================================================================================

VALUE ASSERTIONS
----------------
  assert_equal "<actual>" "<expected>"
    Assert two string values are equal.
    Example: assert_equal "$result" "expected_value"

  assert_contains "<haystack>" "<needle>"
    Assert a string contains a substring.
    Example: assert_contains "$output" "success"

  assert_not_empty "<value>" ["<name>"]
    Assert a value is not empty.
    Example: assert_not_empty "$result" "API response"

  assert_empty "<value>" ["<name>"]
    Assert a value is empty.
    Example: assert_empty "$error" "error message"

  assert_true "<value>" ["<name>"]
    Assert a value equals the string "true".
    Example: assert_true "$enabled" "distribution enabled"

  assert_false "<value>" ["<name>"]
    Assert a value equals the string "false".
    Example: assert_false "$disabled" "feature disabled"

NUMERIC ASSERTIONS
------------------
  assert_greater_than "<actual>" "<expected>" ["<name>"]
    Assert a number is greater than another.
    Example: assert_greater_than "$count" "0" "item count"

  assert_less_than "<actual>" "<expected>" ["<name>"]
    Assert a number is less than another.
    Example: assert_less_than "$errors" "10" "error count"

COMMAND ORDER ASSERTIONS
------------------------
  assert_command_order "<log_file>" "cmd1" "cmd2" ["cmd3" ...]
    Assert commands appear in order in a log file.
    Example: assert_command_order "$LOG" "init" "apply" "output"

FILE SYSTEM ASSERTIONS
----------------------
  assert_file_exists "<path>"
    Assert a file exists.
    Example: assert_file_exists "/tmp/output.json"

  assert_file_not_exists "<path>"
    Assert a file does not exist.
    Example: assert_file_not_exists "/tmp/should_not_exist.json"

  assert_directory_exists "<path>"
    Assert a directory exists.
    Example: assert_directory_exists "/tmp/output"

JSON ASSERTIONS
---------------
  assert_json_equal "<actual>" "<expected>" ["<name>"]
    Assert two JSON structures are equal (order-independent).
    Example: assert_json_equal "$response" '{"status": "ok"}'

MOCK HELPERS
------------
  set_np_mock "<mock_file>" [exit_code]
    Set up a mock response for the np CLI.
    Example: set_np_mock "$MOCKS_DIR/provider/success.json"

  set_aws_mock "<mock_file>" [exit_code]
    Set up a mock response for the aws CLI.
    Example: set_aws_mock "$MOCKS_DIR/route53/success.json"

BATS BUILT-IN HELPERS
---------------------
  run <command>
    Run a command and capture output in $output and exit code in $status.
    Example: run my_function "arg1" "arg2"

  [ "$status" -eq 0 ]
    Check exit code after 'run'.

  [[ "$output" == *"expected"* ]]
    Check output contains an expected string.

USAGE IN TESTS
--------------
  Add this to your test file's setup() function:

    setup() {
      source "$PROJECT_ROOT/testing/assertions.sh"
    }

================================================================================
EOF
}
diff --git a/testing/azure-mock-provider/backend_override.tf b/testing/azure-mock-provider/backend_override.tf
new file mode 100644
index 00000000..8a04e28e
--- /dev/null
+++ b/testing/azure-mock-provider/backend_override.tf
@@ -0,0 +1,9 @@
# Backend override for Azure Mock testing
# This configures the azurerm backend to use the mock blob storage

terraform {
  backend "azurerm" {
    # These values are overridden at runtime via -backend-config flags,
    # but we need a backend block for terraform to accept them
  }
}
diff --git a/testing/azure-mock-provider/provider_override.tf b/testing/azure-mock-provider/provider_override.tf
new file mode 100644
index 00000000..6b1a4406
--- /dev/null
+++ b/testing/azure-mock-provider/provider_override.tf
@@ -0,0 +1,32 @@
# Override file for Azure Mock testing
# This file is copied into the module directory during integration tests
# to configure the Azure provider to use mock endpoints
#
# This is analogous to the LocalStack provider override for AWS tests.
#
# Azure Mock (port 8080): ARM APIs (CDN, DNS, Storage) + Blob Storage API

provider "azurerm" {
  features {}

  # Test subscription ID (the mock doesn't validate this)
  subscription_id = "mock-subscription-id"

  # Skip provider registration (not needed for the mock)
  skip_provider_registration = true

  # Use client credentials with mock values
  # The mock server accepts any credentials
  client_id     = "mock-client-id"
  client_secret = "mock-client-secret"
  tenant_id     = "mock-tenant-id"

  # Disable all authentication methods except client credentials
  use_msi  = false
  use_cli  = false
  use_oidc = false

  # Note: unlike the AWS provider, azurerm has no provider-level default_tags
  # block; resource tags must be set on each resource in the module.
}
diff --git a/testing/docker/Dockerfile.test-runner b/testing/docker/Dockerfile.test-runner
new file mode 100644
index 00000000..4323fbdb
--- /dev/null
+++ b/testing/docker/Dockerfile.test-runner
@@ -0,0 +1,47 @@
# =============================================================================
# Integration Test Runner Container
#
# Contains all tools needed to run integration tests:
#   - bats-core (test framework)
#   - aws-cli (for LocalStack/Moto assertions)
#   - azure-cli (for Azure API calls)
#   - jq (JSON processing)
#   - curl (HTTP requests)
#   - np CLI (nullplatform CLI)
#   - opentofu (infrastructure as code)
# =============================================================================

FROM alpine:3.19

# Install base dependencies
RUN apk add --no-cache \
    bash \
    curl \
    jq \
    git \
    openssh \
    docker-cli \
    aws-cli \
    ca-certificates \
    ncurses \
    python3 \
    py3-pip

# Install bats-core
RUN apk add --no-cache bats

# Install OpenTofu
RUN apk add --no-cache --repository=https://dl-cdn.alpinelinux.org/alpine/edge/community opentofu

# Install Azure CLI
RUN pip3 install --break-system-packages azure-cli

# Install nullplatform CLI and add it to PATH
RUN curl -fsSL https://cli.nullplatform.com/install.sh | sh
ENV PATH="/root/.local/bin:${PATH}"

# Create workspace directory
WORKDIR /workspace

# Entrypoint is bash; the test command (e.g., bats) is passed as arguments
ENTRYPOINT ["/bin/bash"]
diff --git a/testing/docker/azure-mock/Dockerfile b/testing/docker/azure-mock/Dockerfile
new file mode 100644
index 00000000..0e3d902e
--- /dev/null
+++ b/testing/docker/azure-mock/Dockerfile
@@ -0,0 +1,44 @@
# Azure Mock API Server
#
# Lightweight mock server that implements Azure REST API endpoints
# for integration testing without requiring real Azure resources.
#
# Build:
#   docker build -t azure-mock .
#
# Run:
#   docker run -p 8080:8080 azure-mock

FROM golang:1.21-alpine AS builder

WORKDIR /app

# Copy go mod files
COPY go.mod ./

# Copy source code
COPY main.go ./

# Build the binary
RUN CGO_ENABLED=0 GOOS=linux go build -o azure-mock .

# Final stage - minimal image
FROM alpine:3.19

# Add ca-certificates for HTTPS (if needed) and curl for the healthcheck
RUN apk --no-cache add ca-certificates curl

WORKDIR /app

# Copy binary from builder
COPY --from=builder /app/azure-mock .
+ +# Expose port +EXPOSE 8080 + +# Health check +HEALTHCHECK --interval=5s --timeout=3s --retries=10 \ + CMD curl -f http://localhost:8080/health || exit 1 + +# Run the server +CMD ["./azure-mock"] diff --git a/testing/docker/azure-mock/go.mod b/testing/docker/azure-mock/go.mod new file mode 100644 index 00000000..a2f2e22e --- /dev/null +++ b/testing/docker/azure-mock/go.mod @@ -0,0 +1,3 @@ +module azure-mock + +go 1.21 diff --git a/testing/docker/azure-mock/main.go b/testing/docker/azure-mock/main.go new file mode 100644 index 00000000..57c81baf --- /dev/null +++ b/testing/docker/azure-mock/main.go @@ -0,0 +1,3669 @@ +// Azure Mock API Server +// +// A lightweight mock server that implements Azure REST API endpoints +// for integration testing. Supports: +// - Azure CDN (profiles and endpoints) +// - Azure DNS (zones and CNAME records) +// - Azure Storage Accounts (read-only for data source) +// +// Usage: +// +// docker run -p 8080:8080 azure-mock +// +// Configure Terraform azurerm provider to use this endpoint. +package main + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "log" + "net/http" + "regexp" + "strings" + "sync" + "time" +) + +// ============================================================================= +// In-Memory Store +// ============================================================================= + +type Store struct { + mu sync.RWMutex + cdnProfiles map[string]CDNProfile + cdnEndpoints map[string]CDNEndpoint + cdnCustomDomains map[string]CDNCustomDomain + dnsZones map[string]DNSZone + dnsCNAMERecords map[string]DNSCNAMERecord + storageAccounts map[string]StorageAccount + blobContainers map[string]BlobContainer // key: accountName/containerName + blobs map[string]Blob // key: accountName/containerName/blobName + blobBlocks map[string][]byte // key: blobKey/blockId - staged blocks for block blob uploads + // App Service resources + appServicePlans map[string]AppServicePlan + linuxWebApps map[string]LinuxWebApp + webAppSlots map[string]WebAppSlot + logAnalyticsWorkspaces map[string]LogAnalyticsWorkspace + appInsights map[string]ApplicationInsights + autoscaleSettings map[string]AutoscaleSetting + actionGroups map[string]ActionGroup + metricAlerts map[string]MetricAlert + diagnosticSettings map[string]DiagnosticSetting + trafficRouting map[string][]TrafficRoutingRule +} + +// TrafficRoutingRule represents a traffic routing rule for a slot +type TrafficRoutingRule struct { + ActionHostName string `json:"actionHostName"` + ReroutePercentage int `json:"reroutePercentage"` + Name string `json:"name"` +} + +func NewStore() *Store { + return &Store{ + cdnProfiles: make(map[string]CDNProfile), + cdnEndpoints: make(map[string]CDNEndpoint), + cdnCustomDomains: make(map[string]CDNCustomDomain), + dnsZones: make(map[string]DNSZone), + dnsCNAMERecords: make(map[string]DNSCNAMERecord), + storageAccounts: make(map[string]StorageAccount), + blobContainers: make(map[string]BlobContainer), + blobs: make(map[string]Blob), + blobBlocks: make(map[string][]byte), + appServicePlans: make(map[string]AppServicePlan), + linuxWebApps: make(map[string]LinuxWebApp), + webAppSlots: make(map[string]WebAppSlot), + logAnalyticsWorkspaces: make(map[string]LogAnalyticsWorkspace), + appInsights: make(map[string]ApplicationInsights), + autoscaleSettings: make(map[string]AutoscaleSetting), + actionGroups: make(map[string]ActionGroup), + metricAlerts: make(map[string]MetricAlert), + diagnosticSettings: make(map[string]DiagnosticSetting), + trafficRouting: 
make(map[string][]TrafficRoutingRule), + } +} + +// ============================================================================= +// Azure Resource Models +// ============================================================================= + +// CDN Profile +type CDNProfile struct { + ID string `json:"id"` + Name string `json:"name"` + Type string `json:"type"` + Location string `json:"location"` + Tags map[string]string `json:"tags,omitempty"` + Sku CDNSku `json:"sku"` + Properties CDNProfileProps `json:"properties"` +} + +type CDNSku struct { + Name string `json:"name"` +} + +type CDNProfileProps struct { + ResourceState string `json:"resourceState"` + ProvisioningState string `json:"provisioningState"` +} + +// CDN Endpoint +type CDNEndpoint struct { + ID string `json:"id"` + Name string `json:"name"` + Type string `json:"type"` + Location string `json:"location"` + Tags map[string]string `json:"tags,omitempty"` + Properties CDNEndpointProps `json:"properties"` +} + +// CDN Custom Domain +type CDNCustomDomain struct { + ID string `json:"id"` + Name string `json:"name"` + Type string `json:"type"` + Properties CDNCustomDomainProps `json:"properties"` +} + +type CDNCustomDomainProps struct { + HostName string `json:"hostName"` + ResourceState string `json:"resourceState"` + ProvisioningState string `json:"provisioningState"` + ValidationData string `json:"validationData,omitempty"` +} + +type CDNEndpointProps struct { + HostName string `json:"hostName"` + OriginHostHeader string `json:"originHostHeader,omitempty"` + Origins []CDNOrigin `json:"origins"` + OriginPath string `json:"originPath,omitempty"` + IsHttpAllowed bool `json:"isHttpAllowed"` + IsHttpsAllowed bool `json:"isHttpsAllowed"` + IsCompressionEnabled bool `json:"isCompressionEnabled"` + ResourceState string `json:"resourceState"` + ProvisioningState string `json:"provisioningState"` + DeliveryPolicy *CDNDeliveryPolicy `json:"deliveryPolicy,omitempty"` +} + +type CDNOrigin struct { + Name string `json:"name"` + Properties CDNOriginProps `json:"properties"` +} + +type CDNOriginProps struct { + HostName string `json:"hostName"` + HttpPort int `json:"httpPort,omitempty"` + HttpsPort int `json:"httpsPort,omitempty"` +} + +type CDNDeliveryPolicy struct { + Rules []CDNDeliveryRule `json:"rules,omitempty"` +} + +type CDNDeliveryRule struct { + Name string `json:"name"` + Order int `json:"order"` + Actions []interface{} `json:"actions,omitempty"` +} + +// DNS Zone +type DNSZone struct { + ID string `json:"id"` + Name string `json:"name"` + Type string `json:"type"` + Location string `json:"location"` + Tags map[string]string `json:"tags,omitempty"` + Properties DNSZoneProps `json:"properties"` +} + +type DNSZoneProps struct { + MaxNumberOfRecordSets int `json:"maxNumberOfRecordSets"` + NumberOfRecordSets int `json:"numberOfRecordSets"` + NameServers []string `json:"nameServers"` +} + +// DNS CNAME Record +type DNSCNAMERecord struct { + ID string `json:"id"` + Name string `json:"name"` + Type string `json:"type"` + Etag string `json:"etag,omitempty"` + Properties DNSCNAMERecordProps `json:"properties"` +} + +type DNSCNAMERecordProps struct { + TTL int `json:"TTL"` + Fqdn string `json:"fqdn,omitempty"` + CNAMERecord *DNSCNAMEValue `json:"CNAMERecord,omitempty"` +} + +type DNSCNAMEValue struct { + Cname string `json:"cname"` +} + +// Storage Account +type StorageAccount struct { + ID string `json:"id"` + Name string `json:"name"` + Type string `json:"type"` + Location string `json:"location"` + Tags map[string]string `json:"tags,omitempty"` 
+ Kind string `json:"kind"` + Sku StorageSku `json:"sku"` + Properties StorageAccountProps `json:"properties"` +} + +type StorageSku struct { + Name string `json:"name"` + Tier string `json:"tier"` +} + +type StorageAccountProps struct { + PrimaryEndpoints StorageEndpoints `json:"primaryEndpoints"` + ProvisioningState string `json:"provisioningState"` +} + +type StorageEndpoints struct { + Blob string `json:"blob"` + Web string `json:"web"` +} + +// Blob Storage Container +type BlobContainer struct { + Name string `json:"name"` + Properties BlobContainerProps `json:"properties"` +} + +type BlobContainerProps struct { + LastModified string `json:"lastModified"` + Etag string `json:"etag"` +} + +// Blob +type Blob struct { + Name string `json:"name"` + Content []byte `json:"-"` + Properties BlobProps `json:"properties"` + Metadata map[string]string `json:"-"` // x-ms-meta-* headers +} + +type BlobProps struct { + LastModified string `json:"lastModified"` + Etag string `json:"etag"` + ContentLength int `json:"contentLength"` + ContentType string `json:"contentType"` +} + +// ============================================================================= +// App Service Models +// ============================================================================= + +// App Service Plan (serverfarms) +type AppServicePlan struct { + ID string `json:"id"` + Name string `json:"name"` + Type string `json:"type"` + Location string `json:"location"` + Tags map[string]string `json:"tags,omitempty"` + Kind string `json:"kind,omitempty"` + Sku AppServiceSku `json:"sku"` + Properties AppServicePlanProps `json:"properties"` +} + +type AppServiceSku struct { + Name string `json:"name"` + Tier string `json:"tier"` + Size string `json:"size"` + Family string `json:"family"` + Capacity int `json:"capacity"` +} + +type AppServicePlanProps struct { + ProvisioningState string `json:"provisioningState"` + Status string `json:"status"` + MaximumNumberOfWorkers int `json:"maximumNumberOfWorkers"` + NumberOfSites int `json:"numberOfSites"` + PerSiteScaling bool `json:"perSiteScaling"` + ZoneRedundant bool `json:"zoneRedundant"` + Reserved bool `json:"reserved"` // true for Linux +} + +// Linux Web App (sites) +type LinuxWebApp struct { + ID string `json:"id"` + Name string `json:"name"` + Type string `json:"type"` + Location string `json:"location"` + Tags map[string]string `json:"tags,omitempty"` + Kind string `json:"kind,omitempty"` + Identity *AppIdentity `json:"identity,omitempty"` + Properties LinuxWebAppProps `json:"properties"` +} + +type AppIdentity struct { + Type string `json:"type"` + PrincipalID string `json:"principalId,omitempty"` + TenantID string `json:"tenantId,omitempty"` + UserIDs map[string]string `json:"userAssignedIdentities,omitempty"` +} + +type LinuxWebAppProps struct { + ProvisioningState string `json:"provisioningState"` + State string `json:"state"` + DefaultHostName string `json:"defaultHostName"` + ServerFarmID string `json:"serverFarmId"` + HTTPSOnly bool `json:"httpsOnly"` + ClientAffinityEnabled bool `json:"clientAffinityEnabled"` + OutboundIPAddresses string `json:"outboundIpAddresses"` + PossibleOutboundIPAddresses string `json:"possibleOutboundIpAddresses"` + CustomDomainVerificationID string `json:"customDomainVerificationId"` + SiteConfig *WebAppSiteConfig `json:"siteConfig,omitempty"` +} + +type WebAppSiteConfig struct { + AlwaysOn bool `json:"alwaysOn"` + HTTP20Enabled bool `json:"http20Enabled"` + WebSocketsEnabled bool `json:"webSocketsEnabled"` + FtpsState string `json:"ftpsState"` 
+ MinTLSVersion string `json:"minTlsVersion"` + LinuxFxVersion string `json:"linuxFxVersion"` + AppCommandLine string `json:"appCommandLine,omitempty"` + HealthCheckPath string `json:"healthCheckPath,omitempty"` + VnetRouteAllEnabled bool `json:"vnetRouteAllEnabled"` + AutoHealEnabled bool `json:"autoHealEnabled"` + Experiments *WebAppExperiments `json:"experiments,omitempty"` +} + +// WebAppExperiments contains traffic routing configuration +type WebAppExperiments struct { + RampUpRules []RampUpRule `json:"rampUpRules,omitempty"` +} + +// RampUpRule defines traffic routing to a deployment slot +type RampUpRule struct { + ActionHostName string `json:"actionHostName"` + ReroutePercentage float64 `json:"reroutePercentage"` + Name string `json:"name"` +} + +// Web App Slot +type WebAppSlot struct { + ID string `json:"id"` + Name string `json:"name"` + Type string `json:"type"` + Location string `json:"location"` + Tags map[string]string `json:"tags,omitempty"` + Kind string `json:"kind,omitempty"` + Properties LinuxWebAppProps `json:"properties"` +} + +// Log Analytics Workspace +type LogAnalyticsWorkspace struct { + ID string `json:"id"` + Name string `json:"name"` + Type string `json:"type"` + Location string `json:"location"` + Tags map[string]string `json:"tags,omitempty"` + Properties LogAnalyticsWorkspaceProps `json:"properties"` +} + +type LogAnalyticsWorkspaceProps struct { + ProvisioningState string `json:"provisioningState"` + CustomerID string `json:"customerId"` + Sku struct { + Name string `json:"name"` + } `json:"sku"` + RetentionInDays int `json:"retentionInDays"` +} + +// Application Insights +type ApplicationInsights struct { + ID string `json:"id"` + Name string `json:"name"` + Type string `json:"type"` + Location string `json:"location"` + Tags map[string]string `json:"tags,omitempty"` + Kind string `json:"kind"` + Properties ApplicationInsightsProps `json:"properties"` +} + +type ApplicationInsightsProps struct { + ProvisioningState string `json:"provisioningState"` + ApplicationID string `json:"AppId"` + InstrumentationKey string `json:"InstrumentationKey"` + ConnectionString string `json:"ConnectionString"` + WorkspaceResourceID string `json:"WorkspaceResourceId,omitempty"` +} + +// Monitor Autoscale Settings +type AutoscaleSetting struct { + ID string `json:"id"` + Name string `json:"name"` + Type string `json:"type"` + Location string `json:"location"` + Tags map[string]string `json:"tags,omitempty"` + Properties AutoscaleSettingProps `json:"properties"` +} + +type AutoscaleSettingProps struct { + ProvisioningState string `json:"provisioningState,omitempty"` + Enabled bool `json:"enabled"` + TargetResourceURI string `json:"targetResourceUri"` + TargetResourceLocation string `json:"targetResourceLocation,omitempty"` + Profiles []interface{} `json:"profiles"` + Notifications []interface{} `json:"notifications,omitempty"` +} + +// Monitor Action Group +type ActionGroup struct { + ID string `json:"id"` + Name string `json:"name"` + Type string `json:"type"` + Location string `json:"location"` + Tags map[string]string `json:"tags,omitempty"` + Properties ActionGroupProps `json:"properties"` +} + +type ActionGroupProps struct { + GroupShortName string `json:"groupShortName"` + Enabled bool `json:"enabled"` + EmailReceivers []interface{} `json:"emailReceivers,omitempty"` + WebhookReceivers []interface{} `json:"webhookReceivers,omitempty"` +} + +// Monitor Metric Alert +type MetricAlert struct { + ID string `json:"id"` + Name string `json:"name"` + Type string `json:"type"` + 
Location string `json:"location"` + Tags map[string]string `json:"tags,omitempty"` + Properties MetricAlertProps `json:"properties"` +} + +type MetricAlertProps struct { + Description string `json:"description,omitempty"` + Severity int `json:"severity"` + Enabled bool `json:"enabled"` + Scopes []string `json:"scopes"` + EvaluationFrequency string `json:"evaluationFrequency"` + WindowSize string `json:"windowSize"` + Criteria interface{} `json:"criteria"` + Actions []interface{} `json:"actions,omitempty"` +} + +// Diagnostic Settings (nested resource) +type DiagnosticSetting struct { + ID string `json:"id"` + Name string `json:"name"` + Type string `json:"type"` + Properties DiagnosticSettingProps `json:"properties"` +} + +type DiagnosticSettingProps struct { + WorkspaceID string `json:"workspaceId,omitempty"` + Logs []interface{} `json:"logs,omitempty"` + Metrics []interface{} `json:"metrics,omitempty"` +} + +// Azure Error Response +type AzureError struct { + Error AzureErrorDetail `json:"error"` +} + +type AzureErrorDetail struct { + Code string `json:"code"` + Message string `json:"message"` +} + +// ============================================================================= +// Server +// ============================================================================= + +type Server struct { + store *Store +} + +func NewServer() *Server { + return &Server{ + store: NewStore(), + } +} + +func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { + path := r.URL.Path + method := r.Method + host := r.Host + + log.Printf("%s %s (Host: %s)", method, path, host) + + // Health check + if path == "/health" || path == "/" { + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(map[string]string{"status": "ok"}) + return + } + + // Check if this is a Blob Storage request (based on Host header) + if strings.Contains(host, ".blob.core.windows.net") { + s.handleBlobStorage(w, r) + return + } + + w.Header().Set("Content-Type", "application/json") + + // OpenID Connect discovery endpoints (required by MSAL/Azure CLI) + if strings.Contains(path, "/.well-known/openid-configuration") { + s.handleOpenIDConfiguration(w, r) + return + } + + // MSAL instance discovery endpoint + if strings.Contains(path, "/common/discovery/instance") || strings.Contains(path, "/discovery/instance") { + s.handleInstanceDiscovery(w, r) + return + } + + // OAuth token endpoint (Azure AD authentication) + if strings.Contains(path, "/oauth2/token") || strings.Contains(path, "/oauth2/v2.0/token") { + s.handleOAuth(w, r) + return + } + + // Subscription endpoint + if matchSubscription(path) { + s.handleSubscription(w, r) + return + } + + // List all providers endpoint (for provider cache) + if matchListProviders(path) { + s.handleListProviders(w, r) + return + } + + // Provider registration endpoint + if matchProviderRegistration(path) { + s.handleProviderRegistration(w, r) + return + } + + // Route to appropriate handler + // Note: More specific routes must come first (operationresults before enableCustomHttps before customDomain, customDomain before endpoint) + switch { + case matchCDNOperationResults(path): + s.handleCDNOperationResults(w, r) + case matchCDNCustomDomainEnableHttps(path): + s.handleCDNCustomDomainHttps(w, r, true) + case matchCDNCustomDomainDisableHttps(path): + s.handleCDNCustomDomainHttps(w, r, false) + case matchCDNCustomDomain(path): + s.handleCDNCustomDomain(w, r) + case matchCDNProfile(path): + s.handleCDNProfile(w, r) + case matchCDNEndpoint(path): + 
s.handleCDNEndpoint(w, r) + case matchDNSZone(path): + s.handleDNSZone(w, r) + case matchDNSCNAMERecord(path): + s.handleDNSCNAMERecord(w, r) + case matchStorageAccountKeys(path): + s.handleStorageAccountKeys(w, r) + case matchStorageAccount(path): + s.handleStorageAccount(w, r) + // App Service handlers (more specific routes first) + case matchWebAppCheckName(path): + s.handleWebAppCheckName(w, r) + case matchWebAppAuthSettings(path): + s.handleWebAppAuthSettings(w, r) + case matchWebAppAuthSettingsV2(path): + s.handleWebAppAuthSettingsV2(w, r) + case matchWebAppConfigLogs(path): + s.handleWebAppConfigLogs(w, r) + case matchWebAppAppSettings(path): + s.handleWebAppAppSettings(w, r) + case matchWebAppConnStrings(path): + s.handleWebAppConnStrings(w, r) + case matchWebAppStickySettings(path): + s.handleWebAppStickySettings(w, r) + case matchWebAppStorageAccounts(path): + s.handleWebAppStorageAccounts(w, r) + case matchWebAppBackups(path): + s.handleWebAppBackups(w, r) + case matchWebAppMetadata(path): + s.handleWebAppMetadata(w, r) + case matchWebAppPubCreds(path): + s.handleWebAppPubCreds(w, r) + case matchWebAppConfig(path): + // Must be before ConfigFallback - /config/web is more specific than /config/[^/]+ + s.handleWebAppConfig(w, r) + case matchWebAppConfigFallback(path): + s.handleWebAppConfigFallback(w, r) + case matchWebAppBasicAuthPolicy(path): + s.handleWebAppBasicAuthPolicy(w, r) + case matchWebAppSlotConfig(path): + s.handleWebAppSlotConfig(w, r) + case matchWebAppSlotConfigFallback(path): + s.handleWebAppSlotConfigFallback(w, r) + case matchWebAppSlotBasicAuthPolicy(path): + s.handleWebAppSlotBasicAuthPolicy(w, r) + case matchWebAppSlot(path): + s.handleWebAppSlot(w, r) + case matchWebAppTrafficRouting(path): + s.handleWebAppTrafficRouting(w, r) + case matchLinuxWebApp(path): + s.handleLinuxWebApp(w, r) + case matchAppServicePlan(path): + s.handleAppServicePlan(w, r) + // Monitoring handlers + case matchLogAnalytics(path): + s.handleLogAnalytics(w, r) + case matchAppInsights(path): + s.handleAppInsights(w, r) + case matchAutoscaleSetting(path): + s.handleAutoscaleSetting(w, r) + case matchActionGroup(path): + s.handleActionGroup(w, r) + case matchMetricAlert(path): + s.handleMetricAlert(w, r) + case matchDiagnosticSetting(path): + s.handleDiagnosticSetting(w, r) + default: + s.notFound(w, path) + } +} + +// ============================================================================= +// Path Matchers +// ============================================================================= + +var ( + subscriptionRegex = regexp.MustCompile(`^/subscriptions/[^/]+$`) + listProvidersRegex = regexp.MustCompile(`^/subscriptions/[^/]+/providers$`) + providerRegistrationRegex = regexp.MustCompile(`/subscriptions/[^/]+/providers/Microsoft\.[^/]+$`) + cdnProfileRegex = regexp.MustCompile(`/subscriptions/[^/]+/resourceGroups/[^/]+/providers/Microsoft\.Cdn/profiles/[^/]+$`) + cdnEndpointRegex = regexp.MustCompile(`/subscriptions/[^/]+/resourceGroups/[^/]+/providers/Microsoft\.Cdn/profiles/[^/]+/endpoints/[^/]+$`) + cdnCustomDomainRegex = regexp.MustCompile(`(?i)/subscriptions/[^/]+/resourceGroups/[^/]+/providers/Microsoft\.Cdn/profiles/[^/]+/endpoints/[^/]+/customDomains/[^/]+$`) + cdnCustomDomainEnableHttpsRegex = regexp.MustCompile(`(?i)/subscriptions/[^/]+/resourceGroups/[^/]+/providers/Microsoft\.Cdn/profiles/[^/]+/endpoints/[^/]+/customDomains/[^/]+/enableCustomHttps$`) + cdnCustomDomainDisableHttpsRegex = 
regexp.MustCompile(`(?i)/subscriptions/[^/]+/resourceGroups/[^/]+/providers/Microsoft\.Cdn/profiles/[^/]+/endpoints/[^/]+/customDomains/[^/]+/disableCustomHttps$`) + cdnOperationResultsRegex = regexp.MustCompile(`(?i)/subscriptions/[^/]+/resourceGroups/[^/]+/providers/Microsoft\.Cdn/profiles/[^/]+/endpoints/[^/]+/customDomains/[^/]+/operationresults/`) + dnsZoneRegex = regexp.MustCompile(`(?i)/subscriptions/[^/]+/resourceGroups/[^/]+/providers/Microsoft\.Network/dnszones/[^/]+$`) + dnsCNAMERecordRegex = regexp.MustCompile(`(?i)/subscriptions/[^/]+/resourceGroups/[^/]+/providers/Microsoft\.Network/dnszones/[^/]+/CNAME/[^/]+$`) + storageAccountRegex = regexp.MustCompile(`/subscriptions/[^/]+/resourceGroups/[^/]+/providers/Microsoft\.Storage/storageAccounts/[^/]+$`) + storageAccountKeysRegex = regexp.MustCompile(`/subscriptions/[^/]+/resourceGroups/[^/]+/providers/Microsoft\.Storage/storageAccounts/[^/]+/listKeys$`) + // App Service resources + appServicePlanRegex = regexp.MustCompile(`(?i)/subscriptions/[^/]+/resourceGroups/[^/]+/providers/Microsoft\.Web/serverfarms/[^/]+$`) + linuxWebAppRegex = regexp.MustCompile(`(?i)/subscriptions/[^/]+/resourceGroups/[^/]+/providers/Microsoft\.Web/sites/[^/]+$`) + webAppSlotRegex = regexp.MustCompile(`(?i)/subscriptions/[^/]+/resourceGroups/[^/]+/providers/Microsoft\.Web/sites/[^/]+/slots/[^/]+$`) + webAppSlotConfigRegex = regexp.MustCompile(`(?i)/subscriptions/[^/]+/resourceGroups/[^/]+/providers/Microsoft\.Web/sites/[^/]+/slots/[^/]+/config/web$`) + webAppSlotConfigFallbackRegex = regexp.MustCompile(`(?i)/subscriptions/[^/]+/resourceGroups/[^/]+/providers/Microsoft\.Web/sites/[^/]+/slots/[^/]+/config/[^/]+(/list)?$`) + webAppSlotBasicAuthPolicyRegex = regexp.MustCompile(`(?i)/subscriptions/[^/]+/resourceGroups/[^/]+/providers/Microsoft\.Web/sites/[^/]+/slots/[^/]+/basicPublishingCredentialsPolicies/(ftp|scm)$`) + webAppConfigRegex = regexp.MustCompile(`(?i)/subscriptions/[^/]+/resourceGroups/[^/]+/providers/Microsoft\.Web/sites/[^/]+/config/web$`) + webAppCheckNameRegex = regexp.MustCompile(`(?i)/subscriptions/[^/]+/providers/Microsoft\.Web/checknameavailability$`) + webAppAuthSettingsRegex = regexp.MustCompile(`(?i)/subscriptions/[^/]+/resourceGroups/[^/]+/providers/Microsoft\.Web/sites/[^/]+/config/authsettings/list$`) + webAppAuthSettingsV2Regex = regexp.MustCompile(`(?i)/subscriptions/[^/]+/resourceGroups/[^/]+/providers/Microsoft\.Web/sites/[^/]+/config/authsettingsV2/list$`) + webAppConfigLogsRegex = regexp.MustCompile(`(?i)/subscriptions/[^/]+/resourceGroups/[^/]+/providers/Microsoft\.Web/sites/[^/]+/config/logs$`) + webAppAppSettingsRegex = regexp.MustCompile(`(?i)/subscriptions/[^/]+/resourceGroups/[^/]+/providers/Microsoft\.Web/sites/[^/]+/config/appSettings/list$`) + webAppConnStringsRegex = regexp.MustCompile(`(?i)/subscriptions/[^/]+/resourceGroups/[^/]+/providers/Microsoft\.Web/sites/[^/]+/config/connectionstrings/list$`) + webAppStickySettingsRegex = regexp.MustCompile(`(?i)/subscriptions/[^/]+/resourceGroups/[^/]+/providers/Microsoft\.Web/sites/[^/]+/config/slotConfigNames$`) + webAppStorageAccountsRegex = regexp.MustCompile(`(?i)/subscriptions/[^/]+/resourceGroups/[^/]+/providers/Microsoft\.Web/sites/[^/]+/config/azurestorageaccounts/list$`) + webAppBackupsRegex = regexp.MustCompile(`(?i)/subscriptions/[^/]+/resourceGroups/[^/]+/providers/Microsoft\.Web/sites/[^/]+/config/backup/list$`) + webAppMetadataRegex = regexp.MustCompile(`(?i)/subscriptions/[^/]+/resourceGroups/[^/]+/providers/Microsoft\.Web/sites/[^/]+/config/metadata/list$`) 
+ webAppPubCredsRegex = regexp.MustCompile(`(?i)/subscriptions/[^/]+/resourceGroups/[^/]+/providers/Microsoft\.Web/sites/[^/]+/config/publishingcredentials/list$`) + webAppConfigFallbackRegex = regexp.MustCompile(`(?i)/subscriptions/[^/]+/resourceGroups/[^/]+/providers/Microsoft\.Web/sites/[^/]+/config/[^/]+(/list)?$`) + webAppBasicAuthPolicyRegex = regexp.MustCompile(`(?i)/subscriptions/[^/]+/resourceGroups/[^/]+/providers/Microsoft\.Web/sites/[^/]+/basicPublishingCredentialsPolicies/(ftp|scm)$`) + webAppTrafficRoutingRegex = regexp.MustCompile(`(?i)/subscriptions/[^/]+/resourceGroups/[^/]+/providers/Microsoft\.Web/sites/[^/]+/trafficRouting$`) + // Monitoring resources + logAnalyticsRegex = regexp.MustCompile(`(?i)/subscriptions/[^/]+/resourceGroups/[^/]+/providers/Microsoft\.OperationalInsights/workspaces/[^/]+$`) + appInsightsRegex = regexp.MustCompile(`(?i)/subscriptions/[^/]+/resourceGroups/[^/]+/providers/Microsoft\.Insights/components/[^/]+$`) + autoscaleSettingRegex = regexp.MustCompile(`(?i)/subscriptions/[^/]+/resourceGroups/[^/]+/providers/Microsoft\.Insights/autoscalesettings/[^/]+$`) + actionGroupRegex = regexp.MustCompile(`(?i)/subscriptions/[^/]+/resourceGroups/[^/]+/providers/Microsoft\.Insights/actionGroups/[^/]+$`) + metricAlertRegex = regexp.MustCompile(`(?i)/subscriptions/[^/]+/resourceGroups/[^/]+/providers/Microsoft\.Insights/metricAlerts/[^/]+$`) + diagnosticSettingRegex = regexp.MustCompile(`(?i)/providers/Microsoft\.Insights/diagnosticSettings/[^/]+$`) +) + +func matchSubscription(path string) bool { return subscriptionRegex.MatchString(path) } +func matchListProviders(path string) bool { return listProvidersRegex.MatchString(path) } +func matchProviderRegistration(path string) bool { return providerRegistrationRegex.MatchString(path) } +func matchCDNProfile(path string) bool { return cdnProfileRegex.MatchString(path) } +func matchCDNEndpoint(path string) bool { return cdnEndpointRegex.MatchString(path) } +func matchCDNCustomDomain(path string) bool { return cdnCustomDomainRegex.MatchString(path) } +func matchCDNCustomDomainEnableHttps(path string) bool { return cdnCustomDomainEnableHttpsRegex.MatchString(path) } +func matchCDNCustomDomainDisableHttps(path string) bool { return cdnCustomDomainDisableHttpsRegex.MatchString(path) } +func matchCDNOperationResults(path string) bool { return cdnOperationResultsRegex.MatchString(path) } +func matchDNSZone(path string) bool { return dnsZoneRegex.MatchString(path) } +func matchDNSCNAMERecord(path string) bool { return dnsCNAMERecordRegex.MatchString(path) } +func matchStorageAccount(path string) bool { return storageAccountRegex.MatchString(path) } +func matchStorageAccountKeys(path string) bool { return storageAccountKeysRegex.MatchString(path) } +// App Service matchers +func matchAppServicePlan(path string) bool { return appServicePlanRegex.MatchString(path) } +func matchLinuxWebApp(path string) bool { return linuxWebAppRegex.MatchString(path) } +func matchWebAppSlot(path string) bool { return webAppSlotRegex.MatchString(path) } +func matchWebAppSlotConfig(path string) bool { return webAppSlotConfigRegex.MatchString(path) } +func matchWebAppSlotConfigFallback(path string) bool { return webAppSlotConfigFallbackRegex.MatchString(path) } +func matchWebAppSlotBasicAuthPolicy(path string) bool { return webAppSlotBasicAuthPolicyRegex.MatchString(path) } +func matchWebAppConfig(path string) bool { return webAppConfigRegex.MatchString(path) } +func matchWebAppCheckName(path string) bool { return 
webAppCheckNameRegex.MatchString(path) } +func matchWebAppAuthSettings(path string) bool { return webAppAuthSettingsRegex.MatchString(path) } +func matchWebAppAuthSettingsV2(path string) bool { return webAppAuthSettingsV2Regex.MatchString(path) } +func matchWebAppConfigLogs(path string) bool { return webAppConfigLogsRegex.MatchString(path) } +func matchWebAppAppSettings(path string) bool { return webAppAppSettingsRegex.MatchString(path) } +func matchWebAppConnStrings(path string) bool { return webAppConnStringsRegex.MatchString(path) } +func matchWebAppStickySettings(path string) bool { return webAppStickySettingsRegex.MatchString(path) } +func matchWebAppStorageAccounts(path string) bool { return webAppStorageAccountsRegex.MatchString(path) } +func matchWebAppBackups(path string) bool { return webAppBackupsRegex.MatchString(path) } +func matchWebAppMetadata(path string) bool { return webAppMetadataRegex.MatchString(path) } +func matchWebAppPubCreds(path string) bool { return webAppPubCredsRegex.MatchString(path) } +func matchWebAppConfigFallback(path string) bool { return webAppConfigFallbackRegex.MatchString(path) } +func matchWebAppBasicAuthPolicy(path string) bool { return webAppBasicAuthPolicyRegex.MatchString(path) } +func matchWebAppTrafficRouting(path string) bool { return webAppTrafficRoutingRegex.MatchString(path) } +// Monitoring matchers +func matchLogAnalytics(path string) bool { return logAnalyticsRegex.MatchString(path) } +func matchAppInsights(path string) bool { return appInsightsRegex.MatchString(path) } +func matchAutoscaleSetting(path string) bool { return autoscaleSettingRegex.MatchString(path) } +func matchActionGroup(path string) bool { return actionGroupRegex.MatchString(path) } +func matchMetricAlert(path string) bool { return metricAlertRegex.MatchString(path) } +func matchDiagnosticSetting(path string) bool { return diagnosticSettingRegex.MatchString(path) } + +// ============================================================================= +// CDN Profile Handler +// ============================================================================= + +func (s *Server) handleCDNProfile(w http.ResponseWriter, r *http.Request) { + path := r.URL.Path + parts := strings.Split(path, "/") + + // Extract components from path + subscriptionID := parts[2] + resourceGroup := parts[4] + profileName := parts[8] + + resourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Cdn/profiles/%s", + subscriptionID, resourceGroup, profileName) + + switch r.Method { + case http.MethodPut: + var req struct { + Location string `json:"location"` + Tags map[string]string `json:"tags"` + Sku CDNSku `json:"sku"` + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + s.badRequest(w, "Invalid request body") + return + } + + if req.Sku.Name == "" { + s.badRequest(w, "sku.name is required") + return + } + + profile := CDNProfile{ + ID: resourceID, + Name: profileName, + Type: "Microsoft.Cdn/profiles", + Location: req.Location, + Tags: req.Tags, + Sku: req.Sku, + Properties: CDNProfileProps{ + ResourceState: "Active", + ProvisioningState: "Succeeded", + }, + } + + s.store.mu.Lock() + s.store.cdnProfiles[resourceID] = profile + s.store.mu.Unlock() + + w.WriteHeader(http.StatusCreated) + json.NewEncoder(w).Encode(profile) + + case http.MethodGet: + s.store.mu.RLock() + profile, exists := s.store.cdnProfiles[resourceID] + s.store.mu.RUnlock() + + if !exists { + s.resourceNotFound(w, "CDN Profile", profileName) + return + } + + json.NewEncoder(w).Encode(profile) + + 
case http.MethodDelete: + s.store.mu.Lock() + delete(s.store.cdnProfiles, resourceID) + // Also delete associated endpoints + for k := range s.store.cdnEndpoints { + if strings.HasPrefix(k, resourceID+"/endpoints/") { + delete(s.store.cdnEndpoints, k) + } + } + s.store.mu.Unlock() + + w.WriteHeader(http.StatusOK) + + default: + s.methodNotAllowed(w) + } +} + +// ============================================================================= +// CDN Endpoint Handler +// ============================================================================= + +func (s *Server) handleCDNEndpoint(w http.ResponseWriter, r *http.Request) { + path := r.URL.Path + parts := strings.Split(path, "/") + + subscriptionID := parts[2] + resourceGroup := parts[4] + profileName := parts[8] + endpointName := parts[10] + + resourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Cdn/profiles/%s/endpoints/%s", + subscriptionID, resourceGroup, profileName, endpointName) + + switch r.Method { + case http.MethodPut: + var req struct { + Location string `json:"location"` + Tags map[string]string `json:"tags"` + Properties CDNEndpointProps `json:"properties"` + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + s.badRequest(w, "Invalid request body") + return + } + + if len(req.Properties.Origins) == 0 { + s.badRequest(w, "At least one origin is required") + return + } + + endpoint := CDNEndpoint{ + ID: resourceID, + Name: endpointName, + Type: "Microsoft.Cdn/profiles/endpoints", + Location: req.Location, + Tags: req.Tags, + Properties: CDNEndpointProps{ + HostName: fmt.Sprintf("%s.azureedge.net", endpointName), + OriginHostHeader: req.Properties.OriginHostHeader, + Origins: req.Properties.Origins, + OriginPath: req.Properties.OriginPath, + IsHttpAllowed: req.Properties.IsHttpAllowed, + IsHttpsAllowed: true, + IsCompressionEnabled: req.Properties.IsCompressionEnabled, + ResourceState: "Running", + ProvisioningState: "Succeeded", + DeliveryPolicy: req.Properties.DeliveryPolicy, + }, + } + + s.store.mu.Lock() + s.store.cdnEndpoints[resourceID] = endpoint + s.store.mu.Unlock() + + w.WriteHeader(http.StatusCreated) + json.NewEncoder(w).Encode(endpoint) + + case http.MethodGet: + s.store.mu.RLock() + endpoint, exists := s.store.cdnEndpoints[resourceID] + s.store.mu.RUnlock() + + if !exists { + s.resourceNotFound(w, "CDN Endpoint", endpointName) + return + } + + json.NewEncoder(w).Encode(endpoint) + + case http.MethodDelete: + s.store.mu.Lock() + delete(s.store.cdnEndpoints, resourceID) + s.store.mu.Unlock() + + w.WriteHeader(http.StatusOK) + + default: + s.methodNotAllowed(w) + } +} + +// ============================================================================= +// CDN Custom Domain Handler +// ============================================================================= + +func (s *Server) handleCDNCustomDomain(w http.ResponseWriter, r *http.Request) { + path := r.URL.Path + parts := strings.Split(path, "/") + + subscriptionID := parts[2] + resourceGroup := parts[4] + profileName := parts[8] + endpointName := parts[10] + customDomainName := parts[12] + + resourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Cdn/profiles/%s/endpoints/%s/customDomains/%s", + subscriptionID, resourceGroup, profileName, endpointName, customDomainName) + + switch r.Method { + case http.MethodPut: + var req struct { + Properties struct { + HostName string `json:"hostName"` + } `json:"properties"` + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + 
s.badRequest(w, "Invalid request body") + return + } + + if req.Properties.HostName == "" { + s.badRequest(w, "properties.hostName is required") + return + } + + customDomain := CDNCustomDomain{ + ID: resourceID, + Name: customDomainName, + Type: "Microsoft.Cdn/profiles/endpoints/customDomains", + Properties: CDNCustomDomainProps{ + HostName: req.Properties.HostName, + ResourceState: "Active", + ProvisioningState: "Succeeded", + }, + } + + s.store.mu.Lock() + s.store.cdnCustomDomains[resourceID] = customDomain + s.store.mu.Unlock() + + w.WriteHeader(http.StatusCreated) + json.NewEncoder(w).Encode(customDomain) + + case http.MethodGet: + s.store.mu.RLock() + customDomain, exists := s.store.cdnCustomDomains[resourceID] + s.store.mu.RUnlock() + + if !exists { + s.resourceNotFound(w, "CDN Custom Domain", customDomainName) + return + } + + json.NewEncoder(w).Encode(customDomain) + + case http.MethodDelete: + s.store.mu.Lock() + delete(s.store.cdnCustomDomains, resourceID) + s.store.mu.Unlock() + + w.WriteHeader(http.StatusOK) + + default: + s.methodNotAllowed(w) + } +} + +// ============================================================================= +// CDN Custom Domain HTTPS Handler +// ============================================================================= + +func (s *Server) handleCDNOperationResults(w http.ResponseWriter, r *http.Request) { + // Operation results endpoint - returns the status of an async operation + // Always return Succeeded to indicate the operation is complete + + if r.Method != http.MethodGet { + s.methodNotAllowed(w) + return + } + + w.Header().Set("x-ms-request-id", fmt.Sprintf("%d", time.Now().UnixNano())) + w.WriteHeader(http.StatusOK) + + response := map[string]interface{}{ + "status": "Succeeded", + "properties": map[string]interface{}{ + "customHttpsProvisioningState": "Enabled", + "customHttpsProvisioningSubstate": "CertificateDeployed", + }, + } + json.NewEncoder(w).Encode(response) +} + +func (s *Server) handleCDNCustomDomainHttps(w http.ResponseWriter, r *http.Request, enable bool) { + // enableCustomHttps and disableCustomHttps endpoints + // These are POST requests to enable/disable HTTPS on a custom domain + + if r.Method != http.MethodPost { + s.methodNotAllowed(w) + return + } + + // Extract resource info from path for the polling URL + path := r.URL.Path + // Remove /enableCustomHttps or /disableCustomHttps from path to get custom domain path + customDomainPath := strings.TrimSuffix(path, "/enableCustomHttps") + customDomainPath = strings.TrimSuffix(customDomainPath, "/disableCustomHttps") + + // Azure async operations require a Location or Azure-AsyncOperation header for polling + // The Location header should point to the operation status endpoint + operationID := fmt.Sprintf("op-%d", time.Now().UnixNano()) + asyncOperationURL := fmt.Sprintf("https://%s%s/operationresults/%s", r.Host, customDomainPath, operationID) + + w.Header().Set("Azure-AsyncOperation", asyncOperationURL) + w.Header().Set("Location", asyncOperationURL) + w.Header().Set("x-ms-request-id", fmt.Sprintf("%d", time.Now().UnixNano())) + w.WriteHeader(http.StatusAccepted) + + // Return a custom domain response with the updated HTTPS state + response := map[string]interface{}{ + "properties": map[string]interface{}{ + "customHttpsProvisioningState": "Enabled", + "customHttpsProvisioningSubstate": "CertificateDeployed", + }, + } + if !enable { + response["properties"].(map[string]interface{})["customHttpsProvisioningState"] = "Disabled" + 
response["properties"].(map[string]interface{})["customHttpsProvisioningSubstate"] = "" + } + json.NewEncoder(w).Encode(response) +} + +// ============================================================================= +// DNS Zone Handler +// ============================================================================= + +func (s *Server) handleDNSZone(w http.ResponseWriter, r *http.Request) { + path := r.URL.Path + parts := strings.Split(path, "/") + + subscriptionID := parts[2] + resourceGroup := parts[4] + zoneName := parts[8] + + resourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/dnszones/%s", + subscriptionID, resourceGroup, zoneName) + + switch r.Method { + case http.MethodPut: + var req struct { + Location string `json:"location"` + Tags map[string]string `json:"tags"` + } + json.NewDecoder(r.Body).Decode(&req) + + zone := DNSZone{ + ID: resourceID, + Name: zoneName, + Type: "Microsoft.Network/dnszones", + Location: "global", + Tags: req.Tags, + Properties: DNSZoneProps{ + MaxNumberOfRecordSets: 10000, + NumberOfRecordSets: 2, + NameServers: []string{ + "ns1-01.azure-dns.com.", + "ns2-01.azure-dns.net.", + "ns3-01.azure-dns.org.", + "ns4-01.azure-dns.info.", + }, + }, + } + + s.store.mu.Lock() + s.store.dnsZones[resourceID] = zone + s.store.mu.Unlock() + + w.WriteHeader(http.StatusCreated) + json.NewEncoder(w).Encode(zone) + + case http.MethodGet: + s.store.mu.RLock() + zone, exists := s.store.dnsZones[resourceID] + s.store.mu.RUnlock() + + if !exists { + // Return a fake zone for any GET request (like storage account handler) + // This allows data sources to work without pre-creating the zone + zone = DNSZone{ + ID: resourceID, + Name: zoneName, + Type: "Microsoft.Network/dnszones", + Location: "global", + Properties: DNSZoneProps{ + MaxNumberOfRecordSets: 10000, + NumberOfRecordSets: 2, + NameServers: []string{ + "ns1-01.azure-dns.com.", + "ns2-01.azure-dns.net.", + "ns3-01.azure-dns.org.", + "ns4-01.azure-dns.info.", + }, + }, + } + } + + json.NewEncoder(w).Encode(zone) + + case http.MethodDelete: + s.store.mu.Lock() + delete(s.store.dnsZones, resourceID) + s.store.mu.Unlock() + + w.WriteHeader(http.StatusOK) + + default: + s.methodNotAllowed(w) + } +} + +// ============================================================================= +// DNS CNAME Record Handler +// ============================================================================= + +func (s *Server) handleDNSCNAMERecord(w http.ResponseWriter, r *http.Request) { + path := r.URL.Path + parts := strings.Split(path, "/") + + subscriptionID := parts[2] + resourceGroup := parts[4] + zoneName := parts[8] + recordName := parts[10] + + resourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/dnszones/%s/CNAME/%s", + subscriptionID, resourceGroup, zoneName, recordName) + + switch r.Method { + case http.MethodPut: + var req struct { + Properties DNSCNAMERecordProps `json:"properties"` + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + s.badRequest(w, "Invalid request body") + return + } + + if req.Properties.CNAMERecord == nil || req.Properties.CNAMERecord.Cname == "" { + s.badRequest(w, "CNAMERecord.cname is required") + return + } + + record := DNSCNAMERecord{ + ID: resourceID, + Name: recordName, + Type: "Microsoft.Network/dnszones/CNAME", + Etag: fmt.Sprintf("etag-%d", time.Now().Unix()), + Properties: DNSCNAMERecordProps{ + TTL: req.Properties.TTL, + Fqdn: fmt.Sprintf("%s.%s.", recordName, zoneName), + CNAMERecord: 
req.Properties.CNAMERecord, + }, + } + + s.store.mu.Lock() + s.store.dnsCNAMERecords[resourceID] = record + s.store.mu.Unlock() + + w.WriteHeader(http.StatusCreated) + json.NewEncoder(w).Encode(record) + + case http.MethodGet: + s.store.mu.RLock() + record, exists := s.store.dnsCNAMERecords[resourceID] + s.store.mu.RUnlock() + + if !exists { + s.resourceNotFound(w, "DNS CNAME Record", recordName) + return + } + + json.NewEncoder(w).Encode(record) + + case http.MethodDelete: + s.store.mu.Lock() + delete(s.store.dnsCNAMERecords, resourceID) + s.store.mu.Unlock() + + w.WriteHeader(http.StatusOK) + + default: + s.methodNotAllowed(w) + } +} + +// ============================================================================= +// Storage Account Handler (Read-only for data source) +// ============================================================================= + +func (s *Server) handleStorageAccount(w http.ResponseWriter, r *http.Request) { + path := r.URL.Path + parts := strings.Split(path, "/") + + subscriptionID := parts[2] + resourceGroup := parts[4] + accountName := parts[8] + + resourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Storage/storageAccounts/%s", + subscriptionID, resourceGroup, accountName) + + switch r.Method { + case http.MethodGet: + // For data sources, we return a pre-configured storage account + // The account "exists" as long as it's queried + account := StorageAccount{ + ID: resourceID, + Name: accountName, + Type: "Microsoft.Storage/storageAccounts", + Location: "eastus", + Kind: "StorageV2", + Sku: StorageSku{ + Name: "Standard_LRS", + Tier: "Standard", + }, + Properties: StorageAccountProps{ + PrimaryEndpoints: StorageEndpoints{ + Blob: fmt.Sprintf("https://%s.blob.core.windows.net/", accountName), + Web: fmt.Sprintf("https://%s.z13.web.core.windows.net/", accountName), + }, + ProvisioningState: "Succeeded", + }, + } + + json.NewEncoder(w).Encode(account) + + case http.MethodPut: + // Allow creating storage accounts for completeness + var req struct { + Location string `json:"location"` + Tags map[string]string `json:"tags"` + Kind string `json:"kind"` + Sku StorageSku `json:"sku"` + } + json.NewDecoder(r.Body).Decode(&req) + + account := StorageAccount{ + ID: resourceID, + Name: accountName, + Type: "Microsoft.Storage/storageAccounts", + Location: req.Location, + Kind: req.Kind, + Sku: req.Sku, + Properties: StorageAccountProps{ + PrimaryEndpoints: StorageEndpoints{ + Blob: fmt.Sprintf("https://%s.blob.core.windows.net/", accountName), + Web: fmt.Sprintf("https://%s.z13.web.core.windows.net/", accountName), + }, + ProvisioningState: "Succeeded", + }, + } + + s.store.mu.Lock() + s.store.storageAccounts[resourceID] = account + s.store.mu.Unlock() + + w.WriteHeader(http.StatusCreated) + json.NewEncoder(w).Encode(account) + + default: + s.methodNotAllowed(w) + } +} + +// ============================================================================= +// Storage Account Keys Handler +// ============================================================================= + +func (s *Server) handleStorageAccountKeys(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + s.methodNotAllowed(w) + return + } + + // Return mock storage account keys + response := map[string]interface{}{ + "keys": []map[string]interface{}{ + { + "keyName": "key1", + "value": "mock-storage-key-1-base64encodedvalue==", + "permissions": "FULL", + }, + { + "keyName": "key2", + "value": "mock-storage-key-2-base64encodedvalue==", + "permissions": "FULL", + 
}, + }, + } + json.NewEncoder(w).Encode(response) +} + +// ============================================================================= +// Blob Storage Handler (for azurerm backend state storage) +// ============================================================================= + +func (s *Server) handleBlobStorage(w http.ResponseWriter, r *http.Request) { + host := r.Host + path := r.URL.Path + query := r.URL.Query() + + // Extract account name from host (e.g., "devstoreaccount1.blob.core.windows.net" -> "devstoreaccount1") + accountName := strings.Split(host, ".")[0] + + // Remove leading slash and parse path + path = strings.TrimPrefix(path, "/") + parts := strings.SplitN(path, "/", 2) + + containerName := "" + blobName := "" + + if len(parts) >= 1 && parts[0] != "" { + containerName = parts[0] + } + if len(parts) >= 2 { + blobName = parts[1] + } + + log.Printf("Blob Storage: account=%s container=%s blob=%s restype=%s comp=%s", accountName, containerName, blobName, query.Get("restype"), query.Get("comp")) + + // List blobs in container (restype=container&comp=list) + // Must check this BEFORE container operations since ListBlobs also has restype=container + if containerName != "" && query.Get("comp") == "list" { + s.handleListBlobs(w, r, accountName, containerName) + return + } + + // Check if this is a container operation (restype=container without comp=list) + if query.Get("restype") == "container" { + s.handleBlobContainer(w, r, accountName, containerName) + return + } + + // Otherwise, it's a blob operation + if containerName != "" && blobName != "" { + s.handleBlob(w, r, accountName, containerName, blobName) + return + } + + // Unknown operation + w.Header().Set("Content-Type", "application/xml") + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, `<?xml version="1.0" encoding="utf-8"?><Error><Code>InvalidUri</Code><Message>The requested URI does not represent any resource on the server.</Message></Error>`) +} + +func (s *Server) handleBlobContainer(w http.ResponseWriter, r *http.Request, accountName, containerName string) { + containerKey := fmt.Sprintf("%s/%s", accountName, containerName) + + switch r.Method { + case http.MethodPut: + // Create container + now := time.Now().UTC().Format(time.RFC1123) + etag := fmt.Sprintf("\"0x%X\"", time.Now().UnixNano()) + + container := BlobContainer{ + Name: containerName, + Properties: BlobContainerProps{ + LastModified: now, + Etag: etag, + }, + } + + s.store.mu.Lock() + s.store.blobContainers[containerKey] = container + s.store.mu.Unlock() + + w.Header().Set("ETag", etag) + w.Header().Set("Last-Modified", now) + w.Header().Set("x-ms-request-id", fmt.Sprintf("%d", time.Now().UnixNano())) + w.Header().Set("x-ms-version", "2021-06-08") + w.WriteHeader(http.StatusCreated) + + case http.MethodGet, http.MethodHead: + // Get container properties + s.store.mu.RLock() + container, exists := s.store.blobContainers[containerKey] + s.store.mu.RUnlock() + + if !exists { + s.blobNotFound(w, "ContainerNotFound", fmt.Sprintf("The specified container does not exist. 
Container: %s", containerName)) + return + } + + w.Header().Set("ETag", container.Properties.Etag) + w.Header().Set("Last-Modified", container.Properties.LastModified) + w.Header().Set("x-ms-request-id", fmt.Sprintf("%d", time.Now().UnixNano())) + w.Header().Set("x-ms-version", "2021-06-08") + w.Header().Set("x-ms-lease-status", "unlocked") + w.Header().Set("x-ms-lease-state", "available") + w.Header().Set("x-ms-has-immutability-policy", "false") + w.Header().Set("x-ms-has-legal-hold", "false") + w.WriteHeader(http.StatusOK) + + case http.MethodDelete: + // Delete container + s.store.mu.Lock() + delete(s.store.blobContainers, containerKey) + // Also delete all blobs in the container + for k := range s.store.blobs { + if strings.HasPrefix(k, containerKey+"/") { + delete(s.store.blobs, k) + } + } + s.store.mu.Unlock() + + w.Header().Set("x-ms-request-id", fmt.Sprintf("%d", time.Now().UnixNano())) + w.Header().Set("x-ms-version", "2021-06-08") + w.WriteHeader(http.StatusAccepted) + + default: + w.WriteHeader(http.StatusMethodNotAllowed) + } +} + +func (s *Server) handleBlob(w http.ResponseWriter, r *http.Request, accountName, containerName, blobName string) { + containerKey := fmt.Sprintf("%s/%s", accountName, containerName) + blobKey := fmt.Sprintf("%s/%s/%s", accountName, containerName, blobName) + query := r.URL.Query() + + // Handle lease operations + if query.Get("comp") == "lease" { + s.handleBlobLease(w, r, blobKey) + return + } + + // Handle metadata operations (used for state locking) + if query.Get("comp") == "metadata" { + s.handleBlobMetadata(w, r, blobKey) + return + } + + // Handle block blob operations (staged uploads) + if query.Get("comp") == "block" { + s.handlePutBlock(w, r, blobKey) + return + } + + if query.Get("comp") == "blocklist" { + s.handleBlockList(w, r, accountName, containerName, blobName, blobKey) + return + } + + // Handle blob properties + if query.Get("comp") == "properties" { + s.handleBlobProperties(w, r, blobKey) + return + } + + switch r.Method { + case http.MethodPut: + // Upload blob + s.store.mu.RLock() + _, containerExists := s.store.blobContainers[containerKey] + s.store.mu.RUnlock() + + if !containerExists { + s.blobNotFound(w, "ContainerNotFound", fmt.Sprintf("The specified container does not exist. 
Container: %s", containerName)) + return + } + + // Read request body + body := make([]byte, 0) + if r.Body != nil { + body, _ = io.ReadAll(r.Body) + } + + now := time.Now().UTC().Format(time.RFC1123) + etag := fmt.Sprintf("\"0x%X\"", time.Now().UnixNano()) + contentType := r.Header.Get("Content-Type") + if contentType == "" { + contentType = "application/octet-stream" + } + + // Extract metadata from x-ms-meta-* headers + metadata := make(map[string]string) + for key, values := range r.Header { + lowerKey := strings.ToLower(key) + if strings.HasPrefix(lowerKey, "x-ms-meta-") { + metaKey := strings.TrimPrefix(lowerKey, "x-ms-meta-") + if len(values) > 0 { + metadata[metaKey] = values[0] + } + } + } + + blob := Blob{ + Name: blobName, + Content: body, + Metadata: metadata, + Properties: BlobProps{ + LastModified: now, + Etag: etag, + ContentLength: len(body), + ContentType: contentType, + }, + } + + s.store.mu.Lock() + s.store.blobs[blobKey] = blob + s.store.mu.Unlock() + + w.Header().Set("ETag", etag) + w.Header().Set("Last-Modified", now) + w.Header().Set("Content-MD5", "") + w.Header().Set("x-ms-request-id", fmt.Sprintf("%d", time.Now().UnixNano())) + w.Header().Set("x-ms-version", "2021-06-08") + w.Header().Set("x-ms-request-server-encrypted", "true") + w.WriteHeader(http.StatusCreated) + + case http.MethodGet: + // Download blob + s.store.mu.RLock() + blob, exists := s.store.blobs[blobKey] + s.store.mu.RUnlock() + + if !exists { + s.blobNotFound(w, "BlobNotFound", fmt.Sprintf("The specified blob does not exist. Blob: %s", blobName)) + return + } + + w.Header().Set("Content-Type", blob.Properties.ContentType) + w.Header().Set("Content-Length", fmt.Sprintf("%d", blob.Properties.ContentLength)) + w.Header().Set("ETag", blob.Properties.Etag) + w.Header().Set("Last-Modified", blob.Properties.LastModified) + w.Header().Set("x-ms-request-id", fmt.Sprintf("%d", time.Now().UnixNano())) + w.Header().Set("x-ms-version", "2021-06-08") + w.Header().Set("x-ms-blob-type", "BlockBlob") + w.WriteHeader(http.StatusOK) + w.Write(blob.Content) + + case http.MethodHead: + // Get blob properties + s.store.mu.RLock() + blob, exists := s.store.blobs[blobKey] + s.store.mu.RUnlock() + + if !exists { + s.blobNotFound(w, "BlobNotFound", fmt.Sprintf("The specified blob does not exist. Blob: %s", blobName)) + return + } + + // Return metadata as x-ms-meta-* headers + for key, value := range blob.Metadata { + w.Header().Set("x-ms-meta-"+key, value) + } + + w.Header().Set("Content-Type", blob.Properties.ContentType) + w.Header().Set("Content-Length", fmt.Sprintf("%d", blob.Properties.ContentLength)) + w.Header().Set("ETag", blob.Properties.Etag) + w.Header().Set("Last-Modified", blob.Properties.LastModified) + w.Header().Set("x-ms-request-id", fmt.Sprintf("%d", time.Now().UnixNano())) + w.Header().Set("x-ms-version", "2021-06-08") + w.Header().Set("x-ms-blob-type", "BlockBlob") + w.Header().Set("x-ms-lease-status", "unlocked") + w.Header().Set("x-ms-lease-state", "available") + w.WriteHeader(http.StatusOK) + + case http.MethodDelete: + // Delete blob + s.store.mu.Lock() + _, exists := s.store.blobs[blobKey] + if exists { + delete(s.store.blobs, blobKey) + } + s.store.mu.Unlock() + + if !exists { + s.blobNotFound(w, "BlobNotFound", fmt.Sprintf("The specified blob does not exist. 
Blob: %s", blobName)) + return + } + + w.Header().Set("x-ms-request-id", fmt.Sprintf("%d", time.Now().UnixNano())) + w.Header().Set("x-ms-version", "2021-06-08") + w.Header().Set("x-ms-delete-type-permanent", "true") + w.WriteHeader(http.StatusAccepted) + + default: + w.WriteHeader(http.StatusMethodNotAllowed) + } +} + +func (s *Server) handleBlobMetadata(w http.ResponseWriter, r *http.Request, blobKey string) { + log.Printf("Blob Metadata: method=%s key=%s", r.Method, blobKey) + + switch r.Method { + case http.MethodPut: + // Set blob metadata - used for state locking + // Extract metadata from x-ms-meta-* headers + metadata := make(map[string]string) + for key, values := range r.Header { + lowerKey := strings.ToLower(key) + if strings.HasPrefix(lowerKey, "x-ms-meta-") { + metaKey := strings.TrimPrefix(lowerKey, "x-ms-meta-") + if len(values) > 0 { + metadata[metaKey] = values[0] + log.Printf("Blob Metadata: storing %s=%s", metaKey, values[0]) + } + } + } + + s.store.mu.Lock() + blob, exists := s.store.blobs[blobKey] + if exists { + blob.Metadata = metadata + s.store.blobs[blobKey] = blob + } else { + // Create a placeholder blob if it doesn't exist (for lock files) + now := time.Now().UTC().Format(time.RFC1123) + etag := fmt.Sprintf("\"0x%X\"", time.Now().UnixNano()) + s.store.blobs[blobKey] = Blob{ + Name: "", + Content: []byte{}, + Metadata: metadata, + Properties: BlobProps{ + LastModified: now, + Etag: etag, + ContentLength: 0, + ContentType: "application/octet-stream", + }, + } + } + s.store.mu.Unlock() + + w.Header().Set("ETag", fmt.Sprintf("\"0x%X\"", time.Now().UnixNano())) + w.Header().Set("Last-Modified", time.Now().UTC().Format(time.RFC1123)) + w.Header().Set("x-ms-request-id", fmt.Sprintf("%d", time.Now().UnixNano())) + w.Header().Set("x-ms-version", "2021-06-08") + w.Header().Set("x-ms-request-server-encrypted", "true") + w.WriteHeader(http.StatusOK) + + case http.MethodGet, http.MethodHead: + // Get blob metadata + s.store.mu.RLock() + blob, exists := s.store.blobs[blobKey] + s.store.mu.RUnlock() + + if !exists { + s.blobNotFound(w, "BlobNotFound", "The specified blob does not exist.") + return + } + + // Return metadata as x-ms-meta-* headers + for key, value := range blob.Metadata { + w.Header().Set("x-ms-meta-"+key, value) + log.Printf("Blob Metadata: returning x-ms-meta-%s=%s", key, value) + } + + w.Header().Set("ETag", blob.Properties.Etag) + w.Header().Set("Last-Modified", blob.Properties.LastModified) + w.Header().Set("x-ms-request-id", fmt.Sprintf("%d", time.Now().UnixNano())) + w.Header().Set("x-ms-version", "2021-06-08") + w.WriteHeader(http.StatusOK) + + default: + w.WriteHeader(http.StatusMethodNotAllowed) + } +} + +func (s *Server) handleBlobLease(w http.ResponseWriter, r *http.Request, blobKey string) { + leaseAction := r.Header.Get("x-ms-lease-action") + log.Printf("Blob Lease: action=%s key=%s", leaseAction, blobKey) + + switch leaseAction { + case "acquire": + // Acquire lease - return a mock lease ID + leaseID := fmt.Sprintf("lease-%d", time.Now().UnixNano()) + w.Header().Set("x-ms-lease-id", leaseID) + w.Header().Set("x-ms-request-id", fmt.Sprintf("%d", time.Now().UnixNano())) + w.Header().Set("x-ms-version", "2021-06-08") + w.WriteHeader(http.StatusCreated) + + case "release", "break": + // Release or break lease + w.Header().Set("x-ms-request-id", fmt.Sprintf("%d", time.Now().UnixNano())) + w.Header().Set("x-ms-version", "2021-06-08") + w.WriteHeader(http.StatusOK) + + case "renew": + // Renew lease + leaseID := r.Header.Get("x-ms-lease-id") + 
w.Header().Set("x-ms-lease-id", leaseID) + w.Header().Set("x-ms-request-id", fmt.Sprintf("%d", time.Now().UnixNano())) + w.Header().Set("x-ms-version", "2021-06-08") + w.WriteHeader(http.StatusOK) + + default: + w.WriteHeader(http.StatusBadRequest) + } +} + +func (s *Server) handlePutBlock(w http.ResponseWriter, r *http.Request, blobKey string) { + blockID := r.URL.Query().Get("blockid") + log.Printf("Put Block: key=%s blockid=%s", blobKey, blockID) + + if r.Method != http.MethodPut { + w.WriteHeader(http.StatusMethodNotAllowed) + return + } + + // Read block data + body, _ := io.ReadAll(r.Body) + + // Store the block + blockKey := fmt.Sprintf("%s/%s", blobKey, blockID) + s.store.mu.Lock() + s.store.blobBlocks[blockKey] = body + s.store.mu.Unlock() + + w.Header().Set("x-ms-request-id", fmt.Sprintf("%d", time.Now().UnixNano())) + w.Header().Set("x-ms-version", "2021-06-08") + w.Header().Set("x-ms-content-crc64", "") + w.Header().Set("x-ms-request-server-encrypted", "true") + w.WriteHeader(http.StatusCreated) +} + +func (s *Server) handleBlockList(w http.ResponseWriter, r *http.Request, accountName, containerName, blobName, blobKey string) { + log.Printf("Block List: method=%s key=%s", r.Method, blobKey) + + switch r.Method { + case http.MethodPut: + // Commit block list - assemble blocks into final blob + // For simplicity, we just create an empty blob (the actual block assembly would be complex) + // The terraform state is typically small enough to not use block uploads + body, _ := io.ReadAll(r.Body) + log.Printf("Block List body: %s", string(body)) + + now := time.Now().UTC().Format(time.RFC1123) + etag := fmt.Sprintf("\"0x%X\"", time.Now().UnixNano()) + + // Create the blob (simplified - in reality would assemble from blocks) + blob := Blob{ + Name: blobName, + Content: []byte{}, // Would normally assemble from blocks + Properties: BlobProps{ + LastModified: now, + Etag: etag, + ContentLength: 0, + ContentType: "application/octet-stream", + }, + } + + s.store.mu.Lock() + s.store.blobs[blobKey] = blob + // Clean up staged blocks + for k := range s.store.blobBlocks { + if strings.HasPrefix(k, blobKey+"/") { + delete(s.store.blobBlocks, k) + } + } + s.store.mu.Unlock() + + w.Header().Set("ETag", etag) + w.Header().Set("Last-Modified", now) + w.Header().Set("x-ms-request-id", fmt.Sprintf("%d", time.Now().UnixNano())) + w.Header().Set("x-ms-version", "2021-06-08") + w.Header().Set("x-ms-request-server-encrypted", "true") + w.WriteHeader(http.StatusCreated) + + case http.MethodGet: + // Get block list + w.Header().Set("Content-Type", "application/xml") + w.Header().Set("x-ms-request-id", fmt.Sprintf("%d", time.Now().UnixNano())) + w.Header().Set("x-ms-version", "2021-06-08") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ``) + + default: + w.WriteHeader(http.StatusMethodNotAllowed) + } +} + +func (s *Server) handleBlobProperties(w http.ResponseWriter, r *http.Request, blobKey string) { + log.Printf("Blob Properties: method=%s key=%s", r.Method, blobKey) + + s.store.mu.RLock() + blob, exists := s.store.blobs[blobKey] + s.store.mu.RUnlock() + + if !exists { + s.blobNotFound(w, "BlobNotFound", "The specified blob does not exist.") + return + } + + switch r.Method { + case http.MethodPut: + // Set blob properties + w.Header().Set("ETag", blob.Properties.Etag) + w.Header().Set("Last-Modified", blob.Properties.LastModified) + w.Header().Set("x-ms-request-id", fmt.Sprintf("%d", time.Now().UnixNano())) + w.Header().Set("x-ms-version", "2021-06-08") + w.WriteHeader(http.StatusOK) + + case 
http.MethodGet, http.MethodHead: + // Get blob properties + w.Header().Set("Content-Type", blob.Properties.ContentType) + w.Header().Set("Content-Length", fmt.Sprintf("%d", blob.Properties.ContentLength)) + w.Header().Set("ETag", blob.Properties.Etag) + w.Header().Set("Last-Modified", blob.Properties.LastModified) + w.Header().Set("x-ms-request-id", fmt.Sprintf("%d", time.Now().UnixNano())) + w.Header().Set("x-ms-version", "2021-06-08") + w.Header().Set("x-ms-blob-type", "BlockBlob") + w.WriteHeader(http.StatusOK) + + default: + w.WriteHeader(http.StatusMethodNotAllowed) + } +} + +func (s *Server) handleListBlobs(w http.ResponseWriter, r *http.Request, accountName, containerName string) { + containerKey := fmt.Sprintf("%s/%s", accountName, containerName) + prefix := containerKey + "/" + + s.store.mu.RLock() + _, containerExists := s.store.blobContainers[containerKey] + var blobs []Blob + for k, b := range s.store.blobs { + if strings.HasPrefix(k, prefix) { + blobs = append(blobs, b) + } + } + s.store.mu.RUnlock() + + if !containerExists { + s.blobNotFound(w, "ContainerNotFound", fmt.Sprintf("The specified container does not exist. Container: %s", containerName)) + return + } + + w.Header().Set("Content-Type", "application/xml") + w.Header().Set("x-ms-request-id", fmt.Sprintf("%d", time.Now().UnixNano())) + w.Header().Set("x-ms-version", "2021-06-08") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, `<?xml version="1.0" encoding="utf-8"?><EnumerationResults ServiceEndpoint="https://%s.blob.core.windows.net/" ContainerName="%s"><Blobs>`, accountName, containerName) + for _, b := range blobs { + fmt.Fprintf(w, `<Blob><Name>%s</Name><Properties><Content-Length>%d</Content-Length><Content-Type>%s</Content-Type><Last-Modified>%s</Last-Modified><Etag>%s</Etag><BlobType>BlockBlob</BlobType><LeaseStatus>unlocked</LeaseStatus><LeaseState>available</LeaseState></Properties></Blob>`, + b.Name, b.Properties.ContentLength, b.Properties.ContentType, b.Properties.LastModified, b.Properties.Etag) + } + fmt.Fprintf(w, `</Blobs></EnumerationResults>`) +} + +func (s *Server) blobNotFound(w http.ResponseWriter, code, message string) { + w.Header().Set("Content-Type", "application/xml") + w.Header().Set("x-ms-request-id", fmt.Sprintf("%d", time.Now().UnixNano())) + w.Header().Set("x-ms-version", "2021-06-08") + w.WriteHeader(http.StatusNotFound) + fmt.Fprintf(w, `<?xml version="1.0" encoding="utf-8"?><Error><Code>%s</Code><Message>%s</Message></Error>`, code, message) +} + +// ============================================================================= +// App Service Plan Handler +// ============================================================================= + +func (s *Server) handleAppServicePlan(w http.ResponseWriter, r *http.Request) { + path := r.URL.Path + parts := strings.Split(path, "/") + + subscriptionID := parts[2] + resourceGroup := parts[4] + planName := parts[8] + + // Build canonical resource ID (stored under a lowercase key for consistent lookups) + resourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Web/serverfarms/%s", + subscriptionID, resourceGroup, planName) + // Use lowercase key for storage to handle case-insensitive lookups + storeKey := strings.ToLower(resourceID) + + switch r.Method { + case http.MethodPut: + var req struct { + Location string `json:"location"` + Tags map[string]string `json:"tags"` + Kind string `json:"kind"` + Sku AppServiceSku `json:"sku"` + Properties struct { + PerSiteScaling bool `json:"perSiteScaling"` + ZoneRedundant bool `json:"zoneRedundant"` + Reserved bool `json:"reserved"` + } `json:"properties"` + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + s.badRequest(w, "Invalid request body") + return + } + + // Derive SKU tier from name + skuTier := "Standard" + if strings.HasPrefix(req.Sku.Name, "P") { + skuTier = "PremiumV3" + } else if strings.HasPrefix(req.Sku.Name, "B") { + skuTier = "Basic" + } else if strings.HasPrefix(req.Sku.Name, "F") { + skuTier = "Free" + } + + plan := 
AppServicePlan{ + ID: resourceID, + Name: planName, + Type: "Microsoft.Web/serverfarms", + Location: req.Location, + Tags: req.Tags, + Kind: req.Kind, + Sku: AppServiceSku{ + Name: req.Sku.Name, + Tier: skuTier, + Size: req.Sku.Name, + Family: string(req.Sku.Name[0]), + Capacity: 1, + }, + Properties: AppServicePlanProps{ + ProvisioningState: "Succeeded", + Status: "Ready", + MaximumNumberOfWorkers: 10, + NumberOfSites: 0, + PerSiteScaling: req.Properties.PerSiteScaling, + ZoneRedundant: req.Properties.ZoneRedundant, + Reserved: req.Properties.Reserved, + }, + } + + s.store.mu.Lock() + s.store.appServicePlans[storeKey] = plan + s.store.mu.Unlock() + + // Azure SDK for azurerm provider expects 200 for PUT operations + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(plan) + + case http.MethodGet: + s.store.mu.RLock() + plan, exists := s.store.appServicePlans[storeKey] + s.store.mu.RUnlock() + + if !exists { + s.resourceNotFound(w, "App Service Plan", planName) + return + } + + json.NewEncoder(w).Encode(plan) + + case http.MethodDelete: + s.store.mu.Lock() + delete(s.store.appServicePlans, storeKey) + s.store.mu.Unlock() + + w.WriteHeader(http.StatusOK) + + default: + s.methodNotAllowed(w) + } +} + +// ============================================================================= +// Web App Auth Settings Handler +// ============================================================================= + +func (s *Server) handleWebAppAuthSettings(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + s.methodNotAllowed(w) + return + } + + // Return default disabled auth settings + response := map[string]interface{}{ + "id": r.URL.Path, + "name": "authsettings", + "type": "Microsoft.Web/sites/config", + "properties": map[string]interface{}{ + "enabled": false, + "runtimeVersion": "~1", + "unauthenticatedClientAction": "RedirectToLoginPage", + "tokenStoreEnabled": false, + "allowedExternalRedirectUrls": []string{}, + "defaultProvider": "AzureActiveDirectory", + "clientId": nil, + "issuer": nil, + "allowedAudiences": nil, + "additionalLoginParams": nil, + "isAadAutoProvisioned": false, + "aadClaimsAuthorization": nil, + "googleClientId": nil, + "facebookAppId": nil, + "gitHubClientId": nil, + "twitterConsumerKey": nil, + "microsoftAccountClientId": nil, + }, + } + + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(response) +} + +// ============================================================================= +// Web App Auth Settings V2 Handler +// ============================================================================= + +func (s *Server) handleWebAppAuthSettingsV2(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + s.methodNotAllowed(w) + return + } + + // Return default disabled auth settings V2 + response := map[string]interface{}{ + "id": r.URL.Path, + "name": "authsettingsV2", + "type": "Microsoft.Web/sites/config", + "properties": map[string]interface{}{ + "platform": map[string]interface{}{ + "enabled": false, + "runtimeVersion": "~1", + }, + "globalValidation": map[string]interface{}{ + "requireAuthentication": false, + "unauthenticatedClientAction": "RedirectToLoginPage", + }, + "identityProviders": map[string]interface{}{}, + "login": map[string]interface{}{ + "routes": map[string]interface{}{}, + "tokenStore": map[string]interface{}{"enabled": false}, + "preserveUrlFragmentsForLogins": false, + }, + "httpSettings": map[string]interface{}{ + "requireHttps": true, + }, + }, + } + + w.WriteHeader(http.StatusOK) + 
json.NewEncoder(w).Encode(response) +} + +// ============================================================================= +// Web App App Settings Handler +// ============================================================================= + +func (s *Server) handleWebAppAppSettings(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + s.methodNotAllowed(w) + return + } + + // Return empty app settings + response := map[string]interface{}{ + "id": r.URL.Path, + "name": "appsettings", + "type": "Microsoft.Web/sites/config", + "properties": map[string]string{}, + } + + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(response) +} + +// ============================================================================= +// Web App Connection Strings Handler +// ============================================================================= + +func (s *Server) handleWebAppConnStrings(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + s.methodNotAllowed(w) + return + } + + // Return empty connection strings + response := map[string]interface{}{ + "id": r.URL.Path, + "name": "connectionstrings", + "type": "Microsoft.Web/sites/config", + "properties": map[string]interface{}{}, + } + + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(response) +} + +// ============================================================================= +// Web App Sticky Settings Handler +// ============================================================================= + +func (s *Server) handleWebAppStickySettings(w http.ResponseWriter, r *http.Request) { + // Handle both GET and PUT methods + if r.Method != http.MethodGet && r.Method != http.MethodPut { + s.methodNotAllowed(w) + return + } + + // Return default sticky settings + response := map[string]interface{}{ + "id": r.URL.Path, + "name": "slotConfigNames", + "type": "Microsoft.Web/sites/config", + "properties": map[string]interface{}{ + "appSettingNames": []string{}, + "connectionStringNames": []string{}, + "azureStorageConfigNames": []string{}, + }, + } + + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(response) +} + +// ============================================================================= +// Web App Config Logs Handler +// ============================================================================= + +func (s *Server) handleWebAppConfigLogs(w http.ResponseWriter, r *http.Request) { + // Handle both GET and PUT methods + if r.Method != http.MethodGet && r.Method != http.MethodPut { + s.methodNotAllowed(w) + return + } + + // Return default logging configuration + response := map[string]interface{}{ + "id": r.URL.Path, + "name": "logs", + "type": "Microsoft.Web/sites/config", + "properties": map[string]interface{}{ + "applicationLogs": map[string]interface{}{ + "fileSystem": map[string]interface{}{ + "level": "Off", + }, + "azureBlobStorage": nil, + "azureTableStorage": nil, + }, + "httpLogs": map[string]interface{}{ + "fileSystem": map[string]interface{}{ + "retentionInMb": 35, + "retentionInDays": 0, + "enabled": false, + }, + "azureBlobStorage": nil, + }, + "failedRequestsTracing": map[string]interface{}{ + "enabled": false, + }, + "detailedErrorMessages": map[string]interface{}{ + "enabled": false, + }, + }, + } + + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(response) +} + +// ============================================================================= +// Web App Storage Accounts Handler +// 
============================================================================= + +func (s *Server) handleWebAppStorageAccounts(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + s.methodNotAllowed(w) + return + } + + // Return empty storage accounts + response := map[string]interface{}{ + "id": r.URL.Path, + "name": "azurestorageaccounts", + "type": "Microsoft.Web/sites/config", + "properties": map[string]interface{}{}, + } + + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(response) +} + +// ============================================================================= +// Web App Backups Handler +// ============================================================================= + +func (s *Server) handleWebAppBackups(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + s.methodNotAllowed(w) + return + } + + // Return empty backup config (no backup configured) + response := map[string]interface{}{ + "id": r.URL.Path, + "name": "backup", + "type": "Microsoft.Web/sites/config", + "properties": map[string]interface{}{ + "backupName": nil, + "enabled": false, + "storageAccountUrl": nil, + "backupSchedule": nil, + "databases": []interface{}{}, + }, + } + + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(response) +} + +// ============================================================================= +// Web App Metadata Handler +// ============================================================================= + +func (s *Server) handleWebAppMetadata(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + s.methodNotAllowed(w) + return + } + + // Return empty metadata + response := map[string]interface{}{ + "id": r.URL.Path, + "name": "metadata", + "type": "Microsoft.Web/sites/config", + "properties": map[string]interface{}{}, + } + + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(response) +} + +// ============================================================================= +// Web App Publishing Credentials Handler +// ============================================================================= + +func (s *Server) handleWebAppPubCreds(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + s.methodNotAllowed(w) + return + } + + path := r.URL.Path + parts := strings.Split(path, "/") + appName := parts[8] + + // Return publishing credentials + response := map[string]interface{}{ + "id": path, + "name": "publishingcredentials", + "type": "Microsoft.Web/sites/config", + "properties": map[string]interface{}{ + "name": "$" + appName, + "publishingUserName": "$" + appName, + "publishingPassword": "mock-publishing-password", + "scmUri": fmt.Sprintf("https://%s.scm.azurewebsites.net", appName), + }, + } + + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(response) +} + +// ============================================================================= +// Web App Config Fallback Handler (for any unhandled config endpoints) +// ============================================================================= + +func (s *Server) handleWebAppConfigFallback(w http.ResponseWriter, r *http.Request) { + // This handles any config endpoint we haven't explicitly implemented + // Return an empty properties response which should work for most cases + path := r.URL.Path + + // Extract config name from path + parts := strings.Split(path, "/") + configName := "unknown" + for i, p := range parts { + if p == "config" && i+1 < len(parts) { + configName = parts[i+1] + break + } + } + + 
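// Example (hypothetical path): a GET on ".../sites/my-app/config/pushsettings/list" has no dedicated handler, lands here, and is answered with configName "pushsettings" and empty properties. +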
response := map[string]interface{}{ + "id": path, + "name": configName, + "type": "Microsoft.Web/sites/config", + "properties": map[string]interface{}{}, + } + + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(response) +} + +// ============================================================================= +// Web App Basic Auth Policy Handler (ftp/scm publishing credentials) +// ============================================================================= + +func (s *Server) handleWebAppBasicAuthPolicy(w http.ResponseWriter, r *http.Request) { + path := r.URL.Path + parts := strings.Split(path, "/") + policyType := parts[len(parts)-1] // "ftp" or "scm" + + if r.Method != http.MethodGet && r.Method != http.MethodPut { + s.methodNotAllowed(w) + return + } + + // Return policy that allows basic auth + response := map[string]interface{}{ + "id": path, + "name": policyType, + "type": "Microsoft.Web/sites/basicPublishingCredentialsPolicies", + "properties": map[string]interface{}{ + "allow": true, + }, + } + + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(response) +} + +// ============================================================================= +// Web App Traffic Routing Handler +// Handles az webapp traffic-routing set/clear/show commands +// ============================================================================= + +func (s *Server) handleWebAppTrafficRouting(w http.ResponseWriter, r *http.Request) { + path := r.URL.Path + parts := strings.Split(path, "/") + + subscriptionID := parts[2] + resourceGroup := parts[4] + appName := parts[8] + + // Key for storing traffic routing rules + routingKey := fmt.Sprintf("%s:%s:%s", subscriptionID, resourceGroup, appName) + + switch r.Method { + case http.MethodGet: + // Return current traffic routing rules + s.store.mu.RLock() + rules, exists := s.store.trafficRouting[routingKey] + s.store.mu.RUnlock() + + if !exists { + // Return empty routing rules + response := []TrafficRoutingRule{} + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(response) + return + } + + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(rules) + + case http.MethodPost: + // Set traffic routing (from az webapp traffic-routing set) + var req struct { + SlotName string `json:"slotName"` + TrafficPercent int `json:"trafficPercent"` + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + s.badRequest(w, "Invalid request body") + return + } + + // Store the traffic routing rule + rules := []TrafficRoutingRule{ + { + ActionHostName: fmt.Sprintf("%s-%s.azurewebsites.net", appName, req.SlotName), + ReroutePercentage: req.TrafficPercent, + Name: req.SlotName, + }, + } + + s.store.mu.Lock() + s.store.trafficRouting[routingKey] = rules + s.store.mu.Unlock() + + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(rules) + + case http.MethodDelete: + // Clear traffic routing (from az webapp traffic-routing clear) + s.store.mu.Lock() + delete(s.store.trafficRouting, routingKey) + s.store.mu.Unlock() + + // Return empty array + response := []TrafficRoutingRule{} + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(response) + + default: + s.methodNotAllowed(w) + } +} + +// ============================================================================= +// Web App Check Name Availability Handler +// ============================================================================= + +func (s *Server) handleWebAppCheckName(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + s.methodNotAllowed(w) + return + } 
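+ // Example exchange (shapes taken from the structs below; the exact wire format is assumed, not captured from Azure): + // request: {"name":"my-app","type":"Microsoft.Web/sites"} -> response: {"nameAvailable":true}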
+ + var req struct { + Name string `json:"name"` + Type string `json:"type"` + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + s.badRequest(w, "Invalid request body") + return + } + + // Always return that the name is available (for testing purposes) + response := struct { + NameAvailable bool `json:"nameAvailable"` + Reason string `json:"reason,omitempty"` + Message string `json:"message,omitempty"` + }{ + NameAvailable: true, + } + + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(response) +} + +// ============================================================================= +// Linux Web App Handler +// ============================================================================= + +func (s *Server) handleLinuxWebApp(w http.ResponseWriter, r *http.Request) { + path := r.URL.Path + parts := strings.Split(path, "/") + + subscriptionID := parts[2] + resourceGroup := parts[4] + appName := parts[8] + + resourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Web/sites/%s", + subscriptionID, resourceGroup, appName) + // Use lowercase key for storage to handle case-insensitive lookups + storeKey := strings.ToLower(resourceID) + + switch r.Method { + case http.MethodPut: + var req struct { + Location string `json:"location"` + Tags map[string]string `json:"tags"` + Kind string `json:"kind"` + Identity *AppIdentity `json:"identity"` + Properties struct { + ServerFarmID string `json:"serverFarmId"` + HTTPSOnly bool `json:"httpsOnly"` + ClientAffinityEnabled bool `json:"clientAffinityEnabled"` + SiteConfig *WebAppSiteConfig `json:"siteConfig"` + } `json:"properties"` + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + s.badRequest(w, "Invalid request body") + return + } + + // Generate mock identity if system-assigned requested + var identity *AppIdentity + if req.Identity != nil && (req.Identity.Type == "SystemAssigned" || req.Identity.Type == "SystemAssigned, UserAssigned") { + identity = &AppIdentity{ + Type: req.Identity.Type, + PrincipalID: fmt.Sprintf("principal-%s", appName), + TenantID: "mock-tenant-id", + UserIDs: req.Identity.UserIDs, + } + } else if req.Identity != nil { + identity = req.Identity + } + + app := LinuxWebApp{ + ID: resourceID, + Name: appName, + Type: "Microsoft.Web/sites", + Location: req.Location, + Tags: req.Tags, + Kind: req.Kind, + Identity: identity, + Properties: LinuxWebAppProps{ + ProvisioningState: "Succeeded", + State: "Running", + DefaultHostName: fmt.Sprintf("%s.azurewebsites.net", appName), + ServerFarmID: req.Properties.ServerFarmID, + HTTPSOnly: req.Properties.HTTPSOnly, + ClientAffinityEnabled: req.Properties.ClientAffinityEnabled, + OutboundIPAddresses: "20.42.0.1,20.42.0.2,20.42.0.3", + PossibleOutboundIPAddresses: "20.42.0.1,20.42.0.2,20.42.0.3,20.42.0.4,20.42.0.5", + CustomDomainVerificationID: fmt.Sprintf("verification-id-%s", appName), + SiteConfig: req.Properties.SiteConfig, + }, + } + + s.store.mu.Lock() + s.store.linuxWebApps[storeKey] = app + s.store.mu.Unlock() + + // Azure SDK for azurerm provider expects 200 for PUT operations + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(app) + + case http.MethodGet: + s.store.mu.RLock() + app, exists := s.store.linuxWebApps[storeKey] + s.store.mu.RUnlock() + + if !exists { + s.resourceNotFound(w, "Web App", appName) + return + } + + json.NewEncoder(w).Encode(app) + + case http.MethodDelete: + s.store.mu.Lock() + delete(s.store.linuxWebApps, storeKey) + // Also delete associated slots (use lowercase prefix for consistency) + 
slotPrefix := strings.ToLower(resourceID + "/slots/") + for k := range s.store.webAppSlots { + if strings.HasPrefix(strings.ToLower(k), slotPrefix) { + delete(s.store.webAppSlots, k) + } + } + s.store.mu.Unlock() + + w.WriteHeader(http.StatusOK) + + default: + s.methodNotAllowed(w) + } +} + +// ============================================================================= +// Web App Config Handler +// ============================================================================= + +func (s *Server) handleWebAppConfig(w http.ResponseWriter, r *http.Request) { + path := r.URL.Path + parts := strings.Split(path, "/") + + subscriptionID := parts[2] + resourceGroup := parts[4] + appName := parts[8] + + appResourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Web/sites/%s", + subscriptionID, resourceGroup, appName) + // Use lowercase key for storage to handle case-insensitive lookups + storeKey := strings.ToLower(appResourceID) + + switch r.Method { + case http.MethodPut, http.MethodPatch: + var req struct { + Properties WebAppSiteConfig `json:"properties"` + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + s.badRequest(w, "Invalid request body") + return + } + + s.store.mu.Lock() + if app, exists := s.store.linuxWebApps[storeKey]; exists { + app.Properties.SiteConfig = &req.Properties + s.store.linuxWebApps[storeKey] = app + } + s.store.mu.Unlock() + + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(map[string]interface{}{ + "properties": req.Properties, + }) + + case http.MethodGet: + s.store.mu.RLock() + app, exists := s.store.linuxWebApps[storeKey] + s.store.mu.RUnlock() + + if !exists { + s.resourceNotFound(w, "Web App", appName) + return + } + + config := app.Properties.SiteConfig + if config == nil { + config = &WebAppSiteConfig{} + } + // Ensure Experiments is always initialized (Azure CLI expects it for traffic routing) + if config.Experiments == nil { + config.Experiments = &WebAppExperiments{ + RampUpRules: []RampUpRule{}, + } + } + + json.NewEncoder(w).Encode(map[string]interface{}{ + "properties": config, + }) + + default: + s.methodNotAllowed(w) + } +} + +// ============================================================================= +// Web App Slot Handler +// ============================================================================= + +func (s *Server) handleWebAppSlot(w http.ResponseWriter, r *http.Request) { + path := r.URL.Path + parts := strings.Split(path, "/") + + subscriptionID := parts[2] + resourceGroup := parts[4] + appName := parts[8] + slotName := parts[10] + + resourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Web/sites/%s/slots/%s", + subscriptionID, resourceGroup, appName, slotName) + + switch r.Method { + case http.MethodPut: + var req struct { + Location string `json:"location"` + Tags map[string]string `json:"tags"` + Kind string `json:"kind"` + Properties struct { + ServerFarmID string `json:"serverFarmId"` + SiteConfig *WebAppSiteConfig `json:"siteConfig"` + } `json:"properties"` + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + s.badRequest(w, "Invalid request body") + return + } + + slot := WebAppSlot{ + ID: resourceID, + Name: fmt.Sprintf("%s/%s", appName, slotName), + Type: "Microsoft.Web/sites/slots", + Location: req.Location, + Tags: req.Tags, + Kind: req.Kind, + Properties: LinuxWebAppProps{ + ProvisioningState: "Succeeded", + State: "Running", + DefaultHostName: fmt.Sprintf("%s-%s.azurewebsites.net", appName, slotName), + ServerFarmID: 
req.Properties.ServerFarmID, + OutboundIPAddresses: "20.42.0.1,20.42.0.2,20.42.0.3", + PossibleOutboundIPAddresses: "20.42.0.1,20.42.0.2,20.42.0.3,20.42.0.4,20.42.0.5", + CustomDomainVerificationID: fmt.Sprintf("verification-id-%s-%s", appName, slotName), + SiteConfig: req.Properties.SiteConfig, + }, + } + + s.store.mu.Lock() + s.store.webAppSlots[resourceID] = slot + s.store.mu.Unlock() + + // Azure SDK for azurerm provider expects 200 for PUT operations + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(slot) + + case http.MethodGet: + s.store.mu.RLock() + slot, exists := s.store.webAppSlots[resourceID] + s.store.mu.RUnlock() + + if !exists { + s.resourceNotFound(w, "Web App Slot", slotName) + return + } + + json.NewEncoder(w).Encode(slot) + + case http.MethodDelete: + s.store.mu.Lock() + delete(s.store.webAppSlots, resourceID) + s.store.mu.Unlock() + + w.WriteHeader(http.StatusOK) + + default: + s.methodNotAllowed(w) + } +} + +// ============================================================================= +// Web App Slot Config Handler +// ============================================================================= + +func (s *Server) handleWebAppSlotConfig(w http.ResponseWriter, r *http.Request) { + path := r.URL.Path + parts := strings.Split(path, "/") + + subscriptionID := parts[2] + resourceGroup := parts[4] + appName := parts[8] + slotName := parts[10] + + slotResourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Web/sites/%s/slots/%s", + subscriptionID, resourceGroup, appName, slotName) + + switch r.Method { + case http.MethodGet: + // Return the site config from the stored slot + s.store.mu.RLock() + slot, exists := s.store.webAppSlots[slotResourceID] + s.store.mu.RUnlock() + + if !exists { + s.resourceNotFound(w, "Web App Slot", slotName) + return + } + + // Return site config + config := struct { + ID string `json:"id"` + Name string `json:"name"` + Type string `json:"type"` + Properties *WebAppSiteConfig `json:"properties"` + }{ + ID: slotResourceID + "/config/web", + Name: "web", + Type: "Microsoft.Web/sites/slots/config", + Properties: slot.Properties.SiteConfig, + } + + // If no site config stored, return a default + if config.Properties == nil { + config.Properties = &WebAppSiteConfig{ + AlwaysOn: false, + HTTP20Enabled: true, + MinTLSVersion: "1.2", + FtpsState: "Disabled", + LinuxFxVersion: "DOCKER|nginx:latest", + WebSocketsEnabled: false, + } + } + // Ensure Experiments is always initialized (Azure CLI expects it for traffic routing) + if config.Properties.Experiments == nil { + config.Properties.Experiments = &WebAppExperiments{ + RampUpRules: []RampUpRule{}, + } + } + + json.NewEncoder(w).Encode(config) + + case http.MethodPut: + var req struct { + Properties *WebAppSiteConfig `json:"properties"` + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + s.badRequest(w, "Invalid request body") + return + } + + // Update the slot's site config + s.store.mu.Lock() + if slot, exists := s.store.webAppSlots[slotResourceID]; exists { + slot.Properties.SiteConfig = req.Properties + s.store.webAppSlots[slotResourceID] = slot + } + s.store.mu.Unlock() + + config := struct { + ID string `json:"id"` + Name string `json:"name"` + Type string `json:"type"` + Properties *WebAppSiteConfig `json:"properties"` + }{ + ID: slotResourceID + "/config/web", + Name: "web", + Type: "Microsoft.Web/sites/slots/config", + Properties: req.Properties, + } + + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(config) + + default: + 
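+		// Anything other than GET or PUT gets an Azure-style 405 error body.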
s.methodNotAllowed(w) + } +} + +// ============================================================================= +// Web App Slot Config Fallback Handler +// Handles various slot config endpoints like appSettings, connectionstrings, etc. +// ============================================================================= + +func (s *Server) handleWebAppSlotConfigFallback(w http.ResponseWriter, r *http.Request) { + path := r.URL.Path + parts := strings.Split(path, "/") + + subscriptionID := parts[2] + resourceGroup := parts[4] + appName := parts[8] + slotName := parts[10] + configType := parts[12] + + slotResourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Web/sites/%s/slots/%s", + subscriptionID, resourceGroup, appName, slotName) + + // Check if slot exists + s.store.mu.RLock() + _, exists := s.store.webAppSlots[slotResourceID] + s.store.mu.RUnlock() + + if !exists { + s.resourceNotFound(w, "Web App Slot", slotName) + return + } + + // Return empty/default response for various config types + switch configType { + case "appSettings": + json.NewEncoder(w).Encode(map[string]interface{}{ + "id": slotResourceID + "/config/appSettings", + "name": "appSettings", + "type": "Microsoft.Web/sites/slots/config", + "properties": map[string]string{}, + }) + case "connectionstrings": + json.NewEncoder(w).Encode(map[string]interface{}{ + "id": slotResourceID + "/config/connectionstrings", + "name": "connectionstrings", + "type": "Microsoft.Web/sites/slots/config", + "properties": map[string]interface{}{}, + }) + case "authsettings": + json.NewEncoder(w).Encode(map[string]interface{}{ + "id": slotResourceID + "/config/authsettings", + "name": "authsettings", + "type": "Microsoft.Web/sites/slots/config", + "properties": map[string]interface{}{ + "enabled": false, + }, + }) + case "authsettingsV2": + json.NewEncoder(w).Encode(map[string]interface{}{ + "id": slotResourceID + "/config/authsettingsV2", + "name": "authsettingsV2", + "type": "Microsoft.Web/sites/slots/config", + "properties": map[string]interface{}{ + "platform": map[string]interface{}{ + "enabled": false, + }, + }, + }) + case "logs": + json.NewEncoder(w).Encode(map[string]interface{}{ + "id": slotResourceID + "/config/logs", + "name": "logs", + "type": "Microsoft.Web/sites/slots/config", + "properties": map[string]interface{}{ + "applicationLogs": map[string]interface{}{ + "fileSystem": map[string]interface{}{ + "level": "Off", + }, + }, + "httpLogs": map[string]interface{}{ + "fileSystem": map[string]interface{}{ + "enabled": false, + }, + }, + "detailedErrorMessages": map[string]interface{}{ + "enabled": false, + }, + "failedRequestsTracing": map[string]interface{}{ + "enabled": false, + }, + }, + }) + case "slotConfigNames": + json.NewEncoder(w).Encode(map[string]interface{}{ + "id": slotResourceID + "/config/slotConfigNames", + "name": "slotConfigNames", + "type": "Microsoft.Web/sites/slots/config", + "properties": map[string]interface{}{ + "appSettingNames": []string{}, + "connectionStringNames": []string{}, + }, + }) + case "azurestorageaccounts": + json.NewEncoder(w).Encode(map[string]interface{}{ + "id": slotResourceID + "/config/azurestorageaccounts", + "name": "azurestorageaccounts", + "type": "Microsoft.Web/sites/slots/config", + "properties": map[string]interface{}{}, + }) + case "backup": + json.NewEncoder(w).Encode(map[string]interface{}{ + "id": slotResourceID + "/config/backup", + "name": "backup", + "type": "Microsoft.Web/sites/slots/config", + "properties": map[string]interface{}{ + "enabled": 
false, + }, + }) + case "metadata": + json.NewEncoder(w).Encode(map[string]interface{}{ + "id": slotResourceID + "/config/metadata", + "name": "metadata", + "type": "Microsoft.Web/sites/slots/config", + "properties": map[string]interface{}{}, + }) + case "publishingcredentials": + json.NewEncoder(w).Encode(map[string]interface{}{ + "id": slotResourceID + "/config/publishingcredentials", + "name": "publishingcredentials", + "type": "Microsoft.Web/sites/slots/config", + "properties": map[string]interface{}{ + "publishingUserName": fmt.Sprintf("$%s__%s", appName, slotName), + "publishingPassword": "mock-password", + }, + }) + default: + // Generic empty response for unknown config types + json.NewEncoder(w).Encode(map[string]interface{}{ + "id": fmt.Sprintf("%s/config/%s", slotResourceID, configType), + "name": configType, + "type": "Microsoft.Web/sites/slots/config", + "properties": map[string]interface{}{}, + }) + } +} + +// ============================================================================= +// Web App Slot Basic Auth Policy Handler +// Handles /sites/{app}/slots/{slot}/basicPublishingCredentialsPolicies/(ftp|scm) +// ============================================================================= + +func (s *Server) handleWebAppSlotBasicAuthPolicy(w http.ResponseWriter, r *http.Request) { + path := r.URL.Path + parts := strings.Split(path, "/") + + subscriptionID := parts[2] + resourceGroup := parts[4] + appName := parts[8] + slotName := parts[10] + policyType := parts[12] // "ftp" or "scm" + + slotResourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Web/sites/%s/slots/%s", + subscriptionID, resourceGroup, appName, slotName) + + policyID := fmt.Sprintf("%s/basicPublishingCredentialsPolicies/%s", slotResourceID, policyType) + + switch r.Method { + case http.MethodGet: + // Return default policy (basic auth allowed) + json.NewEncoder(w).Encode(map[string]interface{}{ + "id": policyID, + "name": policyType, + "type": "Microsoft.Web/sites/slots/basicPublishingCredentialsPolicies", + "properties": map[string]interface{}{ + "allow": true, + }, + }) + + case http.MethodPut: + var req struct { + Properties struct { + Allow bool `json:"allow"` + } `json:"properties"` + } + json.NewDecoder(r.Body).Decode(&req) + + response := map[string]interface{}{ + "id": policyID, + "name": policyType, + "type": "Microsoft.Web/sites/slots/basicPublishingCredentialsPolicies", + "properties": map[string]interface{}{ + "allow": req.Properties.Allow, + }, + } + + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(response) + + default: + s.methodNotAllowed(w) + } +} + +// ============================================================================= +// Log Analytics Workspace Handler +// ============================================================================= + +func (s *Server) handleLogAnalytics(w http.ResponseWriter, r *http.Request) { + path := r.URL.Path + parts := strings.Split(path, "/") + + subscriptionID := parts[2] + resourceGroup := parts[4] + workspaceName := parts[8] + + resourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.OperationalInsights/workspaces/%s", + subscriptionID, resourceGroup, workspaceName) + + switch r.Method { + case http.MethodPut: + var req struct { + Location string `json:"location"` + Tags map[string]string `json:"tags"` + Properties struct { + Sku struct { + Name string `json:"name"` + } `json:"sku"` + RetentionInDays int `json:"retentionInDays"` + } `json:"properties"` + } + if err := 
json.NewDecoder(r.Body).Decode(&req); err != nil { + s.badRequest(w, "Invalid request body") + return + } + + workspace := LogAnalyticsWorkspace{ + ID: resourceID, + Name: workspaceName, + Type: "Microsoft.OperationalInsights/workspaces", + Location: req.Location, + Tags: req.Tags, + Properties: LogAnalyticsWorkspaceProps{ + ProvisioningState: "Succeeded", + CustomerID: fmt.Sprintf("customer-id-%s", workspaceName), + Sku: struct { + Name string `json:"name"` + }{ + Name: req.Properties.Sku.Name, + }, + RetentionInDays: req.Properties.RetentionInDays, + }, + } + + s.store.mu.Lock() + s.store.logAnalyticsWorkspaces[resourceID] = workspace + s.store.mu.Unlock() + + w.WriteHeader(http.StatusCreated) + json.NewEncoder(w).Encode(workspace) + + case http.MethodGet: + s.store.mu.RLock() + workspace, exists := s.store.logAnalyticsWorkspaces[resourceID] + s.store.mu.RUnlock() + + if !exists { + s.resourceNotFound(w, "Log Analytics Workspace", workspaceName) + return + } + + json.NewEncoder(w).Encode(workspace) + + case http.MethodDelete: + s.store.mu.Lock() + delete(s.store.logAnalyticsWorkspaces, resourceID) + s.store.mu.Unlock() + + w.WriteHeader(http.StatusOK) + + default: + s.methodNotAllowed(w) + } +} + +// ============================================================================= +// Application Insights Handler +// ============================================================================= + +func (s *Server) handleAppInsights(w http.ResponseWriter, r *http.Request) { + path := r.URL.Path + parts := strings.Split(path, "/") + + subscriptionID := parts[2] + resourceGroup := parts[4] + insightsName := parts[8] + + resourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Insights/components/%s", + subscriptionID, resourceGroup, insightsName) + + switch r.Method { + case http.MethodPut: + var req struct { + Location string `json:"location"` + Tags map[string]string `json:"tags"` + Kind string `json:"kind"` + Properties struct { + ApplicationType string `json:"Application_Type"` + WorkspaceResourceID string `json:"WorkspaceResourceId"` + } `json:"properties"` + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + s.badRequest(w, "Invalid request body") + return + } + + instrumentationKey := fmt.Sprintf("ikey-%s", insightsName) + appID := fmt.Sprintf("appid-%s", insightsName) + + insights := ApplicationInsights{ + ID: resourceID, + Name: insightsName, + Type: "Microsoft.Insights/components", + Location: req.Location, + Tags: req.Tags, + Kind: req.Kind, + Properties: ApplicationInsightsProps{ + ProvisioningState: "Succeeded", + ApplicationID: appID, + InstrumentationKey: instrumentationKey, + ConnectionString: fmt.Sprintf("InstrumentationKey=%s;IngestionEndpoint=https://eastus-0.in.applicationinsights.azure.com/", instrumentationKey), + WorkspaceResourceID: req.Properties.WorkspaceResourceID, + }, + } + + s.store.mu.Lock() + s.store.appInsights[resourceID] = insights + s.store.mu.Unlock() + + w.WriteHeader(http.StatusCreated) + json.NewEncoder(w).Encode(insights) + + case http.MethodGet: + s.store.mu.RLock() + insights, exists := s.store.appInsights[resourceID] + s.store.mu.RUnlock() + + if !exists { + s.resourceNotFound(w, "Application Insights", insightsName) + return + } + + json.NewEncoder(w).Encode(insights) + + case http.MethodDelete: + s.store.mu.Lock() + delete(s.store.appInsights, resourceID) + s.store.mu.Unlock() + + w.WriteHeader(http.StatusOK) + + default: + s.methodNotAllowed(w) + } +} + +// 
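+// Example (illustrative name): after a PUT for component "my-app", a GET on
+// .../Microsoft.Insights/components/my-app returns the synthetic connection
+// string "InstrumentationKey=ikey-my-app;IngestionEndpoint=https://eastus-0.in.applicationinsights.azure.com/".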
============================================================================= +// Autoscale Setting Handler +// ============================================================================= + +func (s *Server) handleAutoscaleSetting(w http.ResponseWriter, r *http.Request) { + path := r.URL.Path + parts := strings.Split(path, "/") + + subscriptionID := parts[2] + resourceGroup := parts[4] + settingName := parts[8] + + resourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Insights/autoscalesettings/%s", + subscriptionID, resourceGroup, settingName) + + switch r.Method { + case http.MethodPut: + var req struct { + Location string `json:"location"` + Tags map[string]string `json:"tags"` + Properties AutoscaleSettingProps `json:"properties"` + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + s.badRequest(w, "Invalid request body") + return + } + + setting := AutoscaleSetting{ + ID: resourceID, + Name: settingName, + Type: "Microsoft.Insights/autoscalesettings", + Location: req.Location, + Tags: req.Tags, + Properties: AutoscaleSettingProps{ + ProvisioningState: "Succeeded", + Enabled: req.Properties.Enabled, + TargetResourceURI: req.Properties.TargetResourceURI, + TargetResourceLocation: req.Location, + Profiles: req.Properties.Profiles, + Notifications: req.Properties.Notifications, + }, + } + + s.store.mu.Lock() + s.store.autoscaleSettings[resourceID] = setting + s.store.mu.Unlock() + + w.WriteHeader(http.StatusCreated) + json.NewEncoder(w).Encode(setting) + + case http.MethodGet: + s.store.mu.RLock() + setting, exists := s.store.autoscaleSettings[resourceID] + s.store.mu.RUnlock() + + if !exists { + s.resourceNotFound(w, "Autoscale Setting", settingName) + return + } + + json.NewEncoder(w).Encode(setting) + + case http.MethodDelete: + s.store.mu.Lock() + delete(s.store.autoscaleSettings, resourceID) + s.store.mu.Unlock() + + w.WriteHeader(http.StatusOK) + + default: + s.methodNotAllowed(w) + } +} + +// ============================================================================= +// Action Group Handler +// ============================================================================= + +func (s *Server) handleActionGroup(w http.ResponseWriter, r *http.Request) { + path := r.URL.Path + parts := strings.Split(path, "/") + + subscriptionID := parts[2] + resourceGroup := parts[4] + groupName := parts[8] + + resourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Insights/actionGroups/%s", + subscriptionID, resourceGroup, groupName) + + switch r.Method { + case http.MethodPut: + var req struct { + Location string `json:"location"` + Tags map[string]string `json:"tags"` + Properties ActionGroupProps `json:"properties"` + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + s.badRequest(w, "Invalid request body") + return + } + + group := ActionGroup{ + ID: resourceID, + Name: groupName, + Type: "Microsoft.Insights/actionGroups", + Location: "global", + Tags: req.Tags, + Properties: ActionGroupProps{ + GroupShortName: req.Properties.GroupShortName, + Enabled: req.Properties.Enabled, + EmailReceivers: req.Properties.EmailReceivers, + WebhookReceivers: req.Properties.WebhookReceivers, + }, + } + + s.store.mu.Lock() + s.store.actionGroups[resourceID] = group + s.store.mu.Unlock() + + w.WriteHeader(http.StatusCreated) + json.NewEncoder(w).Encode(group) + + case http.MethodGet: + s.store.mu.RLock() + group, exists := s.store.actionGroups[resourceID] + s.store.mu.RUnlock() + + if !exists { + s.resourceNotFound(w, 
"Action Group", groupName) + return + } + + json.NewEncoder(w).Encode(group) + + case http.MethodDelete: + s.store.mu.Lock() + delete(s.store.actionGroups, resourceID) + s.store.mu.Unlock() + + w.WriteHeader(http.StatusOK) + + default: + s.methodNotAllowed(w) + } +} + +// ============================================================================= +// Metric Alert Handler +// ============================================================================= + +func (s *Server) handleMetricAlert(w http.ResponseWriter, r *http.Request) { + path := r.URL.Path + parts := strings.Split(path, "/") + + subscriptionID := parts[2] + resourceGroup := parts[4] + alertName := parts[8] + + resourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Insights/metricAlerts/%s", + subscriptionID, resourceGroup, alertName) + + switch r.Method { + case http.MethodPut: + var req struct { + Location string `json:"location"` + Tags map[string]string `json:"tags"` + Properties MetricAlertProps `json:"properties"` + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + s.badRequest(w, "Invalid request body") + return + } + + alert := MetricAlert{ + ID: resourceID, + Name: alertName, + Type: "Microsoft.Insights/metricAlerts", + Location: "global", + Tags: req.Tags, + Properties: MetricAlertProps{ + Description: req.Properties.Description, + Severity: req.Properties.Severity, + Enabled: req.Properties.Enabled, + Scopes: req.Properties.Scopes, + EvaluationFrequency: req.Properties.EvaluationFrequency, + WindowSize: req.Properties.WindowSize, + Criteria: req.Properties.Criteria, + Actions: req.Properties.Actions, + }, + } + + s.store.mu.Lock() + s.store.metricAlerts[resourceID] = alert + s.store.mu.Unlock() + + w.WriteHeader(http.StatusCreated) + json.NewEncoder(w).Encode(alert) + + case http.MethodGet: + s.store.mu.RLock() + alert, exists := s.store.metricAlerts[resourceID] + s.store.mu.RUnlock() + + if !exists { + s.resourceNotFound(w, "Metric Alert", alertName) + return + } + + json.NewEncoder(w).Encode(alert) + + case http.MethodDelete: + s.store.mu.Lock() + delete(s.store.metricAlerts, resourceID) + s.store.mu.Unlock() + + w.WriteHeader(http.StatusOK) + + default: + s.methodNotAllowed(w) + } +} + +// ============================================================================= +// Diagnostic Setting Handler +// ============================================================================= + +func (s *Server) handleDiagnosticSetting(w http.ResponseWriter, r *http.Request) { + path := r.URL.Path + // Diagnostic settings are nested under resources, extract name from end + parts := strings.Split(path, "/") + settingName := parts[len(parts)-1] + + // Use full path as resource ID + resourceID := path + + switch r.Method { + case http.MethodPut: + var req struct { + Properties DiagnosticSettingProps `json:"properties"` + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + s.badRequest(w, "Invalid request body") + return + } + + setting := DiagnosticSetting{ + ID: resourceID, + Name: settingName, + Type: "Microsoft.Insights/diagnosticSettings", + Properties: DiagnosticSettingProps{ + WorkspaceID: req.Properties.WorkspaceID, + Logs: req.Properties.Logs, + Metrics: req.Properties.Metrics, + }, + } + + s.store.mu.Lock() + s.store.diagnosticSettings[resourceID] = setting + s.store.mu.Unlock() + + w.WriteHeader(http.StatusCreated) + json.NewEncoder(w).Encode(setting) + + case http.MethodGet: + s.store.mu.RLock() + setting, exists := s.store.diagnosticSettings[resourceID] + 
s.store.mu.RUnlock() + + if !exists { + s.resourceNotFound(w, "Diagnostic Setting", settingName) + return + } + + json.NewEncoder(w).Encode(setting) + + case http.MethodDelete: + s.store.mu.Lock() + delete(s.store.diagnosticSettings, resourceID) + s.store.mu.Unlock() + + w.WriteHeader(http.StatusOK) + + default: + s.methodNotAllowed(w) + } +} + +// ============================================================================= +// Error Responses +// ============================================================================= + +func (s *Server) notFound(w http.ResponseWriter, path string) { + w.WriteHeader(http.StatusNotFound) + json.NewEncoder(w).Encode(AzureError{ + Error: AzureErrorDetail{ + Code: "PathNotFound", + Message: fmt.Sprintf("The path '%s' is not a valid Azure API path", path), + }, + }) +} + +func (s *Server) resourceNotFound(w http.ResponseWriter, resourceType, name string) { + w.WriteHeader(http.StatusNotFound) + json.NewEncoder(w).Encode(AzureError{ + Error: AzureErrorDetail{ + Code: "ResourceNotFound", + Message: fmt.Sprintf("The %s '%s' was not found.", resourceType, name), + }, + }) +} + +func (s *Server) badRequest(w http.ResponseWriter, message string) { + w.WriteHeader(http.StatusBadRequest) + json.NewEncoder(w).Encode(AzureError{ + Error: AzureErrorDetail{ + Code: "BadRequest", + Message: message, + }, + }) +} + +func (s *Server) methodNotAllowed(w http.ResponseWriter) { + w.WriteHeader(http.StatusMethodNotAllowed) + json.NewEncoder(w).Encode(AzureError{ + Error: AzureErrorDetail{ + Code: "MethodNotAllowed", + Message: "The HTTP method is not allowed for this resource", + }, + }) +} + +// ============================================================================= +// OAuth Token Handler (for Azure AD authentication) +// ============================================================================= + +type OAuthToken struct { + AccessToken string `json:"access_token"` + ExpiresIn int `json:"expires_in"` + ExpiresOn int64 `json:"expires_on,omitempty"` + NotBefore int64 `json:"not_before,omitempty"` + TokenType string `json:"token_type"` + Resource string `json:"resource,omitempty"` + Scope string `json:"scope,omitempty"` + RefreshToken string `json:"refresh_token,omitempty"` +} + +func (s *Server) handleOpenIDConfiguration(w http.ResponseWriter, r *http.Request) { + // Return OpenID Connect configuration document + // This is required by MSAL for Azure CLI authentication + host := r.Host + if host == "" { + host = "login.microsoftonline.com" + } + + config := map[string]interface{}{ + "issuer": fmt.Sprintf("https://%s/mock-tenant-id/v2.0", host), + "authorization_endpoint": fmt.Sprintf("https://%s/mock-tenant-id/oauth2/v2.0/authorize", host), + "token_endpoint": fmt.Sprintf("https://%s/mock-tenant-id/oauth2/v2.0/token", host), + "device_authorization_endpoint": fmt.Sprintf("https://%s/mock-tenant-id/oauth2/v2.0/devicecode", host), + "userinfo_endpoint": fmt.Sprintf("https://%s/oidc/userinfo", host), + "end_session_endpoint": fmt.Sprintf("https://%s/mock-tenant-id/oauth2/v2.0/logout", host), + "jwks_uri": fmt.Sprintf("https://%s/mock-tenant-id/discovery/v2.0/keys", host), + "response_types_supported": []string{"code", "id_token", "code id_token", "token id_token", "token"}, + "response_modes_supported": []string{"query", "fragment", "form_post"}, + "subject_types_supported": []string{"pairwise"}, + "id_token_signing_alg_values_supported": []string{"RS256"}, + "scopes_supported": []string{"openid", "profile", "email", "offline_access"}, + 
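+		// Every endpoint above is derived from r.Host, so when the test
+		// runner redirects login.microsoftonline.com to this mock, MSAL's
+		// follow-up requests stay inside the test network.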
"token_endpoint_auth_methods_supported": []string{"client_secret_post", "client_secret_basic"}, + "claims_supported": []string{"sub", "iss", "aud", "exp", "iat", "name", "email"}, + "tenant_region_scope": "NA", + "cloud_instance_name": "microsoftonline.com", + "cloud_graph_host_name": "graph.windows.net", + "msgraph_host": "graph.microsoft.com", + } + + json.NewEncoder(w).Encode(config) +} + +func (s *Server) handleInstanceDiscovery(w http.ResponseWriter, r *http.Request) { + // Return instance discovery response for MSAL + response := map[string]interface{}{ + "tenant_discovery_endpoint": "https://login.microsoftonline.com/mock-tenant-id/v2.0/.well-known/openid-configuration", + "api-version": "1.1", + "metadata": []map[string]interface{}{ + { + "preferred_network": "login.microsoftonline.com", + "preferred_cache": "login.windows.net", + "aliases": []string{"login.microsoftonline.com", "login.windows.net", "login.microsoft.com"}, + }, + }, + } + + json.NewEncoder(w).Encode(response) +} + +func (s *Server) handleOAuth(w http.ResponseWriter, r *http.Request) { + // Return a mock OAuth token that looks like a valid JWT + // JWT format: header.payload.signature (all base64url encoded) + // The Azure SDK parses claims from the token, so it must be valid JWT format + + now := time.Now().Unix() + exp := now + 3600 + + // JWT Header (typ: JWT, alg: RS256) + header := "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9" + + // JWT Payload with required Azure claims + // Decoded: {"aud":"https://management.azure.com/","iss":"https://sts.windows.net/mock-tenant-id/","iat":NOW,"nbf":NOW,"exp":EXP,"oid":"mock-object-id","sub":"mock-subject","tid":"mock-tenant-id"} + payloadJSON := fmt.Sprintf(`{"aud":"https://management.azure.com/","iss":"https://sts.windows.net/mock-tenant-id/","iat":%d,"nbf":%d,"exp":%d,"oid":"mock-object-id","sub":"mock-subject","tid":"mock-tenant-id"}`, now, now, exp) + payload := base64.RawURLEncoding.EncodeToString([]byte(payloadJSON)) + + // Mock signature (doesn't need to be valid, just present) + signature := "mock-signature-placeholder" + + mockJWT := header + "." + payload + "." 
+ signature + + token := OAuthToken{ + AccessToken: mockJWT, + ExpiresIn: 3600, + ExpiresOn: exp, + NotBefore: now, + TokenType: "Bearer", + Resource: "https://management.azure.com/", + Scope: "https://management.azure.com/.default", + RefreshToken: "mock-refresh-token", + } + json.NewEncoder(w).Encode(token) +} + +// ============================================================================= +// Provider Registration Handler +// ============================================================================= + +func (s *Server) handleListProviders(w http.ResponseWriter, r *http.Request) { + // Return a list of registered providers that the azurerm provider needs + providers := []map[string]interface{}{ + {"namespace": "Microsoft.Cdn", "registrationState": "Registered"}, + {"namespace": "Microsoft.Network", "registrationState": "Registered"}, + {"namespace": "Microsoft.Storage", "registrationState": "Registered"}, + {"namespace": "Microsoft.Resources", "registrationState": "Registered"}, + {"namespace": "Microsoft.Authorization", "registrationState": "Registered"}, + {"namespace": "Microsoft.Web", "registrationState": "Registered"}, + {"namespace": "Microsoft.Insights", "registrationState": "Registered"}, + {"namespace": "Microsoft.OperationalInsights", "registrationState": "Registered"}, + } + response := map[string]interface{}{ + "value": providers, + } + json.NewEncoder(w).Encode(response) +} + +func (s *Server) handleProviderRegistration(w http.ResponseWriter, r *http.Request) { + // Return success for provider registration checks + response := map[string]interface{}{ + "registrationState": "Registered", + } + json.NewEncoder(w).Encode(response) +} + +// ============================================================================= +// Subscription Handler +// ============================================================================= + +func (s *Server) handleSubscription(w http.ResponseWriter, r *http.Request) { + path := r.URL.Path + parts := strings.Split(path, "/") + subscriptionID := parts[2] + + subscription := map[string]interface{}{ + "id": fmt.Sprintf("/subscriptions/%s", subscriptionID), + "subscriptionId": subscriptionID, + "displayName": "Mock Subscription", + "state": "Enabled", + } + json.NewEncoder(w).Encode(subscription) +} + +// ============================================================================= +// Main +// ============================================================================= + +func main() { + server := NewServer() + + log.Println("Azure Mock API Server") + log.Println("=====================") + log.Println("ARM Endpoints:") + log.Println(" OAuth Token: /{tenant}/oauth2/token (POST)") + log.Println(" Subscriptions: /subscriptions/{sub}") + log.Println(" CDN Profiles: .../Microsoft.Cdn/profiles/{name}") + log.Println(" CDN Endpoints: .../Microsoft.Cdn/profiles/{profile}/endpoints/{name}") + log.Println(" DNS Zones: .../Microsoft.Network/dnszones/{name}") + log.Println(" DNS CNAME: .../Microsoft.Network/dnszones/{zone}/CNAME/{name}") + log.Println(" Storage Accounts: .../Microsoft.Storage/storageAccounts/{name}") + log.Println("") + log.Println("App Service Endpoints:") + log.Println(" Service Plans: .../Microsoft.Web/serverfarms/{name}") + log.Println(" Web Apps: .../Microsoft.Web/sites/{name}") + log.Println(" Web App Slots: .../Microsoft.Web/sites/{app}/slots/{slot}") + log.Println(" Web App Config: .../Microsoft.Web/sites/{app}/config/web") + log.Println("") + log.Println("Monitoring Endpoints:") + log.Println(" Log Analytics: 
.../Microsoft.OperationalInsights/workspaces/{name}") + log.Println(" App Insights: .../Microsoft.Insights/components/{name}") + log.Println(" Autoscale: .../Microsoft.Insights/autoscalesettings/{name}") + log.Println(" Action Groups: .../Microsoft.Insights/actionGroups/{name}") + log.Println(" Metric Alerts: .../Microsoft.Insights/metricAlerts/{name}") + log.Println("") + log.Println("Blob Storage Endpoints (Host: {account}.blob.core.windows.net):") + log.Println(" Containers: /{container}?restype=container") + log.Println(" Blobs: /{container}/{blob}") + log.Println("") + log.Println("Starting server on :8080...") + + if err := http.ListenAndServe(":8080", server); err != nil { + log.Fatalf("Server failed: %v", err) + } +} diff --git a/testing/docker/docker-compose.integration.yml b/testing/docker/docker-compose.integration.yml new file mode 100644 index 00000000..0faeb76c --- /dev/null +++ b/testing/docker/docker-compose.integration.yml @@ -0,0 +1,182 @@ +services: + # ============================================================================= + # LocalStack - AWS services emulator (S3, Route53, DynamoDB, etc.) + # ============================================================================= + localstack: + image: localstack/localstack:latest + container_name: integration-localstack + ports: + - "4566:4566" + environment: + - DEBUG=0 + - SERVICES=s3,route53,sts,iam,dynamodb,acm + - DEFAULT_REGION=us-east-1 + - AWS_DEFAULT_REGION=us-east-1 + - AWS_ACCESS_KEY_ID=test + - AWS_SECRET_ACCESS_KEY=test + - PERSISTENCE=0 + - EAGER_SERVICE_LOADING=1 + volumes: + - localstack-data:/var/lib/localstack + - /var/run/docker.sock:/var/run/docker.sock + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:4566/_localstack/health"] + interval: 5s + timeout: 5s + retries: 10 + networks: + integration-network: + ipv4_address: 172.28.0.2 + + # ============================================================================= + # Moto - CloudFront emulator (LocalStack doesn't support CloudFront well) + # ============================================================================= + moto: + image: motoserver/moto:latest + container_name: integration-moto + ports: + - "5555:5000" + environment: + - MOTO_PORT=5000 + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:5000/moto-api/"] + interval: 5s + timeout: 5s + retries: 10 + networks: + integration-network: + ipv4_address: 172.28.0.3 + + # ============================================================================= + # Azure Mock - Azure REST API mock server for CDN, DNS, Storage + # ============================================================================= + azure-mock: + build: + context: ./azure-mock + dockerfile: Dockerfile + container_name: integration-azure-mock + ports: + - "8090:8080" + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8080/health"] + interval: 5s + timeout: 5s + retries: 10 + networks: + integration-network: + ipv4_address: 172.28.0.4 + + # ============================================================================= + # Smocker - API mock server for nullplatform API + # ============================================================================= + smocker: + image: thiht/smocker:latest + container_name: integration-smocker + ports: + - "8080:8080" # Mock server port (HTTP) + - "8081:8081" # Admin API port (configure mocks) + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8081/version"] + interval: 5s + timeout: 5s + retries: 10 + networks: + integration-network: + ipv4_address: 
172.28.0.11 + + # ============================================================================= + # Nginx - HTTPS reverse proxy for smocker (np CLI requires HTTPS) + # ============================================================================= + nginx-proxy: + image: nginx:alpine + container_name: integration-nginx + ports: + - "8443:443" # HTTPS port for np CLI + volumes: + - ./nginx.conf:/etc/nginx/nginx.conf:ro + - ./certs:/certs:ro + depends_on: + - smocker + - azure-mock + healthcheck: + test: ["CMD", "curl", "-fk", "https://localhost:443/mocks"] + interval: 5s + timeout: 5s + retries: 10 + networks: + integration-network: + ipv4_address: 172.28.0.10 + + # ============================================================================= + # Test Runner - Container that runs the integration tests + # ============================================================================= + test-runner: + build: + context: . + dockerfile: Dockerfile.test-runner + container_name: integration-test-runner + environment: + # Terminal for BATS pretty formatter + - TERM=xterm-256color + # nullplatform CLI configuration + - NULLPLATFORM_API_KEY=test-api-key + # AWS Configuration - point to LocalStack + - AWS_ENDPOINT_URL=http://localstack:4566 + - LOCALSTACK_ENDPOINT=http://localstack:4566 + - MOTO_ENDPOINT=http://moto:5000 + - AWS_ACCESS_KEY_ID=test + - AWS_SECRET_ACCESS_KEY=test + - AWS_DEFAULT_REGION=us-east-1 + - AWS_PAGER= + # Smocker configuration + - SMOCKER_HOST=http://smocker:8081 + # Azure Mock configuration (handles both ARM API and Blob Storage) + - AZURE_MOCK_ENDPOINT=http://azure-mock:8080 + # ARM_ACCESS_KEY is required by azurerm backend to build auth headers + # (azure-mock ignores authentication, but SDK validates base64 format) + - ARM_ACCESS_KEY=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw== + # Azure credentials for mock (azurerm provider) + - ARM_CLIENT_ID=mock-client-id + - ARM_CLIENT_SECRET=mock-client-secret + - ARM_TENANT_ID=mock-tenant-id + - ARM_SUBSCRIPTION_ID=mock-subscription-id + - ARM_SKIP_PROVIDER_REGISTRATION=true + # Azure CLI service principal credentials (same as ARM_*) + - AZURE_CLIENT_ID=mock-client-id + - AZURE_CLIENT_SECRET=mock-client-secret + - AZURE_TENANT_ID=mock-tenant-id + - AZURE_SUBSCRIPTION_ID=mock-subscription-id + # Disable TLS verification for np CLI (talking to smocker) + - NODE_TLS_REJECT_UNAUTHORIZED=0 + # Python/Azure CLI certificate configuration + - REQUESTS_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt + - CURL_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt + - SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt + - AZURE_CLI_DISABLE_CONNECTION_VERIFICATION=1 + - PATH=/root/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin + extra_hosts: + # Redirect nullplatform API to smocker mock server (via nginx-proxy) + - "api.nullplatform.com:172.28.0.10" + # Redirect Azure APIs to azure-mock server (via nginx-proxy for HTTPS) + - "management.azure.com:172.28.0.10" + - "login.microsoftonline.com:172.28.0.10" + # Redirect Azure Blob Storage to azure-mock (via nginx-proxy for HTTPS) + - "devstoreaccount1.blob.core.windows.net:172.28.0.10" + volumes: + # Mount the project for tests + - ../..:/workspace + # Mount the TLS certificate for trusting smocker + - ./certs/cert.pem:/usr/local/share/ca-certificates/smocker.crt:ro + working_dir: /workspace + networks: + - integration-network + +networks: + integration-network: + driver: bridge + ipam: + config: + - subnet: 172.28.0.0/16 + +volumes: + 
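+# Note: the test-runner's extra_hosts entries pin real hostnames
+# (api.nullplatform.com, management.azure.com, *.blob.core.windows.net)
+# to the nginx proxy at 172.28.0.10, so unmodified CLIs inside the
+# container transparently hit the mocks. A quick sanity check of that
+# wiring (hypothetical one-off run):
+#   docker compose -f docker-compose.integration.yml run --rm test-runner \
+#     getent hosts api.nullplatform.com   # expect 172.28.0.10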
localstack-data: diff --git a/testing/docker/generate-certs.sh b/testing/docker/generate-certs.sh new file mode 100755 index 00000000..02f7f7bf --- /dev/null +++ b/testing/docker/generate-certs.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# Generate self-signed certificates for smocker TLS + +CERT_DIR="$(dirname "$0")/certs" +mkdir -p "$CERT_DIR" + +# Generate private key +openssl genrsa -out "$CERT_DIR/key.pem" 2048 2>/dev/null + +# Generate self-signed certificate +openssl req -new -x509 \ + -key "$CERT_DIR/key.pem" \ + -out "$CERT_DIR/cert.pem" \ + -days 365 \ + -subj "/CN=api.nullplatform.com" \ + -addext "subjectAltName=DNS:api.nullplatform.com,DNS:localhost" \ + 2>/dev/null + +echo "Certificates generated in $CERT_DIR" diff --git a/testing/docker/nginx.conf b/testing/docker/nginx.conf new file mode 100644 index 00000000..f3940af1 --- /dev/null +++ b/testing/docker/nginx.conf @@ -0,0 +1,83 @@ +events { + worker_connections 1024; +} + +http { + upstream smocker { + server smocker:8080; + } + + upstream azure_mock { + server azure-mock:8080; + } + + + # nullplatform API proxy + server { + listen 443 ssl; + server_name api.nullplatform.com; + + ssl_certificate /certs/cert.pem; + ssl_certificate_key /certs/key.pem; + + location / { + proxy_pass http://smocker; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + } + } + + # Azure Resource Manager API proxy + server { + listen 443 ssl; + server_name management.azure.com; + + ssl_certificate /certs/cert.pem; + ssl_certificate_key /certs/key.pem; + + location / { + proxy_pass http://azure_mock; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + } + } + + # Azure AD OAuth proxy + server { + listen 443 ssl; + server_name login.microsoftonline.com; + + ssl_certificate /certs/cert.pem; + ssl_certificate_key /certs/key.pem; + + location / { + proxy_pass http://azure_mock; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + } + } + + # Azure Blob Storage proxy (redirect to Azure Mock) + # Blob storage API is routed to azure-mock which handles it based on Host header + server { + listen 443 ssl; + server_name devstoreaccount1.blob.core.windows.net; + + ssl_certificate /certs/cert.pem; + ssl_certificate_key /certs/key.pem; + + location / { + proxy_pass http://azure_mock; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + } + } +} diff --git a/testing/integration_helpers.sh b/testing/integration_helpers.sh new file mode 100755 index 00000000..c8d620e3 --- /dev/null +++ b/testing/integration_helpers.sh @@ -0,0 +1,924 @@ +#!/bin/bash +# ============================================================================= +# Integration Test Helpers for BATS +# +# Provides helper functions for integration testing with cloud provider support. 
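+#
+# Requires docker compose, curl, and jq on the PATH; see TESTING.md for the
+# full prerequisite table.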
+# +# Usage in BATS test files: +# setup_file() { +# load "${PROJECT_ROOT}/testing/integration_helpers.sh" +# integration_setup --cloud-provider aws +# } +# +# teardown_file() { +# integration_teardown +# } +# +# Supported cloud providers: aws, azure, gcp +# ============================================================================= + +# ============================================================================= +# Colors +# ============================================================================= +INTEGRATION_RED='\033[0;31m' +INTEGRATION_GREEN='\033[0;32m' +INTEGRATION_YELLOW='\033[1;33m' +INTEGRATION_CYAN='\033[0;36m' +INTEGRATION_NC='\033[0m' + +# ============================================================================= +# Global State +# ============================================================================= +INTEGRATION_CLOUD_PROVIDER="${INTEGRATION_CLOUD_PROVIDER:-}" +INTEGRATION_COMPOSE_FILE="${INTEGRATION_COMPOSE_FILE:-}" + +# Determine module root from PROJECT_ROOT environment variable +# PROJECT_ROOT is set by the test runner (run_integration_tests.sh) +if [[ -z "${INTEGRATION_MODULE_ROOT:-}" ]]; then + INTEGRATION_MODULE_ROOT="${PROJECT_ROOT:-.}" +fi +export INTEGRATION_MODULE_ROOT + +# Default AWS/LocalStack configuration (can be overridden) +export LOCALSTACK_ENDPOINT="${LOCALSTACK_ENDPOINT:-http://localhost:4566}" +export MOTO_ENDPOINT="${MOTO_ENDPOINT:-http://localhost:5555}" +export AWS_ENDPOINT_URL="${AWS_ENDPOINT_URL:-$LOCALSTACK_ENDPOINT}" +export AWS_ACCESS_KEY_ID="${AWS_ACCESS_KEY_ID:-test}" +export AWS_SECRET_ACCESS_KEY="${AWS_SECRET_ACCESS_KEY:-test}" +export AWS_DEFAULT_REGION="${AWS_DEFAULT_REGION:-us-east-1}" +export AWS_PAGER="" + +# Default Azure Mock configuration (can be overridden) +export AZURE_MOCK_ENDPOINT="${AZURE_MOCK_ENDPOINT:-http://localhost:8090}" +export ARM_CLIENT_ID="${ARM_CLIENT_ID:-mock-client-id}" +export ARM_CLIENT_SECRET="${ARM_CLIENT_SECRET:-mock-client-secret}" +export ARM_TENANT_ID="${ARM_TENANT_ID:-mock-tenant-id}" +export ARM_SUBSCRIPTION_ID="${ARM_SUBSCRIPTION_ID:-mock-subscription-id}" +export ARM_SKIP_PROVIDER_REGISTRATION="${ARM_SKIP_PROVIDER_REGISTRATION:-true}" + +# Smocker configuration for API mocking +export SMOCKER_HOST="${SMOCKER_HOST:-http://localhost:8081}" + +# ============================================================================= +# Setup & Teardown +# ============================================================================= + +integration_setup() { + local cloud_provider="" + + # Parse arguments + while [[ $# -gt 0 ]]; do + case $1 in + --cloud-provider) + cloud_provider="$2" + shift 2 + ;; + *) + echo -e "${INTEGRATION_RED}Unknown argument: $1${INTEGRATION_NC}" + return 1 + ;; + esac + done + + # Validate cloud provider + if [[ -z "$cloud_provider" ]]; then + echo -e "${INTEGRATION_RED}Error: --cloud-provider is required${INTEGRATION_NC}" + echo "Usage: integration_setup --cloud-provider " + return 1 + fi + + case "$cloud_provider" in + aws|azure|gcp) + INTEGRATION_CLOUD_PROVIDER="$cloud_provider" + ;; + *) + echo -e "${INTEGRATION_RED}Error: Unsupported cloud provider: $cloud_provider${INTEGRATION_NC}" + echo "Supported providers: aws, azure, gcp" + return 1 + ;; + esac + + export INTEGRATION_CLOUD_PROVIDER + + # Find docker-compose.yml + INTEGRATION_COMPOSE_FILE=$(find_compose_file) + export INTEGRATION_COMPOSE_FILE + + echo -e "${INTEGRATION_CYAN}Integration Setup${INTEGRATION_NC}" + echo " Cloud Provider: $INTEGRATION_CLOUD_PROVIDER" + echo " Module Root: $INTEGRATION_MODULE_ROOT" + echo 
"" + + # Call provider-specific setup + case "$INTEGRATION_CLOUD_PROVIDER" in + aws) + _setup_aws + ;; + azure) + _setup_azure + ;; + gcp) + _setup_gcp + ;; + esac +} + +integration_teardown() { + echo "" + echo -e "${INTEGRATION_CYAN}Integration Teardown${INTEGRATION_NC}" + + # Call provider-specific teardown + case "$INTEGRATION_CLOUD_PROVIDER" in + aws) + _teardown_aws + ;; + azure) + _teardown_azure + ;; + gcp) + _teardown_gcp + ;; + esac +} + +# ============================================================================= +# AWS Provider (LocalStack + Moto) +# ============================================================================= + +_setup_aws() { + echo " LocalStack: $LOCALSTACK_ENDPOINT" + echo " Moto: $MOTO_ENDPOINT" + echo "" + + # Configure OpenTofu/Terraform S3 backend for LocalStack + # These settings allow the S3 backend to work with LocalStack's S3 emulation + export TOFU_INIT_VARIABLES="${TOFU_INIT_VARIABLES:-}" + TOFU_INIT_VARIABLES="$TOFU_INIT_VARIABLES -backend-config=force_path_style=true" + TOFU_INIT_VARIABLES="$TOFU_INIT_VARIABLES -backend-config=skip_credentials_validation=true" + TOFU_INIT_VARIABLES="$TOFU_INIT_VARIABLES -backend-config=skip_metadata_api_check=true" + TOFU_INIT_VARIABLES="$TOFU_INIT_VARIABLES -backend-config=skip_region_validation=true" + TOFU_INIT_VARIABLES="$TOFU_INIT_VARIABLES -backend-config=endpoints={s3=\"$LOCALSTACK_ENDPOINT\",dynamodb=\"$LOCALSTACK_ENDPOINT\"}" + export TOFU_INIT_VARIABLES + + # Start containers if compose file exists + if [[ -n "$INTEGRATION_COMPOSE_FILE" ]]; then + _start_localstack + else + echo -e "${INTEGRATION_YELLOW}Warning: No docker-compose.yml found, skipping container startup${INTEGRATION_NC}" + fi +} + +_teardown_aws() { + if [[ -n "$INTEGRATION_COMPOSE_FILE" ]]; then + _stop_localstack + fi +} + +_start_localstack() { + echo -e " Starting LocalStack..." + docker compose -f "$INTEGRATION_COMPOSE_FILE" up -d 2>/dev/null + + echo -n " Waiting for LocalStack to be ready" + local max_attempts=30 + local attempt=0 + + while [[ $attempt -lt $max_attempts ]]; do + if curl -s "$LOCALSTACK_ENDPOINT/_localstack/health" 2>/dev/null | jq -e '.services.s3 == "running"' > /dev/null 2>&1; then + echo "" + echo -e " ${INTEGRATION_GREEN}LocalStack is ready${INTEGRATION_NC}" + echo "" + return 0 + fi + attempt=$((attempt + 1)) + sleep 2 + echo -n "." + done + + echo "" + echo -e " ${INTEGRATION_RED}LocalStack failed to start${INTEGRATION_NC}" + return 1 +} + +_stop_localstack() { + echo " Stopping LocalStack..." + docker compose -f "$INTEGRATION_COMPOSE_FILE" down -v 2>/dev/null || true +} + +# ============================================================================= +# Azure Provider (Azure Mock) +# ============================================================================= + +_setup_azure() { + echo " Azure Mock: $AZURE_MOCK_ENDPOINT" + echo "" + + # Azure tests use: + # - Azure Mock for ARM APIs (CDN, DNS, etc.) AND Blob Storage (terraform state) + # - nginx proxy to redirect *.blob.core.windows.net to Azure Mock + + # Install the self-signed certificate for nginx proxy + # This allows the Azure SDK to trust the proxy for blob storage + if [[ -f /usr/local/share/ca-certificates/smocker.crt ]]; then + echo -n " Installing TLS certificate..." 
+ update-ca-certificates >/dev/null 2>&1 || true + # Also set for Python/requests (used by Azure CLI) + export REQUESTS_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt + export CURL_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt + echo -e " ${INTEGRATION_GREEN}done${INTEGRATION_NC}" + fi + + # Start containers if compose file exists + if [[ -n "$INTEGRATION_COMPOSE_FILE" ]]; then + _start_azure_mock + else + echo -e "${INTEGRATION_YELLOW}Warning: No docker-compose.yml found, skipping container startup${INTEGRATION_NC}" + fi + + # Configure Azure CLI to work with mock + _configure_azure_cli +} + +_teardown_azure() { + if [[ -n "$INTEGRATION_COMPOSE_FILE" ]]; then + _stop_azure_mock + fi +} + +_start_azure_mock() { + echo -e " Starting Azure Mock..." + docker compose -f "$INTEGRATION_COMPOSE_FILE" up -d azure-mock nginx-proxy smocker 2>/dev/null + + # Wait for Azure Mock + echo -n " Waiting for Azure Mock to be ready" + local max_attempts=30 + local attempt=0 + + while [[ $attempt -lt $max_attempts ]]; do + if curl -s "$AZURE_MOCK_ENDPOINT/health" 2>/dev/null | jq -e '.status == "ok"' > /dev/null 2>&1; then + echo "" + echo -e " ${INTEGRATION_GREEN}Azure Mock is ready${INTEGRATION_NC}" + break + fi + attempt=$((attempt + 1)) + sleep 2 + echo -n "." + done + + if [[ $attempt -ge $max_attempts ]]; then + echo "" + echo -e " ${INTEGRATION_RED}Azure Mock failed to start${INTEGRATION_NC}" + return 1 + fi + + # Create tfstate container in Azure Mock (required by azurerm backend) + # The account name comes from Host header, path is just /{container} + echo -n " Creating tfstate container..." + curl -s -X PUT "${AZURE_MOCK_ENDPOINT}/tfstate?restype=container" \ + -H "Host: devstoreaccount1.blob.core.windows.net" \ + -H "x-ms-version: 2021-06-08" >/dev/null 2>&1 + echo -e " ${INTEGRATION_GREEN}done${INTEGRATION_NC}" + + # Wait for nginx proxy to be ready (handles blob storage redirect) + echo -n " Waiting for nginx proxy to be ready" + attempt=0 + + while [[ $attempt -lt $max_attempts ]]; do + if curl -sk "https://localhost:443/mocks" >/dev/null 2>&1; then + echo "" + echo -e " ${INTEGRATION_GREEN}nginx proxy is ready${INTEGRATION_NC}" + break + fi + attempt=$((attempt + 1)) + sleep 2 + echo -n "." + done + + if [[ $attempt -ge $max_attempts ]]; then + echo "" + echo -e " ${INTEGRATION_YELLOW}Warning: nginx proxy health check failed, continuing anyway${INTEGRATION_NC}" + fi + + echo "" + return 0 +} + +_stop_azure_mock() { + echo " Stopping Azure Mock..." + docker compose -f "$INTEGRATION_COMPOSE_FILE" down -v 2>/dev/null || true +} + +_configure_azure_cli() { + # Check if Azure CLI is available + if ! 
command -v az &>/dev/null; then + echo -e " ${INTEGRATION_YELLOW}Warning: Azure CLI not installed, skipping configuration${INTEGRATION_NC}" + return 0 + fi + + echo "" + echo -e " ${INTEGRATION_CYAN}Configuring Azure CLI...${INTEGRATION_NC}" + + local azure_dir="$HOME/.azure" + mkdir -p "$azure_dir" + + # Generate timestamps for token + local now=$(date +%s) + local exp=$((now + 86400)) # 24 hours from now + + # Create the azureProfile.json (subscription info) + cat > "$azure_dir/azureProfile.json" << EOF +{ + "installationId": "mock-installation-id", + "subscriptions": [ + { + "id": "${ARM_SUBSCRIPTION_ID}", + "name": "Mock Subscription", + "state": "Enabled", + "user": { + "name": "${ARM_CLIENT_ID}", + "type": "servicePrincipal" + }, + "isDefault": true, + "tenantId": "${ARM_TENANT_ID}", + "environmentName": "AzureCloud" + } + ] +} +EOF + + # Create the service principal secret storage file + # This is where Azure CLI stores secrets for service principals after login + # Format must match what Azure CLI identity.py expects (uses 'tenant' not 'tenant_id') + cat > "$azure_dir/service_principal_entries.json" << EOF +[ + { + "client_id": "${ARM_CLIENT_ID}", + "tenant": "${ARM_TENANT_ID}", + "client_secret": "${ARM_CLIENT_SECRET}" + } +] +EOF + + # Set proper permissions + chmod 600 "$azure_dir"/*.json + + echo -e " ${INTEGRATION_GREEN}Azure CLI configured with mock credentials${INTEGRATION_NC}" + return 0 +} + +# ============================================================================= +# GCP Provider (Fake GCS Server) - Placeholder +# ============================================================================= + +_setup_gcp() { + echo -e "${INTEGRATION_YELLOW}GCP provider setup not yet implemented${INTEGRATION_NC}" + echo " Fake GCS Server endpoint would be configured here" + echo "" +} + +_teardown_gcp() { + echo -e "${INTEGRATION_YELLOW}GCP provider teardown not yet implemented${INTEGRATION_NC}" +} + +# ============================================================================= +# Utility Functions +# ============================================================================= + +find_compose_file() { + local search_paths=( + "${BATS_TEST_DIRNAME:-}/docker-compose.yml" + "${BATS_TEST_DIRNAME:-}/../docker-compose.yml" + "${INTEGRATION_MODULE_ROOT}/tests/integration/docker-compose.yml" + ) + + for path in "${search_paths[@]}"; do + if [[ -f "$path" ]]; then + echo "$path" + return 0 + fi + done + + # Return success with empty output - compose file is optional + # (containers may already be managed by the test runner) + return 0 +} + +# ============================================================================= +# AWS Local Commands +# ============================================================================= + +# Execute AWS CLI against LocalStack +aws_local() { + aws --endpoint-url="$LOCALSTACK_ENDPOINT" --no-cli-pager --no-cli-auto-prompt "$@" +} + +# Execute AWS CLI against Moto (for CloudFront) +aws_moto() { + aws --endpoint-url="$MOTO_ENDPOINT" --no-cli-pager --no-cli-auto-prompt "$@" +} + +# ============================================================================= +# Azure Mock Commands +# ============================================================================= + +# Execute a GET request against Azure Mock API +# Usage: azure_mock "/subscriptions/sub-id/resourceGroups/rg/providers/Microsoft.Cdn/profiles/profile-name" +azure_mock() { + local path="$1" + curl -s "${AZURE_MOCK_ENDPOINT}${path}" 2>/dev/null +} + +# Execute a PUT request against Azure Mock API +# 
+# Usage: azure_mock_put "/path" '{"json": "body"}'
+azure_mock_put() {
+    local path="$1"
+    local body="$2"
+    curl -s -X PUT "${AZURE_MOCK_ENDPOINT}${path}" \
+        -H "Content-Type: application/json" \
+        -d "$body" 2>/dev/null
+}
+
+# Execute a DELETE request against Azure Mock API
+# Usage: azure_mock_delete "/path"
+azure_mock_delete() {
+    local path="$1"
+    curl -s -X DELETE "${AZURE_MOCK_ENDPOINT}${path}" 2>/dev/null
+}
+
+# =============================================================================
+# Workflow Execution
+# =============================================================================
+
+# Run a nullplatform workflow
+# Usage: run_workflow "deployment/workflows/initial.yaml"
+run_workflow() {
+    local workflow="$1"
+    local full_path
+
+    # Resolve path relative to module root
+    if [[ "$workflow" = /* ]]; then
+        full_path="$workflow"
+    else
+        full_path="$INTEGRATION_MODULE_ROOT/$workflow"
+    fi
+
+    echo -e "${INTEGRATION_CYAN}Running workflow:${INTEGRATION_NC} $workflow"
+    np service workflow exec --workflow "$full_path"
+}
+
+# =============================================================================
+# Context Helpers
+# =============================================================================
+
+# Load context from a JSON file
+# Usage: load_context "resources/context.json"
+load_context() {
+    local context_file="$1"
+    local full_path
+
+    # Resolve path relative to module root
+    if [[ "$context_file" = /* ]]; then
+        full_path="$context_file"
+    else
+        full_path="$INTEGRATION_MODULE_ROOT/$context_file"
+    fi
+
+    if [[ ! -f "$full_path" ]]; then
+        echo -e "${INTEGRATION_RED}Context file not found: $full_path${INTEGRATION_NC}"
+        return 1
+    fi
+
+    export CONTEXT=$(cat "$full_path")
+    echo -e " ${INTEGRATION_CYAN}Loaded context from:${INTEGRATION_NC} $context_file"
+}
+
+# Override a value in the current CONTEXT
+# Usage: override_context "providers.networking.zone_id" "Z1234567890"
+override_context() {
+    local key="$1"
+    local value="$2"
+
+    if [[ -z "$CONTEXT" ]]; then
+        echo -e "${INTEGRATION_RED}Error: CONTEXT is not set. Call load_context first.${INTEGRATION_NC}"
+        return 1
+    fi
+
+    CONTEXT=$(echo "$CONTEXT" | jq --arg k "$key" --arg v "$value" 'setpath($k | split("."); $v)')
+    export CONTEXT
+}
+
+# =============================================================================
+# Generic Assertions
+# =============================================================================
+
+# Assert command succeeds
+# Usage: assert_success "aws s3 ls"
+assert_success() {
+    local cmd="$1"
+    local description="${2:-Command succeeds}"
+    echo -ne " ${INTEGRATION_CYAN}Assert:${INTEGRATION_NC} ${description} ... "
+
+    if eval "$cmd" >/dev/null 2>&1; then
+        _assert_result "true"
+    else
+        _assert_result "false"
+        return 1
+    fi
+}
+
+# Assert command fails
+# Usage: assert_failure "aws s3api head-bucket --bucket nonexistent"
+assert_failure() {
+    local cmd="$1"
+    local description="${2:-Command fails}"
+    echo -ne " ${INTEGRATION_CYAN}Assert:${INTEGRATION_NC} ${description} ... "
+
+    if eval "$cmd" >/dev/null 2>&1; then
+        _assert_result "false"
+        return 1
+    else
+        _assert_result "true"
+    fi
+}
+
+# Assert output contains string
+# Usage: result=$(some_command); assert_contains "$result" "expected"
+assert_contains() {
+    local haystack="$1"
+    local needle="$2"
+    local description="${3:-Output contains '$needle'}"
+    echo -ne " ${INTEGRATION_CYAN}Assert:${INTEGRATION_NC} ${description} ... "
+
+    if [[ "$haystack" == *"$needle"* ]]; then
+        _assert_result "true"
+    else
+        _assert_result "false"
+        return 1
+    fi
+}
+
+# Assert values are equal
+# Usage: assert_equals "$actual" "$expected" "Values match"
+assert_equals() {
+    local actual="$1"
+    local expected="$2"
+    local description="${3:-Values are equal}"
+    echo -ne " ${INTEGRATION_CYAN}Assert:${INTEGRATION_NC} ${description} ... "
+
+    if [[ "$actual" == "$expected" ]]; then
+        _assert_result "true"
+    else
+        _assert_result "false"
+        echo " Expected: $expected"
+        echo " Actual:   $actual"
+        return 1
+    fi
+}
+
+# =============================================================================
+# API Mocking (Smocker)
+#
+# Smocker is used to mock the nullplatform API (api.nullplatform.com).
+# Tests run in a container where api.nullplatform.com resolves to smocker.
+# =============================================================================
+
+# Clear all mocks from smocker and set up default mocks
+# Usage: clear_mocks
+clear_mocks() {
+    curl -s -X POST "${SMOCKER_HOST}/reset" >/dev/null 2>&1
+    # Set up default mocks that are always needed
+    _setup_default_mocks
+}
+
+# Set up default mocks that are always needed for np CLI
+# These are internal API calls that np CLI makes automatically
+_setup_default_mocks() {
+    # Token endpoint - np CLI always authenticates before making API calls
+    local token_mock
+    token_mock=$(cat <<'EOF'
+[{
+    "request": {
+        "method": "POST",
+        "path": "/token"
+    },
+    "response": {
+        "status": 200,
+        "headers": {"Content-Type": "application/json"},
+        "body": "{\"access_token\": \"test-integration-token\", \"token_type\": \"Bearer\", \"expires_in\": 3600}"
+    }
+}]
+EOF
+)
+    curl -s -X POST "${SMOCKER_HOST}/mocks" \
+        -H "Content-Type: application/json" \
+        -d "$token_mock" >/dev/null 2>&1
+}
+
+# Mock an API request
+# Usage with file:  mock_request "GET" "/providers/123" "responses/provider.json"
+# Usage inline:     mock_request "POST" "/deployments" 201 '{"id": "new-dep"}'
+#
+# File format (JSON):
+# {
+#   "status": 200,
+#   "headers": {"Content-Type": "application/json"},  // optional
+#   "body": { ... }
+# }
+mock_request() {
+    local method="$1"
+    local path="$2"
+    local status_or_file="$3"
+    local body="$4"
+
+    local status
+    local response_body
+    local headers='{"Content-Type": "application/json"}'
+
+    # Resolve the third argument: a response file (absolute or module-relative
+    # path) or an inline status code
+    local mock_file=""
+    if [[ -f "$status_or_file" ]]; then
+        mock_file="$status_or_file"
+    elif [[ -f "${INTEGRATION_MODULE_ROOT}/$status_or_file" ]]; then
+        mock_file="${INTEGRATION_MODULE_ROOT}/$status_or_file"
+    fi
+
+    if [[ -n "$mock_file" ]]; then
+        # File mode - read status, body and optional headers from the file
+        local file_content
+        file_content=$(cat "$mock_file")
+        status=$(echo "$file_content" | jq -r '.status // 200')
+        response_body=$(echo "$file_content" | jq -c '.body // {}')
+        local file_headers
+        file_headers=$(echo "$file_content" | jq -c '.headers // null')
+        if [[ "$file_headers" != "null" ]]; then
+            headers="$file_headers"
+        fi
+    else
+        # Inline mode - status code and body provided directly
+        status="$status_or_file"
+        response_body="$body"
+    fi
+
+    # Build smocker mock definition
+    # Note: Smocker expects body as a string, not a JSON object
+    local mock_definition
+    mock_definition=$(jq -n \
+        --arg method "$method" \
+        --arg path "$path" \
+        --argjson status "$status" \
+        --arg body "$response_body" \
+        --argjson headers "$headers" \
+        '[{
+            "request": {
+                "method": $method,
+                "path": $path
+            },
+            "response": {
+                "status": $status,
+                "headers": $headers,
+                "body": $body
+            }
+        }]')
+
+    # Register mock with smocker, capturing the response body in a unique
+    # temp file so concurrent invocations cannot clobber each other
+    local response_file
+    response_file=$(mktemp)
+    local result
+    local http_code
+    http_code=$(curl -s -w "%{http_code}" -o "$response_file" -X POST "${SMOCKER_HOST}/mocks" \
+        -H "Content-Type: application/json" \
+        -d "$mock_definition" 2>&1)
+    result=$(cat "$response_file" 2>/dev/null)
+    rm -f "$response_file"
+
+    if [[ "$http_code" != "200" ]]; then
+        local error_msg
+        error_msg=$(echo "$result" | jq -r '.message // "Unknown error"' 2>/dev/null)
+        echo -e "${INTEGRATION_RED}Failed to register mock (HTTP ${http_code}): ${error_msg}${INTEGRATION_NC}"
+        return 1
+    fi
+
+    echo -e " ${INTEGRATION_CYAN}Mock:${INTEGRATION_NC} ${method} ${path} -> ${status}"
+}
+
+# Mock a request with query parameters
+# Usage: mock_request_with_query "GET" "/providers" "type=assets-repository" 200 '[...]'
+mock_request_with_query() {
+    local method="$1"
+    local path="$2"
+    local query="$3"
+    local status="$4"
+    local body="$5"
+
+    local mock_definition
+    mock_definition=$(jq -n \
+        --arg method "$method" \
+        --arg path "$path" \
+        --arg query "$query" \
+        --argjson status "$status" \
+        --arg body "$body" \
+        '[{
+            "request": {
+                "method": $method,
+                "path": $path,
+                "query_params": ($query | split("&") | map(split("=") | {(.[0]): [.[1]]}) | add)
+            },
+            "response": {
+                "status": $status,
+                "headers": {"Content-Type": "application/json"},
+                "body": $body
+            }
+        }]')
+
+    curl -s -X POST "${SMOCKER_HOST}/mocks" \
+        -H "Content-Type: application/json" \
+        -d "$mock_definition" >/dev/null 2>&1
+
+    echo -e " ${INTEGRATION_CYAN}Mock:${INTEGRATION_NC} ${method} ${path}?${query} -> ${status}"
+}
+
+# Verify that a mock was called
+# Usage: assert_mock_called "GET" "/providers/123"
+assert_mock_called() {
+    local method="$1"
+    local path="$2"
+    echo -ne " ${INTEGRATION_CYAN}Assert:${INTEGRATION_NC} ${method} ${path} was called ... "
+
+    local history
+    history=$(curl -s "${SMOCKER_HOST}/history" 2>/dev/null)
+
+    local called
+    called=$(echo "$history" | jq -r \
+        --arg method "$method" \
+        --arg path "$path" \
+        '[.[] | select(.request.method == $method and .request.path == $path)] | length')
+
+    if [[ "$called" -gt 0 ]]; then
+        _assert_result "true"
+    else
+        _assert_result "false"
+        return 1
+    fi
+}
+
+# Get the number of times a mock was called
+# Usage: count=$(mock_call_count "GET" "/providers/123")
+mock_call_count() {
+    local method="$1"
+    local path="$2"
+
+    local history
+    history=$(curl -s "${SMOCKER_HOST}/history" 2>/dev/null)
+
+    echo "$history" | jq -r \
+        --arg method "$method" \
+        --arg path "$path" \
+        '[.[] | select(.request.method == $method and .request.path == $path)] | length'
+}
+
+# =============================================================================
+# Help / Documentation
+# =============================================================================
+
+# Display help for all available integration test utilities
+test_help() {
+    cat <<'EOF'
+================================================================================
+  Integration Test Helpers Reference
+================================================================================
+
+SETUP & TEARDOWN
+----------------
+  integration_setup --cloud-provider <provider>
+      Initialize integration test environment for the specified cloud provider.
+      Call this in setup_file().
+
+  integration_teardown
+      Clean up integration test environment.
+      Call this in teardown_file().
+
+AWS LOCAL COMMANDS
+------------------
+  aws_local <args...>
+      Execute AWS CLI against LocalStack (S3, Route53, DynamoDB, etc.)
+      Example: aws_local s3 ls
+
+  aws_moto <args...>
+      Execute AWS CLI against Moto (CloudFront)
+      Example: aws_moto cloudfront list-distributions
+
+AZURE MOCK COMMANDS
+-------------------
+  azure_mock "<path>"
+      Execute a GET request against Azure Mock API.
+      Example: azure_mock "/subscriptions/sub-id/resourceGroups/rg/providers/Microsoft.Cdn/profiles/my-profile"
+
+  azure_mock_put "<path>" '<json-body>'
+      Execute a PUT request against Azure Mock API.
+      Example: azure_mock_put "/subscriptions/.../profiles/my-profile" '{"location": "eastus"}'
+
+  azure_mock_delete "<path>"
+      Execute a DELETE request against Azure Mock API.
+      Example: azure_mock_delete "/subscriptions/.../profiles/my-profile"
+
+WORKFLOW EXECUTION
+------------------
+  run_workflow "<workflow-file>"
+      Run a nullplatform workflow file.
+      Path is relative to module root.
+      Example: run_workflow "frontend/deployment/workflows/initial.yaml"
+
+CONTEXT HELPERS
+---------------
+  load_context "<context-file>"
+      Load a context JSON file into the CONTEXT environment variable.
+      Example: load_context "tests/resources/context.json"
+
+  override_context "<key>" "<value>"
+      Override a value in the current CONTEXT.
+      Example: override_context "providers.networking.zone_id" "Z1234567890"
+
+API MOCKING (Smocker)
+---------------------
+  clear_mocks
+      Clear all mocks and set up default mocks (token endpoint).
+      Call this at the start of each test.
+
+  mock_request "<method>" "<path>" "<response-file>"
+      Mock an API request using a response file.
+      File format: { "status": 200, "body": {...} }
+      Example: mock_request "GET" "/provider/123" "mocks/provider.json"
+
+  mock_request "<method>" "<path>" <status> '<json-body>'
+      Mock an API request with inline response.
+      Example: mock_request "POST" "/deployments" 201 '{"id": "new"}'
+
+  mock_request_with_query "<method>" "<path>" "<query>" <status> '<json-body>'
+      Mock a request with query parameters.
+      Example: mock_request_with_query "GET" "/items" "type=foo" 200 '[...]'
+
+  assert_mock_called "<method>" "<path>"
+      Assert that a mock endpoint was called.
+      Example: assert_mock_called "GET" "/provider/123"
+
+  mock_call_count "<method>" "<path>"
+      Get the number of times a mock was called.
+      Example: count=$(mock_call_count "GET" "/provider/123")
+
+AWS ASSERTIONS
+--------------
+  assert_s3_bucket_exists "<bucket>"
+      Assert an S3 bucket exists in LocalStack.
+
+  assert_s3_bucket_not_exists "<bucket>"
+      Assert an S3 bucket does not exist.
+
+  assert_cloudfront_exists "<comment>"
+      Assert a CloudFront distribution exists (matched by comment).
+
+  assert_cloudfront_not_exists "<comment>"
+      Assert a CloudFront distribution does not exist.
+
+  assert_route53_record_exists "<record-name>" "<type>"
+      Assert a Route53 record exists.
+      Example: assert_route53_record_exists "app.example.com" "A"
+
+  assert_route53_record_not_exists "<record-name>" "<type>"
+      Assert a Route53 record does not exist.
+
+  assert_dynamodb_table_exists "<table>"
+      Assert a DynamoDB table exists.
+
+  assert_dynamodb_table_not_exists "<table>"
+      Assert a DynamoDB table does not exist.
+
+GENERIC ASSERTIONS
+------------------
+  assert_success "<command>" ["<description>"]
+      Assert a command succeeds (exit code 0).
+
+  assert_failure "<command>" ["<description>"]
+      Assert a command fails (non-zero exit code).
+
+  assert_contains "<haystack>" "<needle>" ["<description>"]
+      Assert a string contains a substring.
+
+  assert_equals "<actual>" "<expected>" ["<description>"]
+      Assert two values are equal.
+
+ENVIRONMENT VARIABLES
+---------------------
+  LOCALSTACK_ENDPOINT       LocalStack URL (default: http://localhost:4566)
+  MOTO_ENDPOINT             Moto URL (default: http://localhost:5555)
+  AZURE_MOCK_ENDPOINT       Azure Mock URL (default: http://localhost:8090)
+  SMOCKER_HOST              Smocker admin URL (default: http://localhost:8081)
+  AWS_ENDPOINT_URL          AWS endpoint for CLI (default: $LOCALSTACK_ENDPOINT)
+  ARM_CLIENT_ID             Azure client ID for mock (default: mock-client-id)
+  ARM_CLIENT_SECRET         Azure client secret for mock (default: mock-client-secret)
+  ARM_TENANT_ID             Azure tenant ID for mock (default: mock-tenant-id)
+  ARM_SUBSCRIPTION_ID       Azure subscription ID for mock (default: mock-subscription-id)
+  INTEGRATION_MODULE_ROOT   Root directory of the module being tested
+
+================================================================================
+EOF
+}
diff --git a/testing/localstack-provider/provider_override.tf b/testing/localstack-provider/provider_override.tf
new file mode 100644
index 00000000..587982c2
--- /dev/null
+++ b/testing/localstack-provider/provider_override.tf
@@ -0,0 +1,38 @@
+# Override file for LocalStack + Moto testing
+# This file is copied into the module directory during integration tests
+# to configure the AWS provider to use mock endpoints
+#
+# LocalStack (port 4566): S3, Route53, STS, IAM, DynamoDB, ACM
+# Moto (port 5000): CloudFront
+
+# Set CloudFront endpoint for AWS CLI commands (used by cache invalidation)
+variable "distribution_cloudfront_endpoint_url" {
+  default = "http://moto:5000"
+}
+
+provider "aws" {
+  region     = var.aws_provider.region
+  access_key = "test"
+  secret_key = "test"
+  skip_credentials_validation = true
+  skip_metadata_api_check     = true
+  skip_requesting_account_id  = true
+
+  endpoints {
+    # LocalStack services (using Docker service name)
+    s3       = "http://localstack:4566"
+    route53  = "http://localstack:4566"
+    sts      = "http://localstack:4566"
+    iam      = "http://localstack:4566"
+    dynamodb = "http://localstack:4566"
+    acm      = "http://localstack:4566"
+    # Moto services (CloudFront not in LocalStack free tier)
+    cloudfront = "http://moto:5000"
+  }
+
+  default_tags {
+    tags = var.provider_resource_tags_json
+  }
+
+  s3_use_path_style = true
+}
diff --git a/testing/run_bats_tests.sh b/testing/run_bats_tests.sh
new file mode 100755
index 00000000..d17384e6
--- /dev/null
+++ b/testing/run_bats_tests.sh
@@ -0,0 +1,194 @@
+#!/bin/bash
+# =============================================================================
+# Test runner for all BATS tests across all modules
+#
+# Usage:
+#   ./testing/run_bats_tests.sh                             # Run all tests
+#   ./testing/run_bats_tests.sh frontend                    # Run tests for frontend module only
+#   ./testing/run_bats_tests.sh frontend/deployment/tests   # Run specific test directory
+# =============================================================================
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
+cd "$PROJECT_ROOT"
+
+# Propagate bats failures through the `bats | tee` pipeline below;
+# without pipefail the pipeline would report tee's (always zero) exit status
+set -o pipefail
+
+# Colors
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+CYAN='\033[0;36m'
+NC='\033[0m'
+
+# Track failed tests globally
+FAILED_TESTS=()
+
+# Check if bats is installed
+if ! command -v bats &> /dev/null; then
+    echo -e "${RED}bats-core is not installed${NC}"
+    echo ""
+    echo "Install with:"
+    echo "  brew install bats-core   # macOS"
+    echo "  apt install bats         # Ubuntu/Debian"
+    echo "  apk add bats             # Alpine"
+    echo "  choco install bats       # Windows"
+    exit 1
+fi
+
+# Check if jq is installed
+if ! command -v jq &> /dev/null; then
+    echo -e "${RED}jq is not installed${NC}"
+    echo ""
+    echo "Install with:"
+    echo "  brew install jq   # macOS"
+    echo "  apt install jq    # Ubuntu/Debian"
+    echo "  apk add jq        # Alpine"
+    echo "  choco install jq  # Windows"
+    exit 1
+fi
+
+# Find all test directories
+find_test_dirs() {
+    find . -mindepth 3 -maxdepth 3 -type d -name "tests" -not -path "*/node_modules/*" 2>/dev/null | sort
+}
+
+# Get module name from test path
+get_module_name() {
+    local path="$1"
+    echo "$path" | sed 's|^\./||' | cut -d'/' -f1
+}
+
+# Run tests for a specific directory
+run_tests_in_dir() {
+    local test_dir="$1"
+    local module_name
+    module_name=$(get_module_name "$test_dir")
+
+    # Find all .bats files, excluding integration directory (integration tests are run separately)
+    local bats_files
+    bats_files=$(find "$test_dir" -name "*.bats" -not -path "*/integration/*" 2>/dev/null)
+
+    if [ -z "$bats_files" ]; then
+        return 0
+    fi
+
+    echo -e "${CYAN}[$module_name]${NC} Running BATS tests in $test_dir"
+    echo ""
+
+    # Create temp file to capture output
+    local temp_output
+    temp_output=$(mktemp)
+
+    local exit_code=0
+    (
+        cd "$test_dir"
+        # Use script to force a TTY so bats emits colored output.
+        # Note: this is the BSD/macOS `script` syntax; util-linux `script`
+        # would need: script -qec "bats ..." /dev/null
+        # Exclude integration directory - those tests are run by run_integration_tests.sh
+        # --print-output-on-failure: only show test output when a test fails
+        script -q /dev/null bats --formatter pretty --print-output-on-failure $(find . -name "*.bats" -not -path "*/integration/*" | sort)
+    ) 2>&1 | tee "$temp_output" || exit_code=$?
+
+    # Extract failed tests from output
+    # Strip all ANSI escape codes (colors, cursor movements, etc.)
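+    # For reference, a sketch of the bats "pretty" output parsed below
+    # (illustrative names; exact spacing varies by bats-core version):
+    #
+    #   network/route53/setup_test.bats
+    #    ✓ creates the hosted zone
+    #    ✗ fails when zone_id is missing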
+    local clean_output
+    clean_output=$(perl -pe 's/\e\[[0-9;]*[a-zA-Z]//g; s/\e\][^\a]*\a//g' "$temp_output" 2>/dev/null || cat "$temp_output")
+
+    local current_file=""
+    while IFS= read -r line; do
+        # Track current test file (lines containing .bats without test markers)
+        if [[ "$line" == *".bats"* ]] && [[ "$line" != *"✗"* ]] && [[ "$line" != *"✓"* ]]; then
+            # Extract the file path (e.g., network/route53/setup_test.bats)
+            current_file=$(echo "$line" | grep -oE '[a-zA-Z0-9_/.-]+\.bats' | head -1)
+        fi
+
+        # Find failed test lines
+        if [[ "$line" == *"✗"* ]]; then
+            # Extract test name: get text after ✗, clean up any remaining control chars
+            local failed_test_name
+            failed_test_name=$(echo "$line" | sed 's/.*✗[[:space:]]*//' | sed 's/[[:space:]]*$//' | tr -d '\r')
+            # Only add if we got a valid test name
+            if [[ -n "$failed_test_name" ]]; then
+                FAILED_TESTS+=("${module_name}|${current_file}|${failed_test_name}")
+            fi
+        fi
+    done <<< "$clean_output"
+
+    rm -f "$temp_output"
+    echo ""
+
+    return $exit_code
+}
+
+echo ""
+echo "========================================"
+echo " BATS Tests (Unit)"
+echo "========================================"
+echo ""
+
+# Print available test helpers reference
+source "$SCRIPT_DIR/assertions.sh"
+test_help
+echo ""
+
+# Export BASH_ENV to auto-source assertions.sh in all bats test subshells
+export BASH_ENV="$SCRIPT_DIR/assertions.sh"
+
+HAS_FAILURES=0
+
+if [ -n "$1" ]; then
+    # Run tests for specific module or directory
+    if [ -d "$1" ] && [[ "$1" == */tests || "$1" == */tests/* ]]; then
+        # Direct test directory path (ends with /tests or is inside one)
+        run_tests_in_dir "$1" || HAS_FAILURES=1
+    elif [ -d "$1" ]; then
+        # Module name (e.g., "frontend") - find all test directories under it
+        module_test_dirs=$(find "$1" -mindepth 2 -maxdepth 2 -type d -name "tests" 2>/dev/null | sort)
+        if [ -z "$module_test_dirs" ]; then
+            echo -e "${RED}No test directories found in: $1${NC}"
+            exit 1
+        fi
+        for test_dir in $module_test_dirs; do
+            run_tests_in_dir "$test_dir" || HAS_FAILURES=1
+        done
+    else
+        echo -e "${RED}Directory not found: $1${NC}"
+        echo ""
+        echo "Available modules with tests:"
+        for dir in $(find_test_dirs); do
+            echo "  - $(get_module_name "$dir")"
+        done | sort -u
+        exit 1
+    fi
+else
+    # Run all tests
+    test_dirs=$(find_test_dirs)
+
+    if [ -z "$test_dirs" ]; then
+        echo -e "${YELLOW}No test directories found${NC}"
+        exit 0
+    fi
+
+    for test_dir in $test_dirs; do
+        run_tests_in_dir "$test_dir" || HAS_FAILURES=1
+    done
+fi
+
+# Show summary of failed tests
+if [ ${#FAILED_TESTS[@]} -gt 0 ]; then
+    echo ""
+    echo "========================================"
+    echo " Failed Tests Summary"
+    echo "========================================"
+    echo ""
+    for failed_test in "${FAILED_TESTS[@]}"; do
+        # Parse module|file|test_name format
+        module_name=$(echo "$failed_test" | cut -d'|' -f1)
+        file_name=$(echo "$failed_test" | cut -d'|' -f2)
+        test_name=$(echo "$failed_test" | cut -d'|' -f3)
+        echo -e "  ${RED}✗${NC} ${CYAN}[$module_name]${NC} ${RED}$file_name${NC} $test_name"
+    done
+    echo ""
+    exit 1
+fi
+
+# Fail if a bats run itself failed (e.g. a crashed or unparseable run),
+# even when no individual failing test could be extracted above
+if [ "$HAS_FAILURES" -ne 0 ]; then
+    echo -e "${RED}Some BATS tests failed${NC}"
+    exit 1
+fi
+
+echo -e "${GREEN}All BATS tests passed!${NC}"
diff --git a/testing/run_integration_tests.sh b/testing/run_integration_tests.sh
new file mode 100755
index 00000000..0a020f60
--- /dev/null
+++ b/testing/run_integration_tests.sh
@@ -0,0 +1,223 @@
+#!/bin/bash
+# =============================================================================
+# Test runner for all integration tests (BATS) across all modules
+#
+# Tests run inside a Docker container with:
+# - LocalStack for AWS emulation
+# - Moto for CloudFront emulation
+# - Smocker for nullplatform API mocking
+#
+# Usage:
+#   ./testing/run_integration_tests.sh              # Run all tests
+#   ./testing/run_integration_tests.sh frontend     # Run tests for frontend module only
+#   ./testing/run_integration_tests.sh --build      # Rebuild containers before running
+#   ./testing/run_integration_tests.sh -v|--verbose # Show output of passing tests
+# =============================================================================
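+
+# A sketch of what an integration test using the helpers looks like
+# (illustrative names, not from this repo):
+#
+#   @test "deployment fetches its provider" {
+#       clear_mocks
+#       mock_request "GET" "/providers/123" 200 '{"id": "123"}'
+#       run_workflow "deployment/workflows/initial.yaml"
+#       assert_mock_called "GET" "/providers/123"
+#   }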
+
+set -e
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
+cd "$PROJECT_ROOT"
+
+# Colors
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+CYAN='\033[0;36m'
+NC='\033[0m'
+
+# Parse arguments
+MODULE=""
+BUILD_FLAG=""
+VERBOSE=""
+
+for arg in "$@"; do
+    case $arg in
+        --build)
+            BUILD_FLAG="--build"
+            ;;
+        -v|--verbose)
+            VERBOSE="--show-output-of-passing-tests"
+            ;;
+        *)
+            MODULE="$arg"
+            ;;
+    esac
+done
+
+# Docker compose file location
+COMPOSE_FILE="$SCRIPT_DIR/docker/docker-compose.integration.yml"
+
+# Check if docker is installed
+if ! command -v docker &> /dev/null; then
+    echo -e "${RED}docker is not installed${NC}"
+    echo ""
+    echo "Install with:"
+    echo "  brew install docker      # macOS"
+    echo "  apt install docker.io    # Ubuntu/Debian"
+    echo "  apk add docker           # Alpine"
+    echo "  choco install docker     # Windows"
+    exit 1
+fi
+
+# Check if docker compose file exists
+if [ ! -f "$COMPOSE_FILE" ]; then
+    echo -e "${RED}Docker compose file not found: $COMPOSE_FILE${NC}"
+    exit 1
+fi
+
+# Generate certificates if they don't exist
+CERT_DIR="$SCRIPT_DIR/docker/certs"
+if [ ! -f "$CERT_DIR/cert.pem" ] || [ ! -f "$CERT_DIR/key.pem" ]; then
+    echo -e "${CYAN}Generating TLS certificates...${NC}"
+    "$SCRIPT_DIR/docker/generate-certs.sh"
+fi
+
+# Find all integration test directories
+find_test_dirs() {
+    find . -type d -name "integration" -path "*/tests/*" -not -path "*/node_modules/*" 2>/dev/null | sort
+}
+
+# Get module name from test path
+get_module_name() {
+    local path="$1"
+    echo "$path" | sed 's|^\./||' | cut -d'/' -f1
+}
+
+# Cleanup function
+cleanup() {
+    echo ""
+    echo -e "${CYAN}Stopping containers...${NC}"
+    docker compose -f "$COMPOSE_FILE" down -v 2>/dev/null || true
+}
+
+echo ""
+echo "========================================"
+echo " Integration Tests (Containerized)"
+echo "========================================"
+echo ""
+
+# Print available test helpers reference
+source "$SCRIPT_DIR/integration_helpers.sh"
+test_help
+echo ""
+
+# Set trap for cleanup
+trap cleanup EXIT
+
+# Build test runner and azure-mock images if needed
+echo -e "${CYAN}Building containers...${NC}"
+docker compose -f "$COMPOSE_FILE" build $BUILD_FLAG test-runner azure-mock 2>&1 | grep -v "^$" || true
+echo ""
+
+# Start infrastructure services
+echo -e "${CYAN}Starting infrastructure services...${NC}"
+docker compose -f "$COMPOSE_FILE" up -d localstack moto azure-mock smocker nginx-proxy 2>&1 | grep -v "^$" || true
+
+# Wait for services to be healthy
+echo -n "Waiting for services to be ready"
+max_attempts=30
+attempt=0
+
+while [ $attempt -lt $max_attempts ]; do
+    # Check health via curl (most reliable)
+    localstack_ok=$(curl -s "http://localhost:4566/_localstack/health" 2>/dev/null | jq -e '.services.s3 == "running"' >/dev/null 2>&1 && echo "yes" || echo "no")
+    moto_ok=$(curl -s "http://localhost:5555/moto-api/" >/dev/null 2>&1 && echo "yes" || echo "no")
+    azure_mock_ok=$(curl -s "http://localhost:8090/health" 2>/dev/null | jq -e '.status == "ok"' >/dev/null 2>&1 && echo "yes" || echo "no")
+    smocker_ok=$(curl -s "http://localhost:8081/version" >/dev/null 2>&1 && echo "yes" || echo "no")
+    nginx_ok=$(curl -sk "https://localhost:8443/mocks" >/dev/null 2>&1 && echo "yes" || echo "no")
+
+    if [[ "$localstack_ok" == "yes" ]] && [[ "$moto_ok" == "yes" ]] && [[ "$azure_mock_ok" == "yes" ]] && [[ "$smocker_ok" == "yes" ]] && [[ "$nginx_ok" == "yes" ]]; then
+        echo ""
+        echo -e "${GREEN}All services ready${NC}"
+        break
+    fi
+
+    attempt=$((attempt + 1))
+    sleep 2
+    echo -n "."
+done
+
+if [ $attempt -eq $max_attempts ]; then
+    echo ""
+    echo -e "${RED}Services failed to start${NC}"
+    docker compose -f "$COMPOSE_FILE" logs
+    exit 1
+fi
+
+echo ""
+
+# Get smocker container IP for DNS resolution
+SMOCKER_IP=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' integration-smocker 2>/dev/null || echo "172.28.0.10")
+export SMOCKER_IP
+
+# Determine which tests to run
+if [ -n "$MODULE" ]; then
+    if [ -d "$MODULE" ]; then
+        TEST_PATHS=$(find "$MODULE" -type d -name "integration" -path "*/tests/*" 2>/dev/null | sort)
+        if [ -z "$TEST_PATHS" ]; then
+            echo -e "${RED}No integration test directories found in: $MODULE${NC}"
+            exit 1
+        fi
+    else
+        echo -e "${RED}Directory not found: $MODULE${NC}"
+        echo ""
+        echo "Available modules with integration tests:"
+        for dir in $(find_test_dirs); do
+            echo "  - $(get_module_name "$dir")"
+        done | sort -u
+        exit 1
+    fi
+else
+    TEST_PATHS=$(find_test_dirs)
+    if [ -z "$TEST_PATHS" ]; then
+        echo -e "${YELLOW}No integration test directories found${NC}"
+        exit 0
+    fi
+fi
+
+# Run tests for each directory
+TOTAL_FAILED=0
+
+for test_dir in $TEST_PATHS; do
+    module_name=$(get_module_name "$test_dir")
+
+    # Find .bats files recursively (supports test_cases/ subfolder structure)
+    bats_files=$(find "$test_dir" -name "*.bats" 2>/dev/null | sort)
+    if [ -z "$bats_files" ]; then
+        continue
+    fi
+
+    echo -e "${CYAN}[$module_name]${NC} Running integration tests in $test_dir"
+    echo ""
+
+    # Strip leading ./ from test_dir for cleaner paths
+    container_test_dir="${test_dir#./}"
+
+    # Build list of test files for bats (space-separated, container paths)
+    container_bats_files=""
+    for bats_file in $bats_files; do
+        container_path="/workspace/${bats_file#./}"
+        container_bats_files="$container_bats_files $container_path"
+    done
+
+    # Run tests inside the container
+    docker compose -f "$COMPOSE_FILE" run --rm \
+        -e PROJECT_ROOT=/workspace \
+        -e SMOCKER_HOST=http://smocker:8081 \
+        -e LOCALSTACK_ENDPOINT=http://localstack:4566 \
+        -e MOTO_ENDPOINT=http://moto:5000 \
+        -e AWS_ENDPOINT_URL=http://localstack:4566 \
+        test-runner \
+        -c "update-ca-certificates 2>/dev/null; bats --formatter pretty $VERBOSE $container_bats_files" || TOTAL_FAILED=$((TOTAL_FAILED + 1))
+
+    echo ""
done
+
+if [ $TOTAL_FAILED -gt 0 ]; then
+    echo -e "${RED}Some integration tests failed${NC}"
+    exit 1
+else
+    echo -e "${GREEN}All integration tests passed!${NC}"
+fi
diff --git a/testing/run_tofu_tests.sh b/testing/run_tofu_tests.sh
new file mode 100755
index 00000000..1c1ee77f
--- /dev/null
+++ b/testing/run_tofu_tests.sh
@@ -0,0 +1,121 @@
+#!/bin/bash
+# =============================================================================
+# Test runner for all OpenTofu/Terraform tests across all modules
+#
+# Usage:
+#   ./testing/run_tofu_tests.sh                                            # Run all tests
+#   ./testing/run_tofu_tests.sh frontend                                   # Run tests for frontend module only
+#   ./testing/run_tofu_tests.sh frontend/deployment/provider/aws/modules   # Run specific test directory
+# =============================================================================
+
+set -e
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
+cd "$PROJECT_ROOT"
+
+# Colors
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+CYAN='\033[0;36m'
+NC='\033[0m'
+
+# Check if tofu is installed
+if ! command -v tofu &> /dev/null; then
+    echo -e "${RED}OpenTofu is not installed${NC}"
+    echo ""
+    echo "Install with:"
+    echo "  brew install opentofu    # macOS"
+    echo "  apt install tofu         # Ubuntu/Debian"
+    echo "  apk add opentofu         # Alpine"
+    echo "  choco install opentofu   # Windows"
+    echo ""
+    echo "See https://opentofu.org/docs/intro/install/"
+    exit 1
+fi
+
+# Find all directories with .tftest.hcl files
+find_test_dirs() {
+    find . -name "*.tftest.hcl" -not -path "*/node_modules/*" 2>/dev/null | xargs -I{} dirname {} | sort -u
+}
+
+# Get module name from test path
+get_module_name() {
+    local path="$1"
+    echo "$path" | sed 's|^\./||' | cut -d'/' -f1
+}
+
+# Run tests for a specific directory
+run_tests_in_dir() {
+    local test_dir="$1"
+    local module_name=$(get_module_name "$test_dir")
+
+    # Check if there are .tftest.hcl files
+    if ! ls "$test_dir"/*.tftest.hcl &>/dev/null; then
+        return 0
+    fi
+
+    echo -e "${CYAN}[$module_name]${NC} Running OpenTofu tests in $test_dir"
+    echo ""
+
+    (
+        cd "$test_dir"
+
+        # Initialize if needed (without backend)
+        if [ ! -d ".terraform" ]; then
+            tofu init -backend=false -input=false >/dev/null 2>&1 || true
+        fi
+
+        # Run tests
+        tofu test
+    )
+
+    echo ""
+}
+
+echo ""
+echo "========================================"
+echo " OpenTofu Tests"
+echo "========================================"
+echo ""
+
+if [ -n "$1" ]; then
+    # Run tests for specific module or directory
+    if [ -d "$1" ] && ls "$1"/*.tftest.hcl &>/dev/null; then
+        # Direct test directory path with .tftest.hcl files
+        run_tests_in_dir "$1"
+    elif [ -d "$1" ]; then
+        # Module name (e.g., "frontend") - find all test directories under it
+        module_test_dirs=$(find "$1" -name "*.tftest.hcl" 2>/dev/null | xargs -I{} dirname {} | sort -u)
+        if [ -z "$module_test_dirs" ]; then
+            echo -e "${RED}No OpenTofu test files found in: $1${NC}"
+            exit 1
+        fi
+        for test_dir in $module_test_dirs; do
+            run_tests_in_dir "$test_dir"
+        done
+    else
+        echo -e "${RED}Directory not found: $1${NC}"
+        echo ""
+        echo "Available modules with OpenTofu tests:"
+        for dir in $(find_test_dirs); do
+            echo "  - $(get_module_name "$dir")"
+        done | sort -u
+        exit 1
+    fi
+else
+    # Run all tests
+    test_dirs=$(find_test_dirs)
+
+    if [ -z "$test_dirs" ]; then
+        echo -e "${YELLOW}No OpenTofu test files found${NC}"
+        exit 0
+    fi
+
+    for test_dir in $test_dirs; do
+        run_tests_in_dir "$test_dir"
+    done
+fi
+
+echo -e "${GREEN}All OpenTofu tests passed!${NC}"
diff --git a/workflow.schema.json b/workflow.schema.json
index 713d27c0..d972e698 100644
--- a/workflow.schema.json
+++ b/workflow.schema.json
@@ -3,8 +3,9 @@
   "title": "Workflow",
   "additionalProperties": false,
   "type": "object",
-  "required": [
-    "steps"
+  "anyOf": [
+    { "required": ["steps"] },
+    { "required": ["include"] }
   ],
   "properties": {
     "steps": {