From 813c31eb77d13a8e887229d1821b84ad473ac064 Mon Sep 17 00:00:00 2001 From: Ryan VanGundy Date: Wed, 14 May 2025 12:42:55 -0400 Subject: [PATCH 1/3] Add test suite, style, and cursorrules --- .cursorrules | 31 ++ .github/workflows/ci.yaml | 7 + .gitignore | 1 + Taskfile.yaml | 22 +- terraform/STYLE.md | 273 ++++++++++++++++++ terraform/backend/azurerm/main.tf | 4 +- terraform/backend/azurerm/test.tftest.hcl | 164 +++++++++++ terraform/backend/s3/main.tf | 125 ++++---- terraform/backend/s3/templates/backend.tftpl | 7 +- terraform/backend/s3/test.tftest.hcl | 216 ++++++++++++++ terraform/backend/s3/variables.tf | 24 +- terraform/cluster/azure-aks/main.tf | 1 + terraform/cluster/azure-aks/test.tftest.hcl | 217 ++++++++++++++ terraform/cluster/talos/main.tf | 6 +- .../talos/modules/machine/.terraform.lock.hcl | 24 ++ .../talos/modules/machine/test.tftest.hcl | 158 ++++++++++ terraform/cluster/talos/test.tftest.hcl | 258 +++++++++++++++++ terraform/gitops/flux/test.tftest.hcl | 118 ++++++++ terraform/network/azure-vnet/test.tftest.hcl | 101 +++++++ 19 files changed, 1676 insertions(+), 81 deletions(-) create mode 100644 .cursorrules create mode 100644 terraform/STYLE.md create mode 100644 terraform/backend/azurerm/test.tftest.hcl create mode 100644 terraform/backend/s3/test.tftest.hcl create mode 100644 terraform/cluster/azure-aks/test.tftest.hcl create mode 100644 terraform/cluster/talos/modules/machine/.terraform.lock.hcl create mode 100644 terraform/cluster/talos/modules/machine/test.tftest.hcl create mode 100644 terraform/cluster/talos/test.tftest.hcl create mode 100644 terraform/gitops/flux/test.tftest.hcl create mode 100644 terraform/network/azure-vnet/test.tftest.hcl diff --git a/.cursorrules b/.cursorrules new file mode 100644 index 00000000..4ab2b7a5 --- /dev/null +++ b/.cursorrules @@ -0,0 +1,31 @@ +# CRITICAL PLATFORM ENGINEERING RULES + +## CODE STYLE GUIDELINES +When writing terraform code, you must follow Windsor Core style guidelines in terraform/STYLE.md: +1. Module Structure +2. Documentation Style +3. Testing Patterns +4. Code Organization + +## TERRAFORM TEST ENGINEERING +When developing Terraform tests: +1. Start with empty mock providers: `mock_provider "provider" {}` and run tests immediately to identify validation failures. +2. Add mocks only for failing validations, starting with data sources and mocking at the lowest level possible (data sources over resources). Keep mock values minimal and focused on test requirements. +3. For count-based resources, use `length()` in assertions. +4. Only assert on resource values that are directly controlled by input variables, or on the impact of module default values in minimal configs. Do not assert on arbitrary resource values or implementation details. +5. Prioritize validating locals and complex logic, especially for naming, computed values, and conditional logic, to ensure module correctness. +6. When testing naming logic or locals, validate default and override behaviors in the minimal and full configuration tests, not in a separate test block. +7. Expand test comments to be descriptive, following the style of the most detailed test suites in the repo. +8. Combine negative tests for validation rules into a single test case using `expect_failures`, rather than creating separate tests for each validation rule. 
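+
+Example of rule 8: a minimal sketch of a combined negative test. The variable names and validation rules here are illustrative, not taken from a real module in this repo:
+
+```hcl
+run "multiple_invalid_inputs" {
+  command = plan
+  expect_failures = [
+    var.name,
+    var.cidr,
+  ]
+  variables {
+    name = ""           # violates a hypothetical non-empty validation rule
+    cidr = "not-a-cidr" # violates a hypothetical CIDR-format validation rule
+  }
+}
+```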
+
+Reference:
+- Internal: See "Testing Patterns" section in terraform/STYLE.md
+- External: https://developer.hashicorp.com/terraform/language/tests
+
+## AUDIO NOTIFICATIONS
+Must use `say` command with Samantha voice for:
+- Test Completion: "All tests are now passing"
+- Test Failure: "Test failure detected in [test name]"
+- Source Code Bug: "Source code bug detected in [function]. Please review."
+- User Input Needed: "User input required for [specific issue]"
+- Work Complete: "Platform engineering work complete"
diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 1cf52c62..ee7cdf8e 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -70,6 +70,13 @@ jobs:
       - name: Run terraform fmt
         run: terraform fmt -check -recursive
 
+      - name: Run Terraform Tests
+        run: |
+          find terraform -type f -name '*.tftest.hcl' | while read testfile; do
+            testdir=$(dirname "$testfile")
+            (cd "$testdir" && terraform init -input=false && terraform test)
+          done
+
       - name: Create .docker-cache directory
         run: mkdir -p .windsor/.docker-cache
 
diff --git a/.gitignore b/.gitignore
index 01d98ae4..f199469c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,4 @@
+.terraform/
 # managed by windsor cli
 .windsor/
 
diff --git a/Taskfile.yaml b/Taskfile.yaml
index 2ab1f35a..11f8707c 100644
--- a/Taskfile.yaml
+++ b/Taskfile.yaml
@@ -4,4 +4,24 @@ tasks:
   scan:
     desc: Scan for security vulnerabilities
     cmds:
-      - source .venv/bin/activate && checkov -d {{.CLI_ARGS | default "terraform/"}} 2>/dev/null
\ No newline at end of file
+      - source .venv/bin/activate && checkov -d {{.CLI_ARGS | default "terraform/"}} 2>/dev/null
+
+  test:
+    desc: Run Terraform tests (all or specific module)
+    cmds:
+      - |
+        MODULE={{.CLI_ARGS | default "terraform"}}
+        if [ -d "$MODULE" ]; then
+          find "$MODULE" -type f -name '*.tftest.hcl' | while read testfile; do
+            testdir=$(dirname "$testfile")
+            (cd "$testdir" && terraform init -input=false && terraform test)
+          done
+        else
+          echo "Module path '$MODULE' does not exist."
+          exit 1
+        fi
+
+  fmt:
+    desc: Format Terraform files recursively
+    cmds:
+      - terraform fmt -recursive
\ No newline at end of file
diff --git a/terraform/STYLE.md b/terraform/STYLE.md
new file mode 100644
index 00000000..877bde8a
--- /dev/null
+++ b/terraform/STYLE.md
@@ -0,0 +1,273 @@
+# Windsor Core Code Style Guide
+
+## Best Practices
+
+1. Use consistent, descriptive resource names with underscores (_), never hyphens (-).
+2. Minimize submodules; only use them for resource reuse within a parent module. Never use third-party modules.
+3. Keep modules focused and small; avoid unnecessary abstraction.
+4. Validate relevant user inputs with type constraints and validation blocks.
+5. Use mock providers in tests to isolate module logic.
+6. Group resources by logical function and use section headers for clarity.
+7. Document all public variables and outputs clearly.
+8. Prefer local variables for complex expressions or repeated logic.
+9. Keep resource dependencies explicit through references; use `depends_on` only when necessary.
+10. Avoid inline comments inside resource blocks; use block-level comments for documentation.
+11. Avoid using `terraform_remote_state` data sources; prefer explicit variable passing between modules.
+12. Avoid using data sources to implicitly reference resources; prefer explicit resource references or variable passing.
+13. Parameterize module inputs as variables rather than hardcoding values or using data sources.
+14. Mark sensitive values (credentials, keys, tokens) with `sensitive = true` in both input and output variables.
+
+## Folder Structure
+
+- The top-level folders represent generic system-oriented layers (e.g., `backend`, `network`, `cluster`, `gitops`).
+- The second-level folders represent different implementations of that layer.
+- Implementations may be prefixed by vendor (e.g., `azure-`, `aws-`).
+- This structure allows for clear separation of concerns and easy addition of new implementations.
+
+Example:
+```
+/
+├── backend/
+│   ├── azurerm/
+│   └── s3/
+├── network/
+│   ├── azure-vnet/
+│   └── aws-vpc/
+├── cluster/
+│   ├── talos/
+│   └── aws-eks/
+└── gitops/
+    └── flux/
+```
+
+### Resource Naming
+- Always use underscores (_) for resource names, never hyphens (-).
+- Resource names should be descriptive and consistent across the codebase.
+- Avoid redundancy; if the resource type is a "network," do not include "_network" in the name.
+- Example: `resource "azurerm_virtual_network" "core" { ... }` instead of `resource "azurerm_virtual_network" "core_network" { ... }`.
+
+### Submodules
+- Submodules should be minimized.
+- Only introduce a submodule when a specific resource or resource group is reused in several places within its parent module.
+- Submodules should not be used for simple grouping or organization.
+- Never use third-party modules; all submodules must be defined within this repository.
+- Example use case: a submodule for a repeated storage resource used by multiple components in the parent module.
+
+## Module Structure
+
+A typical Terraform module should contain:
+1. Main module file (`main.tf`)
+2. Variables file (`variables.tf`)
+3. Outputs file (`outputs.tf`)
+4. Test file (`test.tftest.hcl`)
+5. Documentation file (`README.md`)
+
+## File Organization
+
+### Main Module File
+1. Provider configuration
+2. Resource definitions grouped by logical function
+3. Data source lookups
+4. Local variables
+5. Section headers using `# =============================================================================`
+
+### Variables File
+1. Input variable definitions with strict validation
+2. Comprehensive descriptions
+3. Sensible defaults where appropriate
+4. Type constraints
+5. Validation rules using regex where applicable
+
+Example:
+```hcl
+variable "cluster_name" {
+  description = "The name of the cluster."
+  type        = string
+  default     = "talos"
+  validation {
+    condition     = length(var.cluster_name) > 0
+    error_message = "The cluster name must not be empty."
+  }
+}
+```
+
+### Test File
+1. Mock provider configuration
+2. Test variables with realistic defaults
+3. Test cases organized by configuration scenario
+4. Clear test descriptions
+5. 
Comprehensive assertions + +Example test structure: +```hcl +mock_provider "provider" {} + +run "scenario_name" { + command = plan + + variables { + # Test variables with realistic defaults + } + + assert { + condition = resource.attribute == expected_value + error_message = "Clear error message" + } +} +``` + +## Documentation Style + +### Module Headers +Every module MUST begin with a header in the following format: +```hcl +# The [ModuleName] is a [brief description] +# It provides [detailed explanation] +# [role in infrastructure] +# [key features/capabilities] +``` + +### Section Headers +Section headers MUST follow this exact format: +```hcl +# ============================================================================= +# [SECTION NAME] +# ============================================================================= +``` + +File Organization: +1. `outputs.tf` - Contains all module outputs +2. `variables.tf` - Contains all input variables +3. `main.tf` - Contains resources organized by logical grouping + +Section names in `main.tf` should be organized by logical resource grouping. Common groupings include: +1. Provider Configuration +2. Network Resources +3. Compute Resources +4. Storage Resources +5. Security Resources + +Local variables should be defined within their relevant resource sections, not in a separate section. + +Example: +```hcl +# ============================================================================= +# Provider Configuration +# ============================================================================= + +provider "azurerm" { + features {} +} + +# ============================================================================= +# Network Resources +# ============================================================================= + +locals { + vnet_name = "${var.prefix}-vnet" +} + +resource "azurerm_virtual_network" "this" { + name = local.vnet_name + # ... +} + +# ============================================================================= +# Compute Resources +# ============================================================================= + +locals { + vm_name = "${var.prefix}-vm" +} + +resource "azurerm_virtual_machine" "this" { + name = local.vm_name + # ... +} +``` + +### Resource Documentation +- Brief description at the top of each resource +- No inline comments within resource blocks +- Focus on what and why, not how + +Example: +```hcl +# The [ResourceName] is a [brief description] +# It provides [detailed explanation] +resource "resource_type" "resource_name" { + # Configuration +} +``` + +## Testing Patterns + +### Test Structure +Tests should follow a clear scenario-based structure: + +```hcl +run "Scenario" { + command = plan + + # Given [context] + variables { + # Test variables with realistic defaults + } + + # When [action] + # (implicit in the plan/apply command) + + # Then [result] + assert { + condition = resource.attribute == expected_value + error_message = "Expected X, got Y" + } +} +``` + +### Module Test Pattern + +Each module should be tested with the following scenarios, as applicable: + +1. **Minimal Configuration** + - Only required variables set. + - Asserts that default resources are created with expected default values. + - Example: Verifies that required outputs/files/resources are generated. + +2. **Full Configuration** + - All optional variables set. + - Asserts that all features and customizations are reflected in the outputs/resources. + - Example: Verifies that all optional resources are created and attributes match inputs. 
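+
+   A minimal sketch of such a full-configuration run (the resource type, variable names, and expected values are illustrative, not from a real module):
+
+   ```hcl
+   run "full_configuration" {
+     command = plan
+
+     variables {
+       name           = "custom-name"
+       enable_feature = true
+     }
+
+     assert {
+       condition     = example_resource.this.name == "custom-name"
+       error_message = "Name should match input"
+     }
+   }
+   ```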
+ +3. **Feature/Conditional Configuration** + - Enables/disables specific features or toggles. + - Asserts that resources are created or omitted as expected. + - Example: Verifies that enabling a feature creates a resource, disabling omits it. + +4. **Module-Specific/Edge Cases** + - Tests unique logic, edge cases, or error conditions. + - Example: Verifies that no files are created when a required path is empty, or that invalid input is handled with a clear error. + +5. **Combined Negative Tests** + - Test multiple validation rules simultaneously in a single test case. + - Use `expect_failures` to verify all validation rules are enforced. + - Include invalid values for all variables with validation rules. + - Example: Testing all input validations (type constraints, format requirements, YAML validation) in one test. + +**Assertions should:** +- Check for presence/absence of resources, files, or outputs. +- Validate that resource attributes match input variables. +- Confirm correct handling of edge cases and error conditions. +- Use `expect_failures` for negative tests to verify validation rules. + +**Tests should not:** +- Assert on implementation details not exposed via outputs or resources. +- Create separate negative tests for each validation rule when they can be combined. + +### Test Organization +1. Group related tests together by configuration scenario +2. Use descriptive scenario names +3. Test both success and failure cases +4. Validate all important attributes +5. Include edge cases +6. Use mock providers for external dependencies diff --git a/terraform/backend/azurerm/main.tf b/terraform/backend/azurerm/main.tf index 39d88390..7ac527f3 100644 --- a/terraform/backend/azurerm/main.tf +++ b/terraform/backend/azurerm/main.tf @@ -46,7 +46,7 @@ resource "azurerm_storage_account" "this" { for_each = var.enable_cmk ? [1] : [] content { type = "UserAssigned" - identity_ids = [azurerm_user_assigned_identity.storage.id] + identity_ids = [azurerm_user_assigned_identity.storage[0].id] } } @@ -118,7 +118,7 @@ locals { #--------------------------------------------------------------------------------------------------- resource "local_file" "backend_config" { - count = var.context_path != "" ? 1 : 0 + count = trim(var.context_path, " ") != "" ? 1 : 0 content = templatefile("${path.module}/templates/backend.tftpl", { resource_group_name = local.resource_group_name storage_account_name = azurerm_storage_account.this.name diff --git a/terraform/backend/azurerm/test.tftest.hcl b/terraform/backend/azurerm/test.tftest.hcl new file mode 100644 index 00000000..dcdd542a --- /dev/null +++ b/terraform/backend/azurerm/test.tftest.hcl @@ -0,0 +1,164 @@ +mock_provider "azurerm" {} + +# Verifies that the module creates resources with default naming conventions and basic configuration. 
+# Tests the impact of module default values in minimal configuration, including: +# - Default resource naming (resource group, storage account, container) +# - Default network rules (public access allowed) +# - Default storage account settings (tier, replication, TLS version) +run "minimal_configuration" { + command = plan + + variables { + context_id = "test" + location = "eastus2" + } + + assert { + condition = azurerm_resource_group.this.name == "rg-tfstate-test" + error_message = "Resource group name should follow default naming convention" + } + + assert { + condition = azurerm_storage_account.this.name == "tfstatetest" + error_message = "Storage account name should follow default naming convention" + } + + assert { + condition = azurerm_storage_container.this.name == "tfstate-test" + error_message = "Container name should follow default naming convention" + } + + assert { + condition = azurerm_storage_account.this.network_rules[0].default_action == "Allow" + error_message = "Default network rule action should be 'Allow'" + } + + assert { + condition = azurerm_storage_account.this.account_tier == "Standard" + error_message = "Default account tier should be 'Standard'" + } + + assert { + condition = azurerm_storage_account.this.account_replication_type == "LRS" + error_message = "Default replication type should be 'LRS'" + } + + assert { + condition = azurerm_storage_account.this.min_tls_version == "TLS1_2" + error_message = "Default TLS version should be 'TLS1_2'" + } +} + +# Tests a full configuration with all optional variables explicitly set. +# Validates that user-supplied values correctly override defaults for: +# - Resource naming +# - Network security rules +# - Storage account configuration +# - CMK encryption settings +run "full_configuration" { + command = plan + + variables { + context_id = "test" + location = "eastus2" + resource_group_name = "custom-rg" + storage_account_name = "customsa" + container_name = "customcontainer" + allow_public_access = false + allowed_ip_ranges = ["8.8.8.0/24"] + enable_cmk = true + key_vault_key_id = "https://test-keyvault.vault.azure.net/keys/test-key" + } + + assert { + condition = azurerm_resource_group.this.name == "custom-rg" + error_message = "Resource group name should match input" + } + + assert { + condition = azurerm_storage_account.this.name == "customsa" + error_message = "Storage account name should match input" + } + + assert { + condition = azurerm_storage_container.this.name == "customcontainer" + error_message = "Container name should match input" + } + + assert { + condition = azurerm_storage_account.this.network_rules[0].default_action == "Deny" + error_message = "Network rule action should be 'Deny' when public access is disabled" + } + + assert { + condition = contains(azurerm_storage_account.this.network_rules[0].ip_rules, "8.8.8.0/24") + error_message = "IP rule should include allowed range" + } + + assert { + condition = azurerm_user_assigned_identity.storage[0].name == "id-storage-test" + error_message = "User-assigned identity name should follow naming convention" + } + + assert { + condition = azurerm_storage_account.this.identity[0].type == "UserAssigned" + error_message = "Storage account should have UserAssigned identity when CMK is enabled" + } +} + +# Validates that the backend configuration file is generated with correct resource names +# when a context path is provided, enabling Terraform to use the Azure backend +run "backend_config_generation" { + command = plan + + variables { + context_id = "test" + 
location = "eastus2" + context_path = "test" + } + + assert { + condition = length(local_file.backend_config) == 1 + error_message = "Backend config should be generated with context path" + } + + assert { + condition = trimspace(local_file.backend_config[0].content) == trimspace(<= 3 && + length(var.s3_bucket_name) <= 63 && + can(regex("^[a-z0-9][a-z0-9.-]*[a-z0-9]$", var.s3_bucket_name)) && + !can(regex("\\.\\.", var.s3_bucket_name)) && + !can(regex("[.-]$|^[.-]", var.s3_bucket_name)) + ) + ) + error_message = "S3 bucket name must be 3-63 characters, lowercase letters, numbers, hyphens, periods, no consecutive periods, and cannot start/end with period or hyphen." } } @@ -56,6 +64,12 @@ variable "kms_key_alias" { description = "The KMS key ID for encrypting the S3 bucket" type = string default = "" + validation { + condition = ( + var.kms_key_alias == "" || can(regex("^alias\\/[a-zA-Z0-9/_-]+$", var.kms_key_alias)) + ) + error_message = "KMS key alias must be empty or match ^alias/[a-zA-Z0-9/_-]+$" + } } #--------------------------------------------------------------------------------------------------- @@ -74,6 +88,12 @@ variable "enable_kms" { default = true } +variable "kms_policy_override" { + description = "Override for the KMS policy document (for testing)" + type = string + default = null +} + #--------------------------------------------------------------------------------------------------- # Tags and IAM Roles #--------------------------------------------------------------------------------------------------- diff --git a/terraform/cluster/azure-aks/main.tf b/terraform/cluster/azure-aks/main.tf index 3a464282..c4f70ba2 100644 --- a/terraform/cluster/azure-aks/main.tf +++ b/terraform/cluster/azure-aks/main.tf @@ -272,6 +272,7 @@ resource "azurerm_kubernetes_cluster_node_pool" "autoscaled" { } resource "local_file" "kube_config" { + count = var.context_path != "" ? 1 : 0 content = azurerm_kubernetes_cluster.main.kube_config_raw filename = local.kubeconfig_path } diff --git a/terraform/cluster/azure-aks/test.tftest.hcl b/terraform/cluster/azure-aks/test.tftest.hcl new file mode 100644 index 00000000..9fb95b9f --- /dev/null +++ b/terraform/cluster/azure-aks/test.tftest.hcl @@ -0,0 +1,217 @@ +mock_provider "azurerm" { + mock_data "azurerm_client_config" { + defaults = { + tenant_id = "11111111-1111-1111-1111-111111111111" + object_id = "22222222-2222-2222-2222-222222222222" + } + } + mock_data "azurerm_subnet" { + defaults = { + id = "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Network/virtualNetworks/vnet-test/subnets/subnet-test" + } + } +} +mock_provider "local" {} +mock_provider "time" {} + +# Verifies that the module creates an AKS cluster with minimal configuration, +# ensuring that all default values are correctly applied and only required variables are set. 
+run "minimal_configuration" { + command = plan + + variables { + context_id = "test" + } + + assert { + condition = azurerm_kubernetes_cluster.main.name == "windsor-aks-cluster-test" + error_message = "Cluster name should default to 'windsor-aks-cluster-test' when cluster_name is omitted" + } + + assert { + condition = azurerm_resource_group.aks.name == "windsor-aks-rg-test" + error_message = "Resource group name should default to 'windsor-aks-rg-test' when resource_group_name is omitted" + } + + assert { + condition = azurerm_kubernetes_cluster.main.default_node_pool[0].name == "system" + error_message = "Default node pool should use 'system' name" + } + + assert { + condition = azurerm_kubernetes_cluster.main.default_node_pool[0].vm_size == "Standard_D2s_v3" + error_message = "Default node pool should use Standard_D2s_v3 VM size" + } + + assert { + condition = azurerm_kubernetes_cluster.main.default_node_pool[0].node_count == 1 + error_message = "Default node pool should have 1 node" + } + + assert { + condition = azurerm_kubernetes_cluster.main.role_based_access_control_enabled == true + error_message = "RBAC should be enabled by default" + } + + assert { + condition = azurerm_kubernetes_cluster.main.private_cluster_enabled == false + error_message = "Private cluster should be disabled by default" + } + + assert { + condition = azurerm_kubernetes_cluster.main.azure_policy_enabled == true + error_message = "Azure policy should be enabled by default" + } + + assert { + condition = azurerm_kubernetes_cluster.main.local_account_disabled == false + error_message = "Local accounts should be enabled by default" + } +} + +# Tests a full configuration with all optional variables explicitly set, +# verifying that the module correctly applies all user-supplied values for node pools and feature flags. 
+run "full_configuration" { + command = plan + + variables { + context_id = "test" + cluster_name = "test-cluster" + resource_group_name = "test-rg" + kubernetes_version = "1.32" + default_node_pool = { + name = "system" + vm_size = "Standard_D2s_v3" + os_disk_type = "Managed" + max_pods = 30 + host_encryption_enabled = true + min_count = 1 + max_count = 3 + node_count = 1 + } + autoscaled_node_pool = { + enabled = true + name = "autoscaled" + vm_size = "Standard_D2s_v3" + mode = "User" + os_disk_type = "Managed" + max_pods = 30 + host_encryption_enabled = true + min_count = 1 + max_count = 3 + } + role_based_access_control_enabled = true + private_cluster_enabled = false + azure_policy_enabled = true + local_account_disabled = false + } + + assert { + condition = azurerm_kubernetes_cluster.main.name == "test-cluster" + error_message = "Cluster name should match input" + } + + assert { + condition = azurerm_resource_group.aks.name == "test-rg" + error_message = "Resource group name should match input" + } + + assert { + condition = azurerm_kubernetes_cluster.main.default_node_pool[0].name == "system" + error_message = "Default node pool name should match input" + } + + assert { + condition = azurerm_kubernetes_cluster.main.default_node_pool[0].vm_size == "Standard_D2s_v3" + error_message = "Default node pool VM size should match input" + } + + assert { + condition = azurerm_kubernetes_cluster.main.default_node_pool[0].max_pods == 30 + error_message = "Default node pool max pods should match input" + } + + assert { + condition = azurerm_kubernetes_cluster.main.default_node_pool[0].host_encryption_enabled == true + error_message = "Default node pool host encryption should be enabled" + } + + assert { + condition = length(azurerm_kubernetes_cluster_node_pool.autoscaled) == 1 + error_message = "Autoscaled node pool should be created when enabled" + } + + assert { + condition = azurerm_kubernetes_cluster_node_pool.autoscaled[0].name == "autoscaled" + error_message = "Autoscaled node pool name should match input" + } + + assert { + condition = azurerm_kubernetes_cluster_node_pool.autoscaled[0].vm_size == "Standard_D2s_v3" + error_message = "Autoscaled node pool VM size should match input" + } + + assert { + condition = azurerm_kubernetes_cluster_node_pool.autoscaled[0].max_pods == 30 + error_message = "Autoscaled node pool max pods should match input" + } + + assert { + condition = azurerm_kubernetes_cluster_node_pool.autoscaled[0].host_encryption_enabled == true + error_message = "Autoscaled node pool host encryption should be enabled" + } + + assert { + condition = azurerm_kubernetes_cluster.main.role_based_access_control_enabled == true + error_message = "RBAC should be enabled" + } + + assert { + condition = azurerm_kubernetes_cluster.main.private_cluster_enabled == false + error_message = "Private cluster should be disabled" + } + + assert { + condition = azurerm_kubernetes_cluster.main.azure_policy_enabled == true + error_message = "Azure policy should be enabled" + } + + assert { + condition = azurerm_kubernetes_cluster.main.local_account_disabled == false + error_message = "Local accounts should be enabled" + } +} + +# Tests the private cluster configuration, ensuring that enabling the private_cluster_enabled +# variable results in a private AKS cluster as expected. 
+run "private_cluster" { + command = plan + + variables { + context_id = "test" + cluster_name = "test-cluster" + private_cluster_enabled = true + } + + assert { + condition = azurerm_kubernetes_cluster.main.private_cluster_enabled == true + error_message = "Private cluster should be enabled" + } +} + +# Verifies that no kubeconfig file is generated when context_path is empty, +# preventing unnecessary file creation in the root directory. +run "no_config_files" { + command = plan + + variables { + context_id = "test" + cluster_name = "test-cluster" + context_path = "" + } + + assert { + condition = length(local_file.kube_config) == 0 + error_message = "No kubeconfig file should be generated without context path" + } +} diff --git a/terraform/cluster/talos/main.tf b/terraform/cluster/talos/main.tf index 7c6496d7..3e407d1f 100644 --- a/terraform/cluster/talos/main.tf +++ b/terraform/cluster/talos/main.tf @@ -132,8 +132,8 @@ data "talos_client_configuration" "this" { // Write kubeconfig to a local file resource "local_sensitive_file" "kubeconfig" { - count = local.kubeconfig_path != "" ? 1 : 0 // Create file only if path is specified - depends_on = [local_sensitive_file.talosconfig] // Ensure Talos config is written first + count = trim(var.context_path, " ") != "" ? 1 : 0 // Create file only if path is specified and not empty/whitespace + depends_on = [local_sensitive_file.talosconfig] // Ensure Talos config is written first content = talos_cluster_kubeconfig.this.kubeconfig_raw filename = local.kubeconfig_path @@ -146,7 +146,7 @@ resource "local_sensitive_file" "kubeconfig" { // Write Talos config to a local file resource "local_sensitive_file" "talosconfig" { - count = local.talosconfig_path != "" ? 1 : 0 // Create file only if path is specified + count = trim(var.context_path, " ") != "" ? 1 : 0 // Create file only if path is specified and not empty/whitespace content = data.talos_client_configuration.this.talos_config filename = local.talosconfig_path diff --git a/terraform/cluster/talos/modules/machine/.terraform.lock.hcl b/terraform/cluster/talos/modules/machine/.terraform.lock.hcl new file mode 100644 index 00000000..515ed004 --- /dev/null +++ b/terraform/cluster/talos/modules/machine/.terraform.lock.hcl @@ -0,0 +1,24 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. 
+ +provider "registry.terraform.io/siderolabs/talos" { + version = "0.8.0" + hashes = [ + "h1:5Ik5stEr3alQjCwSs+hU7poVoa+CZh/Z2IH3MtiyHf4=", + "zh:0273010292fc4faa8c9c4c1f406a5c962a494f931fb2570ce880dc19af04342c", + "zh:0785a8cdb72c917da99ab8795cf5312ecfcf73fd06d9d9893e25867ca1af136f", + "zh:0fa82a384b25a58b65523e0ea4768fa1212b1f5cfc0c9379d31162454fedcc9d", + "zh:3936387665b16ab7e9ac08e1b25f1c65fdc1f8be54e6fb7dd4fa414bf1dfa261", + "zh:46ebd13aee6b5fa5abdcadb5e641125b69e9c48014d4625057e7bd5dc4d0a283", + "zh:5aabeb7c8f4dfcc8696aa4c6278043611e11f27dec42c3c0e090a495767bc274", + "zh:5f7acfe13775c29250f1cc37eddfc6cf7ee7e4cc58097f66ef6dc3dbe723fad8", + "zh:6170c27dce21ac47561755646b5fd821d14590fe503c100d73caaa5a34cba5a9", + "zh:7adc17bfb63a5ea7ce04785eb5374c0342ab8b5017538ecf77a3312feb3d4d6a", + "zh:944cd1a1fe3333bd97482b5b82e97b363b8b977d72e9b44f8e04c6e8d272a527", + "zh:a1e6c8e628847a583f6426d8d56be18e5da086630db9cb531c49f0fbf6db4ea4", + "zh:c920fff2336819ffffb81597bf3cf7d3b20cd07b0419a3dd20c62f2aed9696ac", + "zh:cfc260e85c3c88605b7705cdb2aefdd07f9b933bd98e26497f27c928a1232673", + "zh:d1927de9116cd9dbcdb1f550058e651c28c3b7dea58cae83feb79ddb69dbcb4c", + "zh:e9e11260645c35dc97b20a45b014ea279945ad4e09bc4f4fd898b32689c529e1", + ] +} diff --git a/terraform/cluster/talos/modules/machine/test.tftest.hcl b/terraform/cluster/talos/modules/machine/test.tftest.hcl new file mode 100644 index 00000000..f5132728 --- /dev/null +++ b/terraform/cluster/talos/modules/machine/test.tftest.hcl @@ -0,0 +1,158 @@ +mock_provider "talos" { + mock_resource "talos_machine_configuration" {} + mock_resource "talos_machine_configuration_apply" {} + mock_resource "talos_machine_bootstrap" {} +} + +variables { + machine_type = "controlplane" + endpoint = "dummy" + node = "dummy" + client_configuration = { + ca_certificate = "dummy" + client_certificate = "dummy" + client_key = "dummy" + } + machine_secrets = { + certs = { + etcd = { + cert = "dummy" + key = "dummy" + } + k8s = { + cert = "dummy" + key = "dummy" + } + k8s_aggregator = { + cert = "dummy" + key = "dummy" + } + k8s_serviceaccount = { + key = "dummy" + } + os = { + cert = "dummy" + key = "dummy" + } + } + cluster = { + id = "dummy" + secret = "dummy" + } + secrets = { + bootstrap_token = "dummy" + secretbox_encryption_secret = "dummy" + } + trustdinfo = { + token = "dummy" + } + } + cluster_name = "dummy" + cluster_endpoint = "https://dummy" + kubernetes_version = "dummy" + talos_version = "1.10.1" +} + +run "machine_config_patch_with_disk_and_hostname" { + variables { + disk_selector = { + busPath = "" + modalias = "" + model = "" + name = "/dev/sda" + serial = "" + size = "0" + type = "" + uuid = "" + wwid = "" + } + wipe_disk = true + hostname = "test-node" + extra_kernel_args = ["console=tty0"] + image = "test-image" + extensions = [{ image = "test-extension" }] + } + assert { + condition = strcontains(local.machine_config_patch, "\"name\": \"/dev/sda\"") + error_message = "Should include disk name /dev/sda" + } + assert { + condition = strcontains(local.machine_config_patch, "\"hostname\": \"test-node\"") + error_message = "Should include hostname test-node" + } + assert { + condition = strcontains(local.machine_config_patch, "\"extraKernelArgs\":\n - \"console=tty0\"") + error_message = "Should include extra kernel arg console=tty0" + } + assert { + condition = strcontains(local.machine_config_patch, "\"image\": \"test-image\"") + error_message = "Should include image test-image" + } + assert { + condition = strcontains(local.machine_config_patch, "- 
\"image\": \"test-extension\"") + error_message = "Should include extension test-extension" + } +} + +run "machine_config_patch_without_disk" { + variables { + disk_selector = null + hostname = "test-node" + } + assert { + condition = !can(regex("diskSelector", local.machine_config_patch)) + error_message = "Should not include diskSelector block" + } + assert { + condition = can(regex("hostname", local.machine_config_patch)) + error_message = "Should include hostname block" + } +} + +run "machine_config_patch_without_hostname" { + variables { + disk_selector = { + busPath = "" + modalias = "" + model = "" + name = "/dev/sda" + serial = "" + size = "0" + type = "" + uuid = "" + wwid = "" + } + hostname = null + } + assert { + condition = can(regex("diskSelector", local.machine_config_patch)) + error_message = "Should include diskSelector block" + } + assert { + condition = !can(regex("hostname", local.machine_config_patch)) + error_message = "Should not include hostname block" + } +} + +run "config_patches_includes_extra" { + variables { + disk_selector = null + hostname = "test-node" + config_patches = [ + <<-EOT + machine: + network: + nameservers: + - 8.8.8.8 + EOT + ] + } + assert { + condition = length(local.config_patches) == 2 + error_message = "Should include both machine_config_patch and extra patch" + } + assert { + condition = strcontains(local.config_patches[1], "- 8.8.8.8") + error_message = "Should include nameservers in extra patch" + } +} diff --git a/terraform/cluster/talos/test.tftest.hcl b/terraform/cluster/talos/test.tftest.hcl new file mode 100644 index 00000000..96534b36 --- /dev/null +++ b/terraform/cluster/talos/test.tftest.hcl @@ -0,0 +1,258 @@ +# Verifies that the module creates machine secrets with the correct Talos version +# and generates the necessary configuration files for cluster access using minimal configuration +run "minimal_configuration" { + command = plan + + variables { + context_path = "test" + cluster_name = "test-cluster" + cluster_endpoint = "https://test.example.com:6443" + controlplanes = [ + { + hostname = "cp1" + endpoint = "https://cp1.example.com:6443" + node = "192.168.1.10" + } + ] + } + + assert { + condition = length(local_sensitive_file.talosconfig) == 1 + error_message = "Talos config file should be generated" + } + + assert { + condition = length(local_sensitive_file.kubeconfig) == 1 + error_message = "Kubeconfig file should be generated" + } + + assert { + condition = module.controlplane_bootstrap.node == "192.168.1.10" + error_message = "controlplane_bootstrap node should match input" + } + assert { + condition = module.controlplane_bootstrap.endpoint == "https://cp1.example.com:6443" + error_message = "controlplane_bootstrap endpoint should match input" + } + assert { + condition = length(module.controlplanes) == 0 + error_message = "No additional controlplanes should be created in minimal config" + } +} + +# Tests a full configuration with all optional variables explicitly set +run "full_configuration" { + command = plan + + variables { + cluster_name = "test-cluster" + cluster_endpoint = "https://test.example.com:6443" + kubernetes_version = "1.33.0" + talos_version = "1.10.1" + common_config_patches = <<-EOT + machine: + network: + nameservers: + - 8.8.8.8 + EOT + controlplane_config_patches = <<-EOT + machine: + controlplane: + extraArgs: + - "--enable-admission-plugins=NodeRestriction" + EOT + worker_config_patches = <<-EOT + machine: + kubelet: + extraArgs: + - "--max-pods=110" + EOT + controlplanes = [ + { + hostname = "cp1" 
+ endpoint = "https://cp1.example.com:6443" + node = "192.168.1.10" + disk_selector = { + name = "/dev/sda" + } + wipe_disk = true + } + ] + workers = [ + { + hostname = "worker1" + endpoint = "https://worker1.example.com:6443" + node = "192.168.1.20" + disk_selector = { + name = "/dev/sdb" + } + wipe_disk = false + } + ] + } + + assert { + condition = module.controlplane_bootstrap.node == "192.168.1.10" + error_message = "controlplane_bootstrap node should match input" + } + assert { + condition = module.controlplane_bootstrap.endpoint == "https://cp1.example.com:6443" + error_message = "controlplane_bootstrap endpoint should match input" + } + assert { + condition = length(module.controlplanes) == 0 + error_message = "No additional controlplanes should be created in this config" + } + assert { + condition = length(module.workers) == 1 + error_message = "Should create one worker" + } + assert { + condition = module.workers[0].node == "192.168.1.20" + error_message = "Worker node should match input" + } + assert { + condition = module.workers[0].endpoint == "https://worker1.example.com:6443" + error_message = "Worker endpoint should match input" + } +} + +# Tests the creation of a multi-node cluster with both control planes and workers, +# ensuring proper configuration for each node type +run "multi_node_configuration" { + command = plan + + variables { + cluster_name = "test-cluster" + cluster_endpoint = "https://test.example.com:6443" + kubernetes_version = "1.33.0" + talos_version = "1.10.1" + controlplanes = [ + { + hostname = "cp1" + endpoint = "https://cp1.example.com:6443" + node = "192.168.1.10" + }, + { + hostname = "cp2" + endpoint = "https://cp2.example.com:6443" + node = "192.168.1.11" + } + ] + workers = [ + { + hostname = "worker1" + endpoint = "https://worker1.example.com:6443" + node = "192.168.1.20" + } + ] + } + + assert { + condition = module.controlplane_bootstrap.node == "192.168.1.10" + error_message = "controlplane_bootstrap node should match first input" + } + assert { + condition = module.controlplane_bootstrap.endpoint == "https://cp1.example.com:6443" + error_message = "controlplane_bootstrap endpoint should match first input" + } + assert { + condition = length(module.controlplanes) == 1 + error_message = "Should create one additional control plane" + } + assert { + condition = module.controlplanes[0].node == "192.168.1.11" + error_message = "Second controlplane node should match input" + } + assert { + condition = module.controlplanes[0].endpoint == "https://cp2.example.com:6443" + error_message = "Second controlplane endpoint should match input" + } + assert { + condition = length(module.workers) == 1 + error_message = "Should create one worker" + } + assert { + condition = module.workers[0].node == "192.168.1.20" + error_message = "Worker node should match input" + } + assert { + condition = module.workers[0].endpoint == "https://worker1.example.com:6443" + error_message = "Worker endpoint should match input" + } +} + +# Verifies that no configuration files are generated when context_path is empty, +# preventing unnecessary file creation in the root directory +run "no_config_files" { + command = plan + + variables { + context_path = "" + cluster_name = "test-cluster" + cluster_endpoint = "https://test.example.com:6443" + kubernetes_version = "1.33.0" + talos_version = "1.10.1" + controlplanes = [ + { + hostname = "cp1" + endpoint = "https://cp1.example.com:6443" + node = "192.168.1.10" + } + ] + workers = [] + } + + assert { + condition = 
length(local_sensitive_file.talosconfig) == 0 + error_message = "No Talos config file should be generated without context path" + } + + assert { + condition = length(local_sensitive_file.kubeconfig) == 0 + error_message = "No Kubeconfig file should be generated without context path" + } +} + +# Verifies that all input validation rules are enforced simultaneously, ensuring that +# invalid values for os_type, kubernetes_version, talos_version, cluster_name, +# cluster_endpoint, and YAML configs are properly caught and reported +run "multiple_invalid_inputs" { + command = plan + expect_failures = [ + var.os_type, + var.kubernetes_version, + var.talos_version, + var.cluster_name, + var.cluster_endpoint, + var.common_config_patches, + var.controlplane_config_patches, + var.worker_config_patches, + var.controlplanes, + var.workers, + ] + variables { + os_type = "macos" + kubernetes_version = "v1.33" + talos_version = "v1.10.1" + cluster_name = "" + cluster_endpoint = "http://localhost:6443" + common_config_patches = "not: valid: yaml: [" + controlplane_config_patches = "not: valid: yaml: [" + worker_config_patches = "not: valid: yaml: [" + controlplanes = [ + { + endpoint = "http://localhost:6443" + node = "192.168.1.10" + config_patches = "not: valid: yaml: [" + } + ] + workers = [ + { + endpoint = "http://localhost:6443" + node = "192.168.1.20" + config_patches = "not: valid: yaml: [" + } + ] + } +} diff --git a/terraform/gitops/flux/test.tftest.hcl b/terraform/gitops/flux/test.tftest.hcl new file mode 100644 index 00000000..2e36f1ad --- /dev/null +++ b/terraform/gitops/flux/test.tftest.hcl @@ -0,0 +1,118 @@ +mock_provider "kubernetes" {} +mock_provider "helm" {} + +# Verifies that the module creates the Flux namespace, Helm release, and secrets with minimal configuration. +# Tests default values for namespace, chart version, and secret naming logic. +run "minimal_configuration" { + command = plan + + variables { + # Only required variables, all others use defaults + } + + assert { + condition = kubernetes_namespace.flux_system.metadata[0].name == "system-gitops" + error_message = "Flux namespace should default to 'system-gitops'" + } + + assert { + condition = helm_release.flux_system.version == "2.15.0" + error_message = "Flux Helm chart version should default to '2.15.0'" + } + + assert { + condition = kubernetes_secret.git_auth.metadata[0].name == "flux-system" + error_message = "Git auth secret name should default to 'flux-system'" + } + + assert { + condition = kubernetes_secret.git_auth.metadata[0].namespace == "system-gitops" + error_message = "Git auth secret should be in the Flux namespace" + } + + assert { + condition = kubernetes_secret.webhook_token.metadata[0].namespace == "system-gitops" + error_message = "Webhook token secret should be in the Flux namespace" + } +} + +# Tests a full configuration with all optional variables explicitly set. +# Validates that user-supplied values override defaults for namespace, chart version, and secret data. 
+run "full_configuration" { + command = plan + + variables { + flux_namespace = "custom-gitops" + flux_helm_version = "2.16.0" + flux_version = "2.6.0" + ssh_private_key = "PRIVATEKEY" + ssh_public_key = "PUBLICKEY" + ssh_known_hosts = "KNOWNHOSTS" + git_auth_secret = "custom-auth" + git_username = "customuser" + git_password = "custompass" + webhook_token = "webhooktoken123" + } + + assert { + condition = kubernetes_namespace.flux_system.metadata[0].name == "custom-gitops" + error_message = "Flux namespace should match input" + } + + assert { + condition = helm_release.flux_system.version == "2.16.0" + error_message = "Flux Helm chart version should match input" + } + + assert { + condition = kubernetes_secret.git_auth.metadata[0].name == "custom-auth" + error_message = "Git auth secret name should match input" + } + + assert { + condition = kubernetes_secret.git_auth.metadata[0].namespace == "custom-gitops" + error_message = "Git auth secret should be in the custom namespace" + } + + assert { + condition = kubernetes_secret.webhook_token.metadata[0].namespace == "custom-gitops" + error_message = "Webhook token secret should be in the custom namespace" + } +} + +# Verifies that no secrets are created if all sensitive variables are empty (default) +run "no_secrets" { + command = plan + + variables { + ssh_private_key = "" + ssh_public_key = "" + ssh_known_hosts = "" + git_password = "" + webhook_token = "" + } + + assert { + condition = kubernetes_secret.git_auth.data != null + error_message = "Git auth secret data should be present (even if empty)" + } + + assert { + condition = kubernetes_secret.webhook_token.data != null + error_message = "Webhook token secret data should be present (even if empty)" + } +} + +# Verifies that all input validation rules are enforced simultaneously, ensuring that +# invalid values for Flux versions are properly caught +run "multiple_invalid_inputs" { + command = plan + expect_failures = [ + var.flux_helm_version, + var.flux_version, + ] + variables { + flux_helm_version = "2.15" # Missing patch version + flux_version = "2.5" # Missing patch version + } +} diff --git a/terraform/network/azure-vnet/test.tftest.hcl b/terraform/network/azure-vnet/test.tftest.hcl new file mode 100644 index 00000000..0f5d8f03 --- /dev/null +++ b/terraform/network/azure-vnet/test.tftest.hcl @@ -0,0 +1,101 @@ +mock_provider "azurerm" {} + +# Verifies that the module creates the VNet, resource group, and subnets with minimal configuration. +# Tests default values for naming, CIDR, and subnet creation. 
+run "minimal_configuration" { + command = plan + + variables { + context_id = "test" + } + + assert { + condition = azurerm_resource_group.main.name == "windsor-vnet-rg-test" + error_message = "Resource group name should follow default naming convention" + } + + assert { + condition = azurerm_virtual_network.main.name == "windsor-vnet-test" + error_message = "VNet name should follow default naming convention" + } + + assert { + condition = [for space in azurerm_virtual_network.main.address_space : space][0] == "10.20.0.0/16" + error_message = "VNet CIDR should default to '10.20.0.0/16'" + } + + assert { + condition = length(azurerm_subnet.public) == 1 + error_message = "One public subnet should be created by default" + } + + assert { + condition = length(azurerm_subnet.private) == 1 + error_message = "One private subnet should be created by default" + } + + assert { + condition = length(azurerm_subnet.data) == 1 + error_message = "One data subnet should be created by default" + } + + assert { + condition = length(azurerm_nat_gateway.main) == 1 + error_message = "One NAT Gateway should be created by default" + } +} + +# Tests a full configuration with all optional variables explicitly set. +# Validates that user-supplied values override defaults for naming, CIDR, and subnet creation. +run "full_configuration" { + command = plan + + variables { + region = "westus" + resource_group_name = "custom-rg" + vnet_name = "custom-vnet" + vnet_zones = 2 + vnet_cidr = "10.30.0.0/16" + vnet_subnets = { + public = ["10.30.1.0/24", "10.30.2.0/24"] + private = ["10.30.11.0/24", "10.30.12.0/24"] + data = ["10.30.21.0/24", "10.30.22.0/24"] + } + context_id = "test" + } + + assert { + condition = azurerm_resource_group.main.name == "custom-rg" + error_message = "Resource group name should match input" + } + + assert { + condition = azurerm_virtual_network.main.name == "custom-vnet" + error_message = "VNet name should match input" + } + + assert { + condition = [for space in azurerm_virtual_network.main.address_space : space][0] == "10.30.0.0/16" + error_message = "VNet CIDR should match input" + } + + assert { + condition = length(azurerm_subnet.public) == 2 + error_message = "Two public subnets should be created" + } + + assert { + condition = length(azurerm_subnet.private) == 2 + error_message = "Two private subnets should be created" + } + + assert { + condition = length(azurerm_subnet.data) == 2 + error_message = "Two data subnets should be created" + } + + assert { + condition = length(azurerm_nat_gateway.main) == 2 + error_message = "Two NAT Gateways should be created" + } +} From 94e0a0096d1e5d9967bfc47152dcfd852286a794 Mon Sep 17 00:00:00 2001 From: Ryan VanGundy Date: Wed, 14 May 2025 13:20:37 -0400 Subject: [PATCH 2/3] Fmt and fix checkov --- terraform/backend/azurerm/test.tftest.hcl | 2 +- terraform/backend/s3/test.tftest.hcl | 6 +-- terraform/cluster/talos/test.tftest.hcl | 22 +++++------ terraform/gitops/flux/test.tftest.hcl | 8 ++-- terraform/network/azure-vnet/test.tftest.hcl | 40 ++++++++++---------- 5 files changed, 39 insertions(+), 39 deletions(-) diff --git a/terraform/backend/azurerm/test.tftest.hcl b/terraform/backend/azurerm/test.tftest.hcl index dcdd542a..030dbfee 100644 --- a/terraform/backend/azurerm/test.tftest.hcl +++ b/terraform/backend/azurerm/test.tftest.hcl @@ -158,7 +158,7 @@ run "multiple_invalid_inputs" { var.storage_account_name, ] variables { - context_id = "test" + context_id = "test" storage_account_name = 
"this-is-a-very-long-storage-account-name-that-exceeds-the-limit" # Too long } } diff --git a/terraform/backend/s3/test.tftest.hcl b/terraform/backend/s3/test.tftest.hcl index 7a5e50f2..07af527f 100644 --- a/terraform/backend/s3/test.tftest.hcl +++ b/terraform/backend/s3/test.tftest.hcl @@ -208,9 +208,9 @@ run "multiple_invalid_inputs" { var.kms_key_alias, ] variables { - context_id = "test" - s3_bucket_name = "a" # Too short + context_id = "test" + s3_bucket_name = "a" # Too short s3_log_bucket_name = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" # Too long - kms_key_alias = "invalid-alias" # Invalid format + kms_key_alias = "invalid-alias" # Invalid format } } diff --git a/terraform/cluster/talos/test.tftest.hcl b/terraform/cluster/talos/test.tftest.hcl index 96534b36..9f7ab08f 100644 --- a/terraform/cluster/talos/test.tftest.hcl +++ b/terraform/cluster/talos/test.tftest.hcl @@ -232,25 +232,25 @@ run "multiple_invalid_inputs" { var.workers, ] variables { - os_type = "macos" - kubernetes_version = "v1.33" - talos_version = "v1.10.1" - cluster_name = "" - cluster_endpoint = "http://localhost:6443" - common_config_patches = "not: valid: yaml: [" + os_type = "macos" + kubernetes_version = "v1.33" + talos_version = "v1.10.1" + cluster_name = "" + cluster_endpoint = "http://localhost:6443" + common_config_patches = "not: valid: yaml: [" controlplane_config_patches = "not: valid: yaml: [" - worker_config_patches = "not: valid: yaml: [" + worker_config_patches = "not: valid: yaml: [" controlplanes = [ { - endpoint = "http://localhost:6443" - node = "192.168.1.10" + endpoint = "http://localhost:6443" + node = "192.168.1.10" config_patches = "not: valid: yaml: [" } ] workers = [ { - endpoint = "http://localhost:6443" - node = "192.168.1.20" + endpoint = "http://localhost:6443" + node = "192.168.1.20" config_patches = "not: valid: yaml: [" } ] diff --git a/terraform/gitops/flux/test.tftest.hcl b/terraform/gitops/flux/test.tftest.hcl index 2e36f1ad..3bfff83e 100644 --- a/terraform/gitops/flux/test.tftest.hcl +++ b/terraform/gitops/flux/test.tftest.hcl @@ -45,13 +45,13 @@ run "full_configuration" { flux_namespace = "custom-gitops" flux_helm_version = "2.16.0" flux_version = "2.6.0" - ssh_private_key = "PRIVATEKEY" + ssh_private_key = "PRIVATEKEY" # checkov:skip=CKV_SECRET_6: Test file, secrets are not real ssh_public_key = "PUBLICKEY" ssh_known_hosts = "KNOWNHOSTS" - git_auth_secret = "custom-auth" + git_auth_secret = "custom-auth" # checkov:skip=CKV_SECRET_6: Test file, secrets are not real git_username = "customuser" git_password = "custompass" - webhook_token = "webhooktoken123" + webhook_token = "webhooktoken123" # checkov:skip=CKV_SECRET_6: Test file, secrets are not real } assert { @@ -113,6 +113,6 @@ run "multiple_invalid_inputs" { ] variables { flux_helm_version = "2.15" # Missing patch version - flux_version = "2.5" # Missing patch version + flux_version = "2.5" # Missing patch version } } diff --git a/terraform/network/azure-vnet/test.tftest.hcl b/terraform/network/azure-vnet/test.tftest.hcl index 0f5d8f03..72658c8e 100644 --- a/terraform/network/azure-vnet/test.tftest.hcl +++ b/terraform/network/azure-vnet/test.tftest.hcl @@ -10,37 +10,37 @@ run "minimal_configuration" { } assert { - condition = azurerm_resource_group.main.name == "windsor-vnet-rg-test" + condition = azurerm_resource_group.main.name == "windsor-vnet-rg-test" error_message = "Resource group name should follow default naming convention" } assert { - condition = azurerm_virtual_network.main.name == 
"windsor-vnet-test" + condition = azurerm_virtual_network.main.name == "windsor-vnet-test" error_message = "VNet name should follow default naming convention" } assert { - condition = [for space in azurerm_virtual_network.main.address_space : space][0] == "10.20.0.0/16" + condition = [for space in azurerm_virtual_network.main.address_space : space][0] == "10.20.0.0/16" error_message = "VNet CIDR should default to '10.20.0.0/16'" } assert { - condition = length(azurerm_subnet.public) == 1 + condition = length(azurerm_subnet.public) == 1 error_message = "One public subnet should be created by default" } assert { - condition = length(azurerm_subnet.private) == 1 + condition = length(azurerm_subnet.private) == 1 error_message = "One private subnet should be created by default" } assert { - condition = length(azurerm_subnet.data) == 1 + condition = length(azurerm_subnet.data) == 1 error_message = "One data subnet should be created by default" } assert { - condition = length(azurerm_nat_gateway.main) == 1 + condition = length(azurerm_nat_gateway.main) == 1 error_message = "One NAT Gateway should be created by default" } } @@ -51,51 +51,51 @@ run "full_configuration" { command = plan variables { - region = "westus" + region = "westus" resource_group_name = "custom-rg" - vnet_name = "custom-vnet" - vnet_zones = 2 - vnet_cidr = "10.30.0.0/16" - vnet_subnets = { + vnet_name = "custom-vnet" + vnet_zones = 2 + vnet_cidr = "10.30.0.0/16" + vnet_subnets = { public = ["10.30.1.0/24", "10.30.2.0/24"] private = ["10.30.11.0/24", "10.30.12.0/24"] data = ["10.30.21.0/24", "10.30.22.0/24"] } - context_id = "test" + context_id = "test" } assert { - condition = azurerm_resource_group.main.name == "custom-rg" + condition = azurerm_resource_group.main.name == "custom-rg" error_message = "Resource group name should match input" } assert { - condition = azurerm_virtual_network.main.name == "custom-vnet" + condition = azurerm_virtual_network.main.name == "custom-vnet" error_message = "VNet name should match input" } assert { - condition = [for space in azurerm_virtual_network.main.address_space : space][0] == "10.30.0.0/16" + condition = [for space in azurerm_virtual_network.main.address_space : space][0] == "10.30.0.0/16" error_message = "VNet CIDR should match input" } assert { - condition = length(azurerm_subnet.public) == 2 + condition = length(azurerm_subnet.public) == 2 error_message = "Two public subnets should be created" } assert { - condition = length(azurerm_subnet.private) == 2 + condition = length(azurerm_subnet.private) == 2 error_message = "Two private subnets should be created" } assert { - condition = length(azurerm_subnet.data) == 2 + condition = length(azurerm_subnet.data) == 2 error_message = "Two data subnets should be created" } assert { - condition = length(azurerm_nat_gateway.main) == 2 + condition = length(azurerm_nat_gateway.main) == 2 error_message = "Two NAT Gateways should be created" } } From be3f4917a364c0d8efeb85cd047f48f6cdf5770a Mon Sep 17 00:00:00 2001 From: Ryan VanGundy Date: Wed, 14 May 2025 13:24:01 -0400 Subject: [PATCH 3/3] Cleanup --- terraform/cluster/azure-aks/test.tftest.hcl | 2 -- 1 file changed, 2 deletions(-) diff --git a/terraform/cluster/azure-aks/test.tftest.hcl b/terraform/cluster/azure-aks/test.tftest.hcl index 9fb95b9f..fb1e90d7 100644 --- a/terraform/cluster/azure-aks/test.tftest.hcl +++ b/terraform/cluster/azure-aks/test.tftest.hcl @@ -11,8 +11,6 @@ mock_provider "azurerm" { } } } -mock_provider "local" {} -mock_provider "time" {} # Verifies that the module 
creates an AKS cluster with minimal configuration,
 # ensuring that all default values are correctly applied and only required variables are set.