diff --git a/terraform/cluster/azure-aks/.terraform.lock.hcl b/terraform/cluster/azure-aks/.terraform.lock.hcl new file mode 100644 index 00000000..2788af76 --- /dev/null +++ b/terraform/cluster/azure-aks/.terraform.lock.hcl @@ -0,0 +1,60 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. + +provider "registry.terraform.io/hashicorp/azurerm" { + version = "4.27.0" + constraints = "~> 4.27.0" + hashes = [ + "h1:hmAzHk4XVbrGQ5iJJj1QdFx0aWNW9Hjh+GIE6S8G5I8=", + "zh:0c69edea1995bd3bd9e61980757169c35bf22281b660b5c755b6cb13d08d29d2", + "zh:25b86bf7b9678371d8573983954c571696f3e64a3967133be3b835da36307106", + "zh:49921cff4f26a49bafada60cd07dabb52c5eb35231059ed928a4f4722e269c82", + "zh:4b986166531f9fd1289f01d8220519443e74888a21da512c1b841b006dad6215", + "zh:53fb65b2ca4df637f03e4748a100a7d7fc77249e307c03e294d6259cec0310f6", + "zh:5c0d021a387ca4e2a5a01da009746a08c45f08e971c10d9bda54539d7264d671", + "zh:600043f2b20dc5a45275e43f175c19fe8b6e8e9557a0c884aef018f1f63de90e", + "zh:a0284f6f38912f67bb4cb7829fda3fa75be81fea6a9b21119965c2a839430092", + "zh:a7ac0576e2069ef77557042c6b5157ded364fbd355b2f9bf7f5441622424086e", + "zh:c5db0bcafe986868e28cc6225b68b2d1cf4bf631939d260ca845f17a9aa1677d", + "zh:ce620c0eb71b1fdd925828b30cf232a869abccf1c459180f2f991c4166315251", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + ] +} + +provider "registry.terraform.io/hashicorp/local" { + version = "2.5.2" + hashes = [ + "h1:IyFbOIO6mhikFNL/2h1iZJ6kyN3U00jgkpCLUCThAfE=", + "zh:136299545178ce281c56f36965bf91c35407c11897f7082b3b983d86cb79b511", + "zh:3b4486858aa9cb8163378722b642c57c529b6c64bfbfc9461d940a84cd66ebea", + "zh:4855ee628ead847741aa4f4fc9bed50cfdbf197f2912775dd9fe7bc43fa077c0", + "zh:4b8cd2583d1edcac4011caafe8afb7a95e8110a607a1d5fb87d921178074a69b", + "zh:52084ddaff8c8cd3f9e7bcb7ce4dc1eab00602912c96da43c29b4762dc376038", + "zh:71562d330d3f92d79b2952ffdda0dad167e952e46200c767dd30c6af8d7c0ed3", + 
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + "zh:805f81ade06ff68fa8b908d31892eaed5c180ae031c77ad35f82cb7a74b97cf4", + "zh:8b6b3ebeaaa8e38dd04e56996abe80db9be6f4c1df75ac3cccc77642899bd464", + "zh:ad07750576b99248037b897de71113cc19b1a8d0bc235eb99173cc83d0de3b1b", + "zh:b9f1c3bfadb74068f5c205292badb0661e17ac05eb23bfe8bd809691e4583d0e", + "zh:cc4cbcd67414fefb111c1bf7ab0bc4beb8c0b553d01719ad17de9a047adff4d1", + ] +} + +provider "registry.terraform.io/hashicorp/time" { + version = "0.13.1" + hashes = [ + "h1:ZT5ppCNIModqk3iOkVt5my8b8yBHmDpl663JtXAIRqM=", + "zh:02cb9aab1002f0f2a94a4f85acec8893297dc75915f7404c165983f720a54b74", + "zh:04429b2b31a492d19e5ecf999b116d396dac0b24bba0d0fb19ecaefe193fdb8f", + "zh:26f8e51bb7c275c404ba6028c1b530312066009194db721a8427a7bc5cdbc83a", + "zh:772ff8dbdbef968651ab3ae76d04afd355c32f8a868d03244db3f8496e462690", + "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + "zh:898db5d2b6bd6ca5457dccb52eedbc7c5b1a71e4a4658381bcbb38cedbbda328", + "zh:8de913bf09a3fa7bedc29fec18c47c571d0c7a3d0644322c46f3aa648cf30cd8", + "zh:9402102c86a87bdfe7e501ffbb9c685c32bbcefcfcf897fd7d53df414c36877b", + "zh:b18b9bb1726bb8cfbefc0a29cf3657c82578001f514bcf4c079839b6776c47f0", + "zh:b9d31fdc4faecb909d7c5ce41d2479dd0536862a963df434be4b16e8e4edc94d", + "zh:c951e9f39cca3446c060bd63933ebb89cedde9523904813973fbc3d11863ba75", + "zh:e5b773c0d07e962291be0e9b413c7a22c044b8c7b58c76e8aa91d1659990dfb5", + ] +} diff --git a/terraform/cluster/azure-aks/main.tf b/terraform/cluster/azure-aks/main.tf new file mode 100644 index 00000000..70fcbb81 --- /dev/null +++ b/terraform/cluster/azure-aks/main.tf @@ -0,0 +1,277 @@ +#--------------------------------------------------------------------------------------------------- +# Versions +#--------------------------------------------------------------------------------------------------- + +terraform { + required_version = ">=1.8" + required_providers { + azurerm = { + source = 
"hashicorp/azurerm" + version = "~> 4.27.0" + } + } +} + +#----------------------------------------------------------------------------------------------------------------------- +# Azure Provider configuration +#----------------------------------------------------------------------------------------------------------------------- + +provider "azurerm" { + features { + resource_group { + prevent_deletion_if_contains_resources = false + } + key_vault { + purge_soft_deleted_keys_on_destroy = true + recover_soft_deleted_keys = true + } + } +} + +data "azurerm_client_config" "current" {} + +#----------------------------------------------------------------------------------------------------------------------- +# Locals +#----------------------------------------------------------------------------------------------------------------------- + +locals { + kubeconfig_path = "${var.context_path}/.kube/config" + rg_name = var.resource_group_name == null ? "windsor-aks-rg-${var.context_id}" : var.resource_group_name + cluster_name = var.cluster_name == null ? "windsor-aks-cluster-${var.context_id}" : var.cluster_name +} + +#----------------------------------------------------------------------------------------------------------------------- +# Resource Groups +#----------------------------------------------------------------------------------------------------------------------- + +resource "azurerm_resource_group" "aks" { + name = local.rg_name + location = var.region +} + +#----------------------------------------------------------------------------------------------------------------------- +# Key Vault +#----------------------------------------------------------------------------------------------------------------------- + +resource "azurerm_key_vault" "key_vault" { + # checkov:skip=CKV2_AZURE_32: We are using a public cluster for testing, there is no need for private endpoints. 
+ name = "aks-keyvault-${var.context_id}" + location = azurerm_resource_group.aks.location + resource_group_name = azurerm_resource_group.aks.name + tenant_id = data.azurerm_client_config.current.tenant_id + sku_name = "premium" + enabled_for_disk_encryption = true + purge_protection_enabled = true + soft_delete_retention_days = 7 + # checkov:skip=CKV_AZURE_189: We are using a public cluster for testing + # private services are encouraged for production + public_network_access_enabled = var.public_network_access_enabled + + # checkov:skip=CKV_AZURE_109: We are using a public cluster for testing + # private services are encouraged for production. Change to "Deny" for production. + network_acls { + default_action = var.network_acls_default_action + bypass = "AzureServices" + } + + access_policy { + tenant_id = data.azurerm_client_config.current.tenant_id + object_id = data.azurerm_client_config.current.object_id + + key_permissions = [ + "Create", + "Delete", + "Get", + "Purge", + "Recover", + "Update", + "GetRotationPolicy", + "SetRotationPolicy" + ] + + secret_permissions = [ + "Set", + ] + } +} + +resource "azurerm_key_vault_access_policy" "key_vault_access_policy_disk" { + key_vault_id = azurerm_key_vault.key_vault.id + + tenant_id = data.azurerm_client_config.current.tenant_id + object_id = azurerm_disk_encryption_set.main.identity.0.principal_id + + key_permissions = [ + "Get", + "Decrypt", + "Encrypt", + "Sign", + "UnwrapKey", + "Verify", + "WrapKey", + ] + + depends_on = [ + azurerm_disk_encryption_set.main + ] +} + +resource "time_static" "expiry" {} + +resource "azurerm_key_vault_key" "key_vault_key" { + name = "aks-key-${var.context_id}" + key_vault_id = azurerm_key_vault.key_vault.id + key_type = "RSA-HSM" + key_size = 2048 + expiration_date = var.expiration_date != null ? 
var.expiration_date : timeadd(time_static.expiry.rfc3339, "8760h") + + key_opts = [ + "decrypt", + "encrypt", + "sign", + "unwrapKey", + "verify", + "wrapKey", + ] + + rotation_policy { + automatic { + time_before_expiry = "P30D" + } + + expire_after = "P90D" + notify_before_expiry = "P29D" + } +} + +resource "azurerm_disk_encryption_set" "main" { + name = "des-${var.context_id}" + resource_group_name = azurerm_resource_group.aks.name + location = azurerm_resource_group.aks.location + key_vault_key_id = azurerm_key_vault_key.key_vault_key.id + + identity { + type = "SystemAssigned" + } +} + +#----------------------------------------------------------------------------------------------------------------------- +# Log Analytics Workspace +#----------------------------------------------------------------------------------------------------------------------- + +resource "azurerm_log_analytics_workspace" "aks_logs" { + name = "aks-logs-${var.context_id}" + location = azurerm_resource_group.aks.location + resource_group_name = azurerm_resource_group.aks.name + sku = "PerGB2018" + retention_in_days = 30 +} + +#----------------------------------------------------------------------------------------------------------------------- +# AKS Cluster +#----------------------------------------------------------------------------------------------------------------------- + +data "azurerm_subnet" "private" { + count = var.vnet_subnet_id == null ? 1 : 0 + name = "${var.context_id}-private-1" + resource_group_name = var.vnet_resource_group_name == null ? "windsor-vnet-rg-${var.context_id}" : var.vnet_resource_group_name + virtual_network_name = var.vnet_name == null ? 
"windsor-vnet-${var.context_id}" : var.vnet_name +} + +resource "azurerm_kubernetes_cluster" "main" { + name = local.cluster_name + location = azurerm_resource_group.aks.location + resource_group_name = azurerm_resource_group.aks.name + dns_prefix = local.cluster_name + kubernetes_version = var.kubernetes_version + role_based_access_control_enabled = var.role_based_access_control_enabled + automatic_upgrade_channel = var.automatic_upgrade_channel + sku_tier = var.sku_tier + # checkov:skip=CKV_AZURE_6: This feature is in preview, we are using a public cluster for testing + # api_server_authorized_ip_ranges = [0.0.0.0/0] + # checkov:skip=CKV_AZURE_115: We are using a public cluster for testing + # private clusters are encouraged for production + private_cluster_enabled = var.private_cluster_enabled + disk_encryption_set_id = azurerm_disk_encryption_set.main.id + # checkov:skip=CKV_AZURE_116: This replaces the addon_profile + azure_policy_enabled = var.azure_policy_enabled + # checkov:skip=CKV_AZURE_141: We are setting this to false to avoid the creation of an AD + local_account_disabled = var.local_account_disabled + + key_vault_secrets_provider { + secret_rotation_enabled = true + } + + default_node_pool { + name = var.default_node_pool.name + node_count = var.default_node_pool.node_count + vm_size = var.default_node_pool.vm_size + vnet_subnet_id = coalesce(var.vnet_subnet_id, try(data.azurerm_subnet.private[0].id, null)) + orchestrator_version = var.kubernetes_version + only_critical_addons_enabled = true + # checkov:skip=CKV_AZURE_226: we are using the managed disk type to reduce costs + os_disk_type = var.default_node_pool.os_disk_type + host_encryption_enabled = var.default_node_pool.host_encryption_enabled + # checkov:skip=CKV_AZURE_168: This is set in the variable by default to 50 + max_pods = var.default_node_pool.max_pods + } + + auto_scaler_profile { + balance_similar_node_groups = var.auto_scaler_profile.balance_similar_node_groups + 
max_graceful_termination_sec = var.auto_scaler_profile.max_graceful_termination_sec + scale_down_delay_after_add = var.auto_scaler_profile.scale_down_delay_after_add + scale_down_delay_after_delete = var.auto_scaler_profile.scale_down_delay_after_delete + scale_down_delay_after_failure = var.auto_scaler_profile.scale_down_delay_after_failure + scan_interval = var.auto_scaler_profile.scan_interval + scale_down_unneeded = var.auto_scaler_profile.scale_down_unneeded + scale_down_unready = var.auto_scaler_profile.scale_down_unready + scale_down_utilization_threshold = var.auto_scaler_profile.scale_down_utilization_threshold + } + + workload_autoscaler_profile { + keda_enabled = var.workload_autoscaler_profile.keda_enabled + vertical_pod_autoscaler_enabled = var.workload_autoscaler_profile.vertical_pod_autoscaler_enabled + } + + network_profile { + network_policy = "azure" + network_plugin = "azure" + } + + oms_agent { + log_analytics_workspace_id = azurerm_log_analytics_workspace.aks_logs.id + } + + identity { + type = "SystemAssigned" + } + + lifecycle { + ignore_changes = [ + default_node_pool[0].node_count + ] + } +} + +resource "azurerm_kubernetes_cluster_node_pool" "autoscaled" { + count = var.autoscaled_node_pool.enabled ? 
1 : 0 + name = var.autoscaled_node_pool.name + kubernetes_cluster_id = azurerm_kubernetes_cluster.main.id + vm_size = var.autoscaled_node_pool.vm_size + mode = var.autoscaled_node_pool.mode + auto_scaling_enabled = true + min_count = var.autoscaled_node_pool.min_count + max_count = var.autoscaled_node_pool.max_count + vnet_subnet_id = coalesce(var.vnet_subnet_id, try(data.azurerm_subnet.private[0].id, null)) + orchestrator_version = var.kubernetes_version + # checkov:skip=CKV_AZURE_226: We are using the managed disk type to reduce costs + os_disk_type = var.autoscaled_node_pool.os_disk_type + # checkov:skip=CKV_AZURE_168: This is set in the variable by default to 50 + max_pods = var.autoscaled_node_pool.max_pods + host_encryption_enabled = var.autoscaled_node_pool.host_encryption_enabled +} + +resource "local_sensitive_file" "kube_config" { + content = azurerm_kubernetes_cluster.main.kube_config_raw + filename = local.kubeconfig_path +} diff --git a/terraform/cluster/azure-aks/outputs.tf b/terraform/cluster/azure-aks/outputs.tf new file mode 100644 index 00000000..e42d9292 --- /dev/null +++ b/terraform/cluster/azure-aks/outputs.tf @@ -0,0 +1,18 @@ +#----------------------------------------------------------------------------------------------------------------------- +# Outputs +#----------------------------------------------------------------------------------------------------------------------- + +output "cluster_name" { + description = "Name of the AKS cluster" + value = azurerm_kubernetes_cluster.main.name +} + +output "resource_group_name" { + description = "Name of the resource group containing the AKS cluster" + value = azurerm_resource_group.aks.name +} + +output "cluster_identity" { + description = "System assigned identity of the AKS cluster" + value = azurerm_kubernetes_cluster.main.identity[0].principal_id +} diff --git a/terraform/cluster/azure-aks/variables.tf b/terraform/cluster/azure-aks/variables.tf new file mode 100644 index 00000000..d269fe5f --- 
/dev/null +++ b/terraform/cluster/azure-aks/variables.tf @@ -0,0 +1,199 @@ +#----------------------------------------------------------------------------------------------------------------------- +# Variables +#----------------------------------------------------------------------------------------------------------------------- + +variable "resource_group_name" { + description = "Name of the resource group" + type = string + default = null +} + +variable "vnet_resource_group_name" { + description = "Name of the VNET resource group" + type = string + default = null +} + +variable "vnet_name" { + description = "Name of the VNET" + type = string + default = null +} + +variable "vnet_subnet_id" { + description = "ID of the subnet" + type = string + default = null +} + +variable "region" { + description = "Region for the resources" + type = string + default = "eastus" +} + +variable "cluster_name" { + description = "Name of the AKS cluster" + type = string + default = null +} + +variable "kubernetes_version" { + description = "Version of Kubernetes to use" + type = string + default = "1.32" +} + +variable "context_path" { + type = string + description = "The path to the context folder, where kubeconfig is stored" + default = "" +} + +variable "context_id" { + description = "Context ID for the resources" + type = string + default = null +} + +variable "default_node_pool" { + description = "Configuration for the default node pool" + type = object({ + name = string + vm_size = string + os_disk_type = string + max_pods = number + host_encryption_enabled = bool + min_count = number + max_count = number + node_count = number + }) + default = { + name = "system" + vm_size = "Standard_D2s_v3" + os_disk_type = "Managed" + max_pods = 30 + host_encryption_enabled = true + min_count = 1 + max_count = 3 + node_count = 1 + } +} + +variable "autoscaled_node_pool" { + description = "Configuration for the autoscaled node pool" + type = object({ + enabled = bool + name = string + 
vm_size = string + mode = string + os_disk_type = string + max_pods = number + host_encryption_enabled = bool + min_count = number + max_count = number + }) + default = { + enabled = true + name = "autoscaled" + vm_size = "Standard_D2s_v3" + mode = "User" + os_disk_type = "Managed" + max_pods = 30 + host_encryption_enabled = true + min_count = 1 + max_count = 3 + } +} + +variable "role_based_access_control_enabled" { + type = bool + description = "Whether to enable role-based access control for the AKS cluster" + default = true +} + +variable "auto_scaler_profile" { + type = object({ + balance_similar_node_groups = bool + max_graceful_termination_sec = number + scale_down_delay_after_add = string + scale_down_delay_after_delete = string + scale_down_delay_after_failure = string + scan_interval = string + scale_down_unneeded = string + scale_down_unready = string + scale_down_utilization_threshold = string + }) + description = "Configuration for the AKS cluster's auto-scaler" + default = { + balance_similar_node_groups = true + max_graceful_termination_sec = 600 + scale_down_delay_after_add = "10m" + scale_down_delay_after_delete = "10s" + scale_down_delay_after_failure = "3m" + scan_interval = "10s" + scale_down_unneeded = "10m" + scale_down_unready = "20m" + scale_down_utilization_threshold = "0.5" + } +} + +variable "workload_autoscaler_profile" { + type = object({ + keda_enabled = bool + vertical_pod_autoscaler_enabled = bool + }) + description = "Configuration for the AKS cluster's workload autoscaler" + default = { + keda_enabled = false + vertical_pod_autoscaler_enabled = false + } +} + +variable "automatic_upgrade_channel" { + type = string + description = "The automatic upgrade channel for the AKS cluster" + default = "stable" +} + +variable "sku_tier" { + type = string + description = "The SKU tier for the AKS cluster" + default = "Standard" +} + +variable "private_cluster_enabled" { + type = bool + description = "Whether to enable private cluster for the 
AKS cluster" + default = false +} + +variable "azure_policy_enabled" { + type = bool + description = "Whether to enable Azure Policy for the AKS cluster" + default = true +} + +variable "local_account_disabled" { + type = bool + description = "Whether to disable local accounts for the AKS cluster" + default = false +} + +variable "public_network_access_enabled" { + type = bool + description = "Whether to enable public network access for the key vault" + default = true +} + +variable "network_acls_default_action" { + type = string + description = "The default action for the key vault's network ACLs" + default = "Allow" +} + +variable "expiration_date" { + type = string + description = "The expiration date for the key vault key" + default = null +} diff --git a/terraform/network/azure-vnet/.terraform.lock.hcl b/terraform/network/azure-vnet/.terraform.lock.hcl new file mode 100644 index 00000000..771c21bc --- /dev/null +++ b/terraform/network/azure-vnet/.terraform.lock.hcl @@ -0,0 +1,21 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. 
+ +provider "registry.terraform.io/hashicorp/azurerm" { + version = "4.27.0" + hashes = [ + "h1:hmAzHk4XVbrGQ5iJJj1QdFx0aWNW9Hjh+GIE6S8G5I8=", + "zh:0c69edea1995bd3bd9e61980757169c35bf22281b660b5c755b6cb13d08d29d2", + "zh:25b86bf7b9678371d8573983954c571696f3e64a3967133be3b835da36307106", + "zh:49921cff4f26a49bafada60cd07dabb52c5eb35231059ed928a4f4722e269c82", + "zh:4b986166531f9fd1289f01d8220519443e74888a21da512c1b841b006dad6215", + "zh:53fb65b2ca4df637f03e4748a100a7d7fc77249e307c03e294d6259cec0310f6", + "zh:5c0d021a387ca4e2a5a01da009746a08c45f08e971c10d9bda54539d7264d671", + "zh:600043f2b20dc5a45275e43f175c19fe8b6e8e9557a0c884aef018f1f63de90e", + "zh:a0284f6f38912f67bb4cb7829fda3fa75be81fea6a9b21119965c2a839430092", + "zh:a7ac0576e2069ef77557042c6b5157ded364fbd355b2f9bf7f5441622424086e", + "zh:c5db0bcafe986868e28cc6225b68b2d1cf4bf631939d260ca845f17a9aa1677d", + "zh:ce620c0eb71b1fdd925828b30cf232a869abccf1c459180f2f991c4166315251", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + ] +} diff --git a/terraform/network/azure-vnet/main.tf b/terraform/network/azure-vnet/main.tf new file mode 100644 index 00000000..aede4282 --- /dev/null +++ b/terraform/network/azure-vnet/main.tf @@ -0,0 +1,125 @@ +#--------------------------------------------------------------------------------------------------- +# Versions +#--------------------------------------------------------------------------------------------------- + +terraform { + required_version = ">=1.8" + required_providers { + azurerm = { + source = "hashicorp/azurerm" + version = "~> 4.27.0" + } + } +} + +#----------------------------------------------------------------------------------------------------------------------- +# Azure Provider configuration +#----------------------------------------------------------------------------------------------------------------------- + +provider "azurerm" { + features {} +} + 
+#----------------------------------------------------------------------------------------------------------------------- +# Locals +#----------------------------------------------------------------------------------------------------------------------- + +locals { + vnet_name = var.vnet_name == null ? "windsor-vnet-${var.context_id}" : var.vnet_name + rg_name = var.resource_group_name == null ? "windsor-vnet-rg-${var.context_id}" : var.resource_group_name +} + +#----------------------------------------------------------------------------------------------------------------------- +# Resource Group +#----------------------------------------------------------------------------------------------------------------------- + +resource "azurerm_resource_group" "main" { + name = local.rg_name + location = var.region +} + +#----------------------------------------------------------------------------------------------------------------------- +# Virtual Network +#----------------------------------------------------------------------------------------------------------------------- + +resource "azurerm_virtual_network" "main" { + name = local.vnet_name + address_space = [var.vnet_cidr] + location = azurerm_resource_group.main.location + resource_group_name = azurerm_resource_group.main.name +} + +#----------------------------------------------------------------------------------------------------------------------- +# Subnets +#----------------------------------------------------------------------------------------------------------------------- + +# Public subnets +resource "azurerm_subnet" "public" { + count = length(var.vnet_subnets["public"]) > 0 ? length(var.vnet_subnets["public"]) : var.vnet_zones + name = "${var.context_id}-public-${count.index + 1}" + resource_group_name = azurerm_resource_group.main.name + virtual_network_name = azurerm_virtual_network.main.name + address_prefixes = length(var.vnet_subnets["public"]) > 0 ? 
[var.vnet_subnets["public"][count.index]] : ["${join(".", slice(split(".", var.vnet_cidr), 0, 2))}.${count.index + 1}.0/24"] +} + +# Private subnets +resource "azurerm_subnet" "private" { + count = length(var.vnet_subnets["private"]) > 0 ? length(var.vnet_subnets["private"]) : var.vnet_zones + name = "${var.context_id}-private-${count.index + 1}" + resource_group_name = azurerm_resource_group.main.name + virtual_network_name = azurerm_virtual_network.main.name + address_prefixes = length(var.vnet_subnets["private"]) > 0 ? [var.vnet_subnets["private"][count.index]] : ["${join(".", slice(split(".", var.vnet_cidr), 0, 2))}.1${count.index + 1}.0/24"] +} + +# Data subnets +resource "azurerm_subnet" "data" { + count = length(var.vnet_subnets["data"]) > 0 ? length(var.vnet_subnets["data"]) : var.vnet_zones + name = "${var.context_id}-data-${count.index + 1}" + resource_group_name = azurerm_resource_group.main.name + virtual_network_name = azurerm_virtual_network.main.name + address_prefixes = length(var.vnet_subnets["data"]) > 0 ? 
[var.vnet_subnets["data"][count.index]] : ["${join(".", slice(split(".", var.vnet_cidr), 0, 2))}.2${count.index + 1}.0/24"] +} + +#----------------------------------------------------------------------------------------------------------------------- +# NAT Gateway +#----------------------------------------------------------------------------------------------------------------------- + +# Public IP for NAT Gateway +resource "azurerm_public_ip" "nat" { + count = var.vnet_zones + name = "${var.context_id}-nat-gw-ip-${count.index + 1}" + location = azurerm_resource_group.main.location + resource_group_name = azurerm_resource_group.main.name + allocation_method = "Static" + sku = "Standard" +} + +# NAT Gateway +resource "azurerm_nat_gateway" "main" { + count = var.vnet_zones + name = "${var.context_id}-nat-gw-${count.index + 1}" + location = azurerm_resource_group.main.location + resource_group_name = azurerm_resource_group.main.name + sku_name = "Standard" +} + +# Associate public IP with NAT Gateway +resource "azurerm_nat_gateway_public_ip_association" "main" { + count = var.vnet_zones + nat_gateway_id = azurerm_nat_gateway.main[count.index].id + public_ip_address_id = azurerm_public_ip.nat[count.index].id +} + +# Associate NAT Gateway with private subnet +resource "azurerm_subnet_nat_gateway_association" "private" { + count = var.vnet_zones + subnet_id = azurerm_subnet.private[count.index].id + nat_gateway_id = azurerm_nat_gateway.main[count.index].id +} + +# Associate NAT Gateway with data subnet +resource "azurerm_subnet_nat_gateway_association" "data" { + count = var.vnet_zones + subnet_id = azurerm_subnet.data[count.index].id + nat_gateway_id = azurerm_nat_gateway.main[count.index].id +} diff --git a/terraform/network/azure-vnet/outputs.tf b/terraform/network/azure-vnet/outputs.tf new file mode 100644 index 00000000..64eeba11 --- /dev/null +++ b/terraform/network/azure-vnet/outputs.tf @@ -0,0 +1,18 @@ 
+#----------------------------------------------------------------------------------------------------------------------- +# Outputs +#----------------------------------------------------------------------------------------------------------------------- + +output "public_subnet_ids" { + description = "IDs of created public subnets" + value = azurerm_subnet.public[*].id +} + +output "private_subnet_ids" { + description = "IDs of created private subnets" + value = azurerm_subnet.private[*].id +} + +output "data_subnet_ids" { + description = "IDs of created data subnets" + value = azurerm_subnet.data[*].id +} diff --git a/terraform/network/azure-vnet/variables.tf b/terraform/network/azure-vnet/variables.tf new file mode 100644 index 00000000..39ad62e6 --- /dev/null +++ b/terraform/network/azure-vnet/variables.tf @@ -0,0 +1,52 @@ + +# Variables +variable "region" { + description = "Region for the resources" + type = string + default = "eastus" +} + +variable "resource_group_name" { + description = "Name of the resource group" + type = string + default = null +} + +variable "vnet_name" { + description = "Name of the VNET" + type = string + default = null +} + +variable "vnet_zones" { + description = "Number of availability zones to create" + type = number + default = 1 +} + +variable "vnet_cidr" { + description = "CIDR block for VNET" + type = string + default = "10.20.0.0/16" +} + +variable "vnet_subnets" { + description = "Subnets to create in the VNET" + type = map(list(string)) + # example: { + # public = ["10.20.1.0/24", "10.20.2.0/24", "10.20.3.0/24"] + # private = ["10.20.11.0/24", "10.20.12.0/24", "10.20.13.0/24"] + # data = ["10.20.21.0/24", "10.20.22.0/24", "10.20.23.0/24"] + # } + default = { + public = [] + private = [] + data = [] + } +} + +variable "context_id" { + description = "Context ID for the resources" + type = string + default = null +}