diff --git a/aws/e6data_with_existing_eks/e6data_engine_iam.tf b/aws/e6data_with_existing_eks/e6data_engine_iam.tf index 34e86ad3..284c65b9 100644 --- a/aws/e6data_with_existing_eks/e6data_engine_iam.tf +++ b/aws/e6data_with_existing_eks/e6data_engine_iam.tf @@ -17,6 +17,28 @@ data "aws_iam_policy_document" "oidc_assume_role_policy" { } } +data "aws_iam_policy_document" "system_tables_policy" { + statement { + sid = "AssumeRole" + effect = "Allow" + + actions = [ + "sts:AssumeRole" + ] + resources = ["arn:aws:iam::${local.cross_account_id}:role/e6-system-tables-*"] + } + + statement { + sid = "TagSession" + effect = "Allow" + + actions = [ + "sts:TagSession" + ] + resources = ["*"] + } +} + data "aws_iam_policy_document" "engine_iam_glue_s3readAccess_doc" { statement { sid = "glueReadOnlyAccess" @@ -60,9 +82,16 @@ resource "aws_iam_policy" "e6data_engine_s3_glue_policy" { policy = data.aws_iam_policy_document.engine_iam_glue_s3readAccess_doc.json } +resource "aws_iam_policy" "e6data_engine_system_tables_policy" { + name = "${local.e6data_workspace_name}-engine-system-tables-${random_string.random.result}" + description = "Allows assuming the role for system tables" + policy = data.aws_iam_policy_document.system_tables_policy.json +} + + # Create an IAM role for the engine, allowing it to assume the role with specified policies attached resource "aws_iam_role" "e6data_engine_role" { name = "${local.e6data_workspace_name}-engine-role-${random_string.random.result}" assume_role_policy = data.aws_iam_policy_document.oidc_assume_role_policy.json - managed_policy_arns = [aws_iam_policy.e6data_engine_s3_glue_policy.arn, aws_iam_policy.e6data_s3_read_write_policy.arn] + managed_policy_arns = [aws_iam_policy.e6data_engine_s3_glue_policy.arn, aws_iam_policy.e6data_s3_read_write_policy.arn, aws_iam_policy.e6data_engine_system_tables_policy.arn] } \ No newline at end of file diff --git a/aws/e6data_with_existing_eks/helm.tf index
a0e52291..cb5b7ae6 100644 --- a/aws/e6data_with_existing_eks/helm.tf +++ b/aws/e6data_with_existing_eks/helm.tf @@ -12,8 +12,5 @@ resource "helm_release" "e6data_workspace_deployment" { values = [local.helm_values_file] - lifecycle { - ignore_changes = [values] - } # depends_on = [aws_eks_access_policy_association.tf_runner_auth_policy] } \ No newline at end of file diff --git a/aws/e6data_with_existing_eks/karpenter-provisioner-manifests/nodeclass.yaml b/aws/e6data_with_existing_eks/karpenter-provisioner-manifests/nodeclass.yaml index 8bb7aebd..3f645483 100644 --- a/aws/e6data_with_existing_eks/karpenter-provisioner-manifests/nodeclass.yaml +++ b/aws/e6data_with_existing_eks/karpenter-provisioner-manifests/nodeclass.yaml @@ -22,7 +22,6 @@ spec: volumeType: gp3 userData: | echo "$(jq '.allowedUnsafeSysctls += ["net.core.somaxconn","net.ipv4.ip_local_port_range"]' /etc/kubernetes/kubelet/kubelet-config.json)" > /etc/kubernetes/kubelet/kubelet-config.json - echo "$(jq '.cpuManagerPolicy = "static"' /etc/kubernetes/kubelet/kubelet-config.json)" > /etc/kubernetes/kubelet/kubelet-config.json mount_location="/app/tmp" mkdir -p $mount_location yum install nvme-cli -y diff --git a/aws/e6data_with_existing_eks/support.tf b/aws/e6data_with_existing_eks/support.tf index dcc76f5c..2f066013 100644 --- a/aws/e6data_with_existing_eks/support.tf +++ b/aws/e6data_with_existing_eks/support.tf @@ -14,10 +14,7 @@ locals { type = "AWS" oidc_value = aws_iam_role.e6data_engine_role.arn control_plane_user = ["e6data-${var.workspace_name}-user"] - } - karpenter = { - nodepool = local.e6data_nodepool_name - nodeclass = local.e6data_nodeclass_name + debug_namespaces = var.debug_namespaces } }) mapUsers = try(data.kubernetes_config_map_v1.aws_auth_read.data["mapUsers"], "") @@ -74,26 +71,22 @@ data "aws_eks_node_group" "current" { node_group_name = tolist(data.aws_eks_node_groups.current.names)[0] } +data "aws_eks_cluster_auth" "target_eks_auth" { + name = data.aws_eks_cluster.current.name +} 
+ provider "kubernetes" { alias = "eks_e6data" host = data.aws_eks_cluster.current.endpoint cluster_ca_certificate = base64decode(data.aws_eks_cluster.current.certificate_authority[0].data) - exec { - api_version = "client.authentication.k8s.io/v1beta1" - args = ["eks", "get-token", "--cluster-name", var.eks_cluster_name] - command = var.aws_command_line_path - } + token = data.aws_eks_cluster_auth.target_eks_auth.token } provider "kubectl" { host = data.aws_eks_cluster.current.endpoint cluster_ca_certificate = base64decode(data.aws_eks_cluster.current.certificate_authority[0].data) load_config_file = false - exec { - api_version = "client.authentication.k8s.io/v1beta1" - args = ["eks", "get-token", "--cluster-name", var.eks_cluster_name] - command = var.aws_command_line_path - } + token = data.aws_eks_cluster_auth.target_eks_auth.token } provider "helm" { @@ -101,10 +94,6 @@ provider "helm" { kubernetes { host = data.aws_eks_cluster.current.endpoint cluster_ca_certificate = base64decode(data.aws_eks_cluster.current.certificate_authority[0].data) - exec { - api_version = "client.authentication.k8s.io/v1beta1" - args = ["eks", "get-token", "--cluster-name", var.eks_cluster_name] - command = var.aws_command_line_path - } + token = data.aws_eks_cluster_auth.target_eks_auth.token } -} \ No newline at end of file +} diff --git a/aws/e6data_with_existing_eks/terraform.tfvars b/aws/e6data_with_existing_eks/terraform.tfvars index de0937e9..e77110c1 100644 --- a/aws/e6data_with_existing_eks/terraform.tfvars +++ b/aws/e6data_with_existing_eks/terraform.tfvars @@ -18,6 +18,7 @@ bucket_names = ["*"] ### List of bucket names that the e6data engine queries and kubernetes_namespace = "e6data" ### Value of the Kubernetes namespace to deploy the e6data workspace. helm_chart_version = "2.1.7" ### e6data workspace Helm chart version to be used. +debug_namespaces = ["kube-system"] ### Below are the tags which will be applied to all the resources created by this Terraform script. 
cost_tags = { diff --git a/aws/e6data_with_existing_eks/variables.tf b/aws/e6data_with_existing_eks/variables.tf index ad68c939..5dc254b0 100644 --- a/aws/e6data_with_existing_eks/variables.tf +++ b/aws/e6data_with_existing_eks/variables.tf @@ -83,3 +83,12 @@ variable "nodepool_cpu_limits" { default = 100000 } +variable "debug_namespaces" { + type = list(string) + description = "karpenter and alb controller namespaces" + default = ["kube-system"] +} + +locals { + cross_account_id = split(":", var.e6data_cross_oidc_role_arn[0])[4] +} \ No newline at end of file diff --git a/aws/e6data_with_existing_vpc/e6data_engine_iam.tf b/aws/e6data_with_existing_vpc/e6data_engine_iam.tf index 34e86ad3..b61f38d6 100644 --- a/aws/e6data_with_existing_vpc/e6data_engine_iam.tf +++ b/aws/e6data_with_existing_vpc/e6data_engine_iam.tf @@ -53,6 +53,28 @@ data "aws_iam_policy_document" "engine_iam_glue_s3readAccess_doc" { } } +data "aws_iam_policy_document" "system_tables_policy" { + statement { + sid = "AssumeRole" + effect = "Allow" + + actions = [ + "sts:AssumeRole" + ] + resources = ["arn:aws:iam::${local.cross_account_id}:role/e6-system-tables-*"] + } + + statement { + sid = "TagSession" + effect = "Allow" + + actions = [ + "sts:TagSession" + ] + resources = ["*"] + } +} + # Create an IAM policy that grants read access to S3 buckets and the Glue catalog resource "aws_iam_policy" "e6data_engine_s3_glue_policy" { name = "${local.e6data_workspace_name}-engine-s3-glue-${random_string.random.result}" @@ -60,9 +82,15 @@ resource "aws_iam_policy" "e6data_engine_s3_glue_policy" { policy = data.aws_iam_policy_document.engine_iam_glue_s3readAccess_doc.json } +resource "aws_iam_policy" "e6data_engine_system_tables_policy" { + name = "${local.e6data_workspace_name}-engine-system-tables-${random_string.random.result}" + description = "Allows assuming the role for system tables" + policy = data.aws_iam_policy_document.system_tables_policy.json +} + # Create an IAM role for the engine, allowing it
to assume the role with specified policies attached resource "aws_iam_role" "e6data_engine_role" { name = "${local.e6data_workspace_name}-engine-role-${random_string.random.result}" assume_role_policy = data.aws_iam_policy_document.oidc_assume_role_policy.json - managed_policy_arns = [aws_iam_policy.e6data_engine_s3_glue_policy.arn, aws_iam_policy.e6data_s3_read_write_policy.arn] + managed_policy_arns = [aws_iam_policy.e6data_engine_s3_glue_policy.arn, aws_iam_policy.e6data_s3_read_write_policy.arn, aws_iam_policy.e6data_engine_system_tables_policy.arn] } \ No newline at end of file diff --git a/aws/e6data_with_existing_vpc/eks.tf b/aws/e6data_with_existing_vpc/eks.tf index 93123603..bad650ca 100644 --- a/aws/e6data_with_existing_vpc/eks.tf +++ b/aws/e6data_with_existing_vpc/eks.tf @@ -38,26 +38,26 @@ resource "aws_ec2_tag" "cluster_primary_security_group" { value = "e6data" } +data "aws_eks_cluster_auth" "target_eks_auth" { + name = module.eks.cluster_name + + depends_on = [ + module.eks + ] +} + provider "kubernetes" { alias = "e6data" host = module.eks.eks_endpoint cluster_ca_certificate = base64decode(module.eks.eks_certificate_data) - exec { - api_version = "client.authentication.k8s.io/v1beta1" - args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] - command = var.aws_command_line_path - } + token = data.aws_eks_cluster_auth.target_eks_auth.token } provider "kubectl" { host = module.eks.eks_endpoint cluster_ca_certificate = base64decode(module.eks.eks_certificate_data) load_config_file = false - exec { - api_version = "client.authentication.k8s.io/v1beta1" - args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] - command = var.aws_command_line_path - } + token = data.aws_eks_cluster_auth.target_eks_auth.token } provider "helm" { @@ -65,11 +65,7 @@ provider "helm" { kubernetes { host = module.eks.eks_endpoint cluster_ca_certificate = base64decode(module.eks.eks_certificate_data) - exec { - api_version = 
"client.authentication.k8s.io/v1beta1" - args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] - command = var.aws_command_line_path - } + token = data.aws_eks_cluster_auth.target_eks_auth.token } } diff --git a/aws/e6data_with_existing_vpc/helm.tf b/aws/e6data_with_existing_vpc/helm.tf index 10715e55..f3bcae4e 100644 --- a/aws/e6data_with_existing_vpc/helm.tf +++ b/aws/e6data_with_existing_vpc/helm.tf @@ -12,9 +12,5 @@ resource "helm_release" "e6data_workspace_deployment" { values = [local.helm_values_file] - lifecycle { - ignore_changes = [values] - } - depends_on = [module.eks, aws_eks_node_group.default_node_group, module.e6data_authentication] } \ No newline at end of file diff --git a/aws/e6data_with_existing_vpc/karpenter-provisioner-manifests/nodeclass.yaml b/aws/e6data_with_existing_vpc/karpenter-provisioner-manifests/nodeclass.yaml index 398cc0e9..3f645483 100644 --- a/aws/e6data_with_existing_vpc/karpenter-provisioner-manifests/nodeclass.yaml +++ b/aws/e6data_with_existing_vpc/karpenter-provisioner-manifests/nodeclass.yaml @@ -22,7 +22,6 @@ spec: volumeType: gp3 userData: | echo "$(jq '.allowedUnsafeSysctls += ["net.core.somaxconn","net.ipv4.ip_local_port_range"]' /etc/kubernetes/kubelet/kubelet-config.json)" > /etc/kubernetes/kubelet/kubelet-config.json - echo "$(jq '.cpuManagerPolicy = "static"' /etc/kubernetes/kubelet/kubelet-config.json)" > /etc/kubernetes/kubelet/kubelet-config.json mount_location="/app/tmp" mkdir -p $mount_location yum install nvme-cli -y diff --git a/aws/e6data_with_existing_vpc/support.tf b/aws/e6data_with_existing_vpc/support.tf index fff610d5..5f16c0d4 100644 --- a/aws/e6data_with_existing_vpc/support.tf +++ b/aws/e6data_with_existing_vpc/support.tf @@ -17,10 +17,7 @@ locals { type = "AWS" oidc_value = aws_iam_role.e6data_engine_role.arn control_plane_user = ["e6data-${var.workspace_name}-user"] - } - karpenter = { - nodepool = local.e6data_nodepool_name - nodeclass = local.e6data_nodeclass_name + 
debug_namespaces = var.debug_namespaces } }) } diff --git a/aws/e6data_with_existing_vpc/terraform.tfvars b/aws/e6data_with_existing_vpc/terraform.tfvars index 1537ba33..df73de2c 100644 --- a/aws/e6data_with_existing_vpc/terraform.tfvars +++ b/aws/e6data_with_existing_vpc/terraform.tfvars @@ -10,8 +10,8 @@ workspace_name = "workspace" ### Name of the e6data workspace to be created. helm_chart_version = "2.1.7" ### e6data workspace Helm chart version to be used. # Kubernetes Variables -kube_version = "1.31" ### The Kubernetes cluster version. Version 1.24 or higher is required. -default_nodegroup_kube_version = "1.31" +kube_version = "1.32" ### The Kubernetes cluster version. Version 1.24 or higher is required. +default_nodegroup_kube_version = "1.32" eks_disk_size = 100 ### Disk size for the instances in the nodepool. A minimum of 100 GB is required. nodepool_instance_family = ["t3", "t4g", "t2", "c7g", "c7gd", "c6g", "c8g", "r8g", "i8g", "c6gd", "r6g", "r6gd", "r7g", "r7gd", "i3"] @@ -52,6 +52,8 @@ karpenter_namespace = "kube-system" ### Namespace to deploy the karpe karpenter_service_account_name = "karpenter" ### Service account name for the karpenter karpenter_release_version = "1.0.8" ### Version of the karpenter Helm chart +debug_namespaces = ["kube-system"] + #### Additional ingress/egress rules for the EKS Security Group # additional_ingress_rules = [ # { diff --git a/aws/e6data_with_existing_vpc/variables.tf b/aws/e6data_with_existing_vpc/variables.tf index e80748b5..56a5c3b0 100644 --- a/aws/e6data_with_existing_vpc/variables.tf +++ b/aws/e6data_with_existing_vpc/variables.tf @@ -263,6 +263,11 @@ variable "additional_egress_rules" { default = [] } +variable "debug_namespaces" { + type = list(string) + description = "karpenter and alb controller namespaces" + default = ["kube-system"] +} variable "vpc_cni_version" { description = "Version of the VPC CNI to use" type = string @@ -285,4 +290,8 @@ variable "minimum_ip_target" { description = "Minimum number 
of IP addresses to keep available for pod assignment." type = number default = 12 +} + +locals { + cross_account_id = split(":", var.e6data_cross_oidc_role_arn[0])[4] } \ No newline at end of file diff --git a/aws/e6data_with_new_eks/default_nodegroup.tf b/aws/e6data_with_new_eks/default_nodegroup.tf index 297a438d..717eb622 100644 --- a/aws/e6data_with_new_eks/default_nodegroup.tf +++ b/aws/e6data_with_new_eks/default_nodegroup.tf @@ -32,7 +32,7 @@ resource "aws_launch_template" "default_nodegroup_launch_template" { metadata_options { http_endpoint = "enabled" http_tokens = "required" - http_put_response_hop_limit = 1 + http_put_response_hop_limit = 2 instance_metadata_tags = "enabled" } @@ -99,4 +99,4 @@ resource "aws_iam_role" "eks_nodegroup_iam_role" { name = "${local.e6data_workspace_name}-${random_string.random.result}" managed_policy_arns = var.eks_nodegroup_iam_policy_arn assume_role_policy = data.aws_iam_policy_document.eks_nodegroup_iam_assume_policy.json -} \ No newline at end of file +} diff --git a/aws/e6data_with_new_eks/e6data_engine_iam.tf b/aws/e6data_with_new_eks/e6data_engine_iam.tf index 999d6e31..8d2e5d5f 100644 --- a/aws/e6data_with_new_eks/e6data_engine_iam.tf +++ b/aws/e6data_with_new_eks/e6data_engine_iam.tf @@ -53,6 +53,28 @@ data "aws_iam_policy_document" "engine_iam_glue_s3readAccess_doc" { } } +data "aws_iam_policy_document" "system_tables_policy" { + statement { + sid = "AssumeRole" + effect = "Allow" + + actions = [ + "sts:AssumeRole" + ] + resources = ["arn:aws:iam::${local.cross_account_id}:role/e6-system-tables-*"] + } + + statement { + sid = "TagSession" + effect = "Allow" + + actions = [ + "sts:TagSession" + ] + resources = ["*"] + } +} + # Create an IAM policy that grants read access to S3 buckets and the Glue catalog resource "aws_iam_policy" "e6data_engine_s3_glue_policy" { name = "${local.e6data_workspace_name}-engine-s3-glue-policy-${random_string.random.result}" @@ -60,9 +82,15 @@ resource "aws_iam_policy" 
"e6data_engine_s3_glue_policy" { policy = data.aws_iam_policy_document.engine_iam_glue_s3readAccess_doc.json } +resource "aws_iam_policy" "e6data_engine_system_tables_policy" { + name = "${local.e6data_workspace_name}-engine-system-tables-${random_string.random.result}" + description = "Allows assume the role for system tables" + policy = data.aws_iam_policy_document.system_tables_policy.json +} + # Create an IAM role for the engine, allowing it to assume the role with specified policies attached resource "aws_iam_role" "e6data_engine_role" { name = "${local.e6data_workspace_name}-engine-role-${random_string.random.result}" assume_role_policy = data.aws_iam_policy_document.oidc_assume_role_policy.json - managed_policy_arns = [aws_iam_policy.e6data_engine_s3_glue_policy.arn, aws_iam_policy.e6data_s3_read_write_policy.arn] + managed_policy_arns = [aws_iam_policy.e6data_engine_s3_glue_policy.arn, aws_iam_policy.e6data_s3_read_write_policy.arn, aws_iam_policy.e6data_engine_system_tables_policy.arn] } \ No newline at end of file diff --git a/aws/e6data_with_new_eks/eks.tf b/aws/e6data_with_new_eks/eks.tf index 93123603..0f1b1e9a 100644 --- a/aws/e6data_with_new_eks/eks.tf +++ b/aws/e6data_with_new_eks/eks.tf @@ -38,26 +38,26 @@ resource "aws_ec2_tag" "cluster_primary_security_group" { value = "e6data" } +data "aws_eks_cluster_auth" "target_eks_auth" { + name = module.eks.cluster_name + + depends_on = [ + module.eks + ] +} + provider "kubernetes" { alias = "e6data" host = module.eks.eks_endpoint cluster_ca_certificate = base64decode(module.eks.eks_certificate_data) - exec { - api_version = "client.authentication.k8s.io/v1beta1" - args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] - command = var.aws_command_line_path - } + token = data.aws_eks_cluster_auth.target_eks_auth.token } provider "kubectl" { host = module.eks.eks_endpoint cluster_ca_certificate = base64decode(module.eks.eks_certificate_data) load_config_file = false - exec { - api_version = 
"client.authentication.k8s.io/v1beta1" - args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] - command = var.aws_command_line_path - } + token = data.aws_eks_cluster_auth.target_eks_auth.token } provider "helm" { @@ -65,11 +65,7 @@ provider "helm" { kubernetes { host = module.eks.eks_endpoint cluster_ca_certificate = base64decode(module.eks.eks_certificate_data) - exec { - api_version = "client.authentication.k8s.io/v1beta1" - args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] - command = var.aws_command_line_path - } + token = data.aws_eks_cluster_auth.target_eks_auth.token } } diff --git a/aws/e6data_with_new_eks/helm.tf b/aws/e6data_with_new_eks/helm.tf index 10715e55..f3bcae4e 100644 --- a/aws/e6data_with_new_eks/helm.tf +++ b/aws/e6data_with_new_eks/helm.tf @@ -12,9 +12,5 @@ resource "helm_release" "e6data_workspace_deployment" { values = [local.helm_values_file] - lifecycle { - ignore_changes = [values] - } - depends_on = [module.eks, aws_eks_node_group.default_node_group, module.e6data_authentication] } \ No newline at end of file diff --git a/aws/e6data_with_new_eks/karpenter-provisioner-manifests/nodeclass.yaml b/aws/e6data_with_new_eks/karpenter-provisioner-manifests/nodeclass.yaml index 8bb7aebd..3f645483 100644 --- a/aws/e6data_with_new_eks/karpenter-provisioner-manifests/nodeclass.yaml +++ b/aws/e6data_with_new_eks/karpenter-provisioner-manifests/nodeclass.yaml @@ -22,7 +22,6 @@ spec: volumeType: gp3 userData: | echo "$(jq '.allowedUnsafeSysctls += ["net.core.somaxconn","net.ipv4.ip_local_port_range"]' /etc/kubernetes/kubelet/kubelet-config.json)" > /etc/kubernetes/kubelet/kubelet-config.json - echo "$(jq '.cpuManagerPolicy = "static"' /etc/kubernetes/kubelet/kubelet-config.json)" > /etc/kubernetes/kubelet/kubelet-config.json mount_location="/app/tmp" mkdir -p $mount_location yum install nvme-cli -y diff --git a/aws/e6data_with_new_eks/support.tf b/aws/e6data_with_new_eks/support.tf index 6ff25b5d..8765c581 100644 
--- a/aws/e6data_with_new_eks/support.tf +++ b/aws/e6data_with_new_eks/support.tf @@ -18,13 +18,9 @@ locals { type = "AWS" oidc_value = aws_iam_role.e6data_engine_role.arn control_plane_user = ["e6data-${var.workspace_name}-user"] - } - karpenter = { - nodepool = local.e6data_nodepool_name - nodeclass = local.e6data_nodeclass_name + debug_namespaces = var.debug_namespaces } }) - } resource "random_string" "random" { diff --git a/aws/e6data_with_new_eks/terraform.tfvars b/aws/e6data_with_new_eks/terraform.tfvars index cd428029..bc6fa881 100644 --- a/aws/e6data_with_new_eks/terraform.tfvars +++ b/aws/e6data_with_new_eks/terraform.tfvars @@ -10,8 +10,8 @@ workspace_name = "workspace" ### Name of the e6data workspace to be created. helm_chart_version = "2.1.7" ### e6data workspace Helm chart version to be used. # Kubernetes Variables -kube_version = "1.31" ### The Kubernetes cluster version. Version 1.24 or higher is required. -default_nodegroup_kube_version = "1.31" +kube_version = "1.32" ### The Kubernetes cluster version. Version 1.24 or higher is required. +default_nodegroup_kube_version = "1.32" eks_disk_size = 100 ### Disk size for the instances in the nodepool. A minimum of 100 GB is required. 
nodepool_instance_family = ["t3", "t4g", "t2", "c7g", "c7gd", "c6g", "c8g", "r8g", "i8g", "c6gd", "r6g", "r6gd", "r7g", "r7gd", "i3"] @@ -52,6 +52,8 @@ karpenter_namespace = "kube-system" ### Namespace to deploy the karpe karpenter_service_account_name = "karpenter" ### Service account name for the karpenter karpenter_release_version = "1.0.8" ### Version of the karpenter Helm chart +debug_namespaces = ["kube-system"] + #### Additional ingress/egress rules for the EKS Security Group # additional_ingress_rules = [ # { diff --git a/aws/e6data_with_new_eks/variables.tf b/aws/e6data_with_new_eks/variables.tf index fc4e88c4..f8ee2618 100644 --- a/aws/e6data_with_new_eks/variables.tf +++ b/aws/e6data_with_new_eks/variables.tf @@ -262,6 +262,12 @@ variable "additional_egress_rules" { default = [] } +variable "debug_namespaces" { + type = list(string) + description = "karpenter and alb controller namespaces" + default = ["kube-system"] +} + variable "vpc_cni_version" { description = "Version of the VPC CNI to use" type = string @@ -284,4 +290,8 @@ variable "minimum_ip_target" { description = "Minimum number of IP addresses to keep available for pod assignment."
type = number default = 12 +} + +locals { + cross_account_id = split(":", var.e6data_cross_oidc_role_arn[0])[4] } \ No newline at end of file diff --git a/azure/e6data_with_existing_aks/support.tf b/azure/e6data_with_existing_aks/support.tf index 83535b0c..41c16a59 100644 --- a/azure/e6data_with_existing_aks/support.tf +++ b/azure/e6data_with_existing_aks/support.tf @@ -32,10 +32,7 @@ locals { type = "AZURE" oidc_value = azurerm_user_assigned_identity.e6data_identity.client_id control_plane_user = [azurerm_user_assigned_identity.federated_identity.principal_id] - } - karpenter = { - nodepool = local.e6data_nodepool_name - nodeclass = local.e6data_nodeclass_name + debug_namespaces = var.debug_namespaces } }) diff --git a/azure/e6data_with_existing_aks/terraform.tfvars b/azure/e6data_with_existing_aks/terraform.tfvars index 8e1b8ae1..010760b4 100644 --- a/azure/e6data_with_existing_aks/terraform.tfvars +++ b/azure/e6data_with_existing_aks/terraform.tfvars @@ -38,6 +38,8 @@ key_vault_rg_name = "" # The resourc nginx_ingress_controller_namespace = "kube-system" # Namespace where the Nginx Ingress Controller will be deployed nginx_ingress_controller_version = "4.7.1" # Version of the Nginx Ingress Controller to be installed +debug_namespaces = ["kube-system"] + # Toggle to decide whether to deploy the akv2k8s Helm chart. # Set to true to deploy, false to skip deployment. 
deploy_akv2k8s = false diff --git a/azure/e6data_with_existing_aks/variables.tf b/azure/e6data_with_existing_aks/variables.tf index 7e776552..a1adc1a3 100644 --- a/azure/e6data_with_existing_aks/variables.tf +++ b/azure/e6data_with_existing_aks/variables.tf @@ -116,4 +116,10 @@ variable "deploy_nginx_ingress" { description = "Decide whether to deploy nginx ingress" type = bool default = true +} + +variable "debug_namespaces" { + type = list(string) + description = "karpenter and alb controller namespaces" + default = ["kube-system"] } \ No newline at end of file diff --git a/azure/e6data_with_existing_vnet/support.tf b/azure/e6data_with_existing_vnet/support.tf index 00845029..1167bbb8 100644 --- a/azure/e6data_with_existing_vnet/support.tf +++ b/azure/e6data_with_existing_vnet/support.tf @@ -35,10 +35,7 @@ locals { type = "AZURE" oidc_value = azurerm_user_assigned_identity.e6data_identity.client_id control_plane_user = [azurerm_user_assigned_identity.federated_identity.principal_id] - } - karpenter = { - nodepool = local.e6data_nodepool_name - nodeclass = local.e6data_nodeclass_name + debug_namespaces = var.debug_namespaces } }) diff --git a/azure/e6data_with_existing_vnet/terraform.tfvars b/azure/e6data_with_existing_vnet/terraform.tfvars index 4f950812..fd07e4e6 100644 --- a/azure/e6data_with_existing_vnet/terraform.tfvars +++ b/azure/e6data_with_existing_vnet/terraform.tfvars @@ -49,6 +49,8 @@ karpenter_namespace = "kube-system" # Namespace for Karpenter deploym karpenter_service_account_name = "karpenter" # Service account name for Karpenter karpenter_release_version = "1.4.0" # Karpenter release version +debug_namespaces = ["kube-system"] + # Key Vault Configuration key_vault_name = "" # Please provide the Key Vault name in which the certificate for the domain is present. If left blank, a new Key Vault will be created in the AKS resource group. key_vault_rg_name = "" # The resource group for the specified Key Vault.
If left blank, it will default to the AKS resource group. For more info : https://docs.e6data.com/product-documentation/connectivity/endpoints diff --git a/azure/e6data_with_existing_vnet/variables.tf b/azure/e6data_with_existing_vnet/variables.tf index f78d27d5..00b67a28 100644 --- a/azure/e6data_with_existing_vnet/variables.tf +++ b/azure/e6data_with_existing_vnet/variables.tf @@ -168,3 +168,9 @@ variable "identity_id" { type = string description = "Identity ID from the e6data console." } + +variable "debug_namespaces" { + type = list(string) + description = "karpenter and alb controller namespaces" + default = ["kube-system"] +} \ No newline at end of file diff --git a/azure/e6data_with_new_aks/support.tf b/azure/e6data_with_new_aks/support.tf index 15471da1..18c6dc02 100644 --- a/azure/e6data_with_new_aks/support.tf +++ b/azure/e6data_with_new_aks/support.tf @@ -43,10 +43,7 @@ locals { type = "AZURE" oidc_value = azurerm_user_assigned_identity.e6data_identity.client_id control_plane_user = [azurerm_user_assigned_identity.federated_identity.principal_id] - } - karpenter = { - nodepool = local.e6data_nodepool_name - nodeclass = local.e6data_nodeclass_name + debug_namespaces = var.debug_namespaces } }) diff --git a/azure/e6data_with_new_aks/terraform.tfvars b/azure/e6data_with_new_aks/terraform.tfvars index 7fae8ea8..f01306a9 100644 --- a/azure/e6data_with_new_aks/terraform.tfvars +++ b/azure/e6data_with_new_aks/terraform.tfvars @@ -47,6 +47,8 @@ karpenter_namespace = "kube-system" # Namespace for Karpenter deploym karpenter_service_account_name = "karpenter" # Service account name for Karpenter karpenter_release_version = "1.4.0" # Karpenter release version +debug_namespaces = ["kube-system"] + # Key Vault Configuration key_vault_name = "" # Please provide the Key Vault name in which the certificate for the domain is present. If left blank, a new Key Vault will be created in the AKS resource group.
key_vault_rg_name = "" # The resource group for the specified Key Vault. If left blank, it will default to the AKS resource group. For more info : https://docs.e6data.com/product-documentation/connectivity/endpoints diff --git a/azure/e6data_with_new_aks/variables.tf b/azure/e6data_with_new_aks/variables.tf index 92a634e4..faff8400 100644 --- a/azure/e6data_with_new_aks/variables.tf +++ b/azure/e6data_with_new_aks/variables.tf @@ -161,4 +161,10 @@ variable "identity_id" { variable "nodepool_instance_arch" { type = list(string) description = "Instance arch for nodepool" +} + +variable "debug_namespaces" { + type = list(string) + description = "karpenter and alb controller namespaces" + default = ["kube-system"] } \ No newline at end of file diff --git a/gcp/e6data_with_existing_gke/support.tf b/gcp/e6data_with_existing_gke/support.tf index ce5c02bb..56e59f2e 100644 --- a/gcp/e6data_with_existing_gke/support.tf +++ b/gcp/e6data_with_existing_gke/support.tf @@ -13,6 +13,7 @@ locals { type = "GCP" oidc_value = google_service_account.workspace_sa.email control_plane_user = var.control_plane_user + debug_namespaces = var.debug_namespaces } }) diff --git a/gcp/e6data_with_existing_gke/terraform.tfvars b/gcp/e6data_with_existing_gke/terraform.tfvars index 2c900159..f5941e37 100644 --- a/gcp/e6data_with_existing_gke/terraform.tfvars +++ b/gcp/e6data_with_existing_gke/terraform.tfvars @@ -17,6 +17,8 @@ spot_enabled = true # A boolean that represents whether the underly kubernetes_namespace = "namespace1" +debug_namespaces = ["kube-system"] + cost_labels = {} # Cost labels for tracking costs # Note: The variable cost_labels only accepts lowercase letters ([a-z]), numeric characters ([0-9]), underscores (_) and dashes (-).
diff --git a/gcp/e6data_with_existing_gke/variables.tf b/gcp/e6data_with_existing_gke/variables.tf index f8a9cef3..cbe5a519 100644 --- a/gcp/e6data_with_existing_gke/variables.tf +++ b/gcp/e6data_with_existing_gke/variables.tf @@ -71,3 +71,9 @@ variable "spot_enabled" { type = bool description = "Enable spot instances in node pools" } + +variable "debug_namespaces" { + type = list(string) + description = "karpenter and alb controller namespaces" + default = ["kube-system"] +} \ No newline at end of file diff --git a/gcp/e6data_with_existing_vpc/modules/gke_cluster/gke.tf b/gcp/e6data_with_existing_vpc/modules/gke_cluster/gke.tf index 799f18d0..87ba62d9 100644 --- a/gcp/e6data_with_existing_vpc/modules/gke_cluster/gke.tf +++ b/gcp/e6data_with_existing_vpc/modules/gke_cluster/gke.tf @@ -5,7 +5,7 @@ resource "google_container_cluster" "gke_cluster" { location = var.region min_master_version = var.gke_version monitoring_service = "monitoring.googleapis.com/kubernetes" - logging_service = "logging.googleapis.com/kubernetes" + logging_service = "none" network = var.network subnetwork = var.subnetwork initial_node_count = var.initial_node_count @@ -70,4 +70,4 @@ resource "google_container_cluster" "gke_cluster" { } -data "google_client_config" "default" {} \ No newline at end of file +data "google_client_config" "default" {} diff --git a/gcp/e6data_with_existing_vpc/support.tf b/gcp/e6data_with_existing_vpc/support.tf index 7c269605..0c0dadcc 100644 --- a/gcp/e6data_with_existing_vpc/support.tf +++ b/gcp/e6data_with_existing_vpc/support.tf @@ -13,6 +13,7 @@ locals { type = "GCP" oidc_value = google_service_account.workspace_sa.email control_plane_user = var.control_plane_user + debug_namespaces = var.debug_namespaces } }) diff --git a/gcp/e6data_with_existing_vpc/terraform.tfvars b/gcp/e6data_with_existing_vpc/terraform.tfvars index eb81b3ae..bbe030cf 100644 --- a/gcp/e6data_with_existing_vpc/terraform.tfvars +++ b/gcp/e6data_with_existing_vpc/terraform.tfvars @@ -44,6
+44,8 @@ authorized_networks = { #External networks that can access the Kubernet # Kubernetes Namespace kubernetes_namespace = "namespace" # The namespace to use for Kubernetes resources +debug_namespaces = ["kube-system"] + # Cost Labels cost_labels = {} # Cost labels for tracking costs # Note: The variable cost_labels only accepts lowercase letters ([a-z]), numeric characters ([0-9]), underscores (_) and dashes (-). diff --git a/gcp/e6data_with_existing_vpc/variables.tf b/gcp/e6data_with_existing_vpc/variables.tf index 37002dcc..c966348c 100644 --- a/gcp/e6data_with_existing_vpc/variables.tf +++ b/gcp/e6data_with_existing_vpc/variables.tf @@ -159,4 +159,10 @@ variable "deletion_protection" { variable "authorized_networks" { type = map(string) description = "authorized_networks" +} + +variable "debug_namespaces" { + type = list(string) + description = "karpenter and alb controller namespaces" + default = ["kube-system"] } \ No newline at end of file diff --git a/gcp/e6data_with_new_gke/modules/gke_cluster/gke.tf b/gcp/e6data_with_new_gke/modules/gke_cluster/gke.tf index 799f18d0..87ba62d9 100644 --- a/gcp/e6data_with_new_gke/modules/gke_cluster/gke.tf +++ b/gcp/e6data_with_new_gke/modules/gke_cluster/gke.tf @@ -5,7 +5,7 @@ resource "google_container_cluster" "gke_cluster" { location = var.region min_master_version = var.gke_version monitoring_service = "monitoring.googleapis.com/kubernetes" - logging_service = "logging.googleapis.com/kubernetes" + logging_service = "none" network = var.network subnetwork = var.subnetwork initial_node_count = var.initial_node_count @@ -70,4 +70,4 @@ resource "google_container_cluster" "gke_cluster" { } -data "google_client_config" "default" {} \ No newline at end of file +data "google_client_config" "default" {} diff --git a/gcp/e6data_with_new_gke/support.tf b/gcp/e6data_with_new_gke/support.tf index ed54b908..daee2b4b 100644 --- a/gcp/e6data_with_new_gke/support.tf +++ b/gcp/e6data_with_new_gke/support.tf @@ -15,6 +15,7 @@ locals
{ type = "GCP" oidc_value = google_service_account.workspace_sa.email control_plane_user = var.control_plane_user + debug_namespaces = var.debug_namespaces } }) diff --git a/gcp/e6data_with_new_gke/terraform.tfvars b/gcp/e6data_with_new_gke/terraform.tfvars index 906448aa..fcdef537 100644 --- a/gcp/e6data_with_new_gke/terraform.tfvars +++ b/gcp/e6data_with_new_gke/terraform.tfvars @@ -43,6 +43,8 @@ authorized_networks = { #External networks that can access the Kubernet # Kubernetes Namespace kubernetes_namespace = "namespace" # The namespace to use for Kubernetes resources +debug_namespaces = ["kube-system"] + # Cost Labels cost_labels = {} # Cost labels for tracking costs # Note: The variable cost_labels only accepts lowercase letters ([a-z]), numeric characters ([0-9]), underscores (_) and dashes (-). diff --git a/gcp/e6data_with_new_gke/variables.tf b/gcp/e6data_with_new_gke/variables.tf index 25783214..ced919da 100644 --- a/gcp/e6data_with_new_gke/variables.tf +++ b/gcp/e6data_with_new_gke/variables.tf @@ -149,4 +149,10 @@ variable "deletion_protection" { variable "authorized_networks" { type = map(string) description = "authorized_networks" +} + +variable "debug_namespaces" { + type = list(string) + description = "kaprneter and alb controller namespaces" + default = ["kube-system"] } \ No newline at end of file