From 1db7fb091f717fbdb051c976c9c742ff3541551c Mon Sep 17 00:00:00 2001 From: gregharvey Date: Thu, 22 Jun 2023 17:26:05 +0200 Subject: [PATCH 01/28] Adding container push and build code. --- docs/_Sidebar.md | 1 + docs/roles/deploy_container.md | 28 +++++++++++ roles/deploy_container/README.md | 30 ++++++++++++ roles/deploy_container/defaults/main.yml | 15 ++++++ roles/deploy_container/tasks/main.yml | 49 +++++++++++++++++++ .../deploy_container/templates/Dockerfile.j2 | 7 +++ 6 files changed, 130 insertions(+) create mode 100644 docs/roles/deploy_container.md create mode 100644 roles/deploy_container/README.md create mode 100644 roles/deploy_container/defaults/main.yml create mode 100644 roles/deploy_container/tasks/main.yml create mode 100644 roles/deploy_container/templates/Dockerfile.j2 diff --git a/docs/_Sidebar.md b/docs/_Sidebar.md index 16139d8f..3d47469a 100644 --- a/docs/_Sidebar.md +++ b/docs/_Sidebar.md @@ -31,6 +31,7 @@ - [Data backups](/roles/database_backup) - [MySQL backups](/roles/database_backup/database_backup-mysql) - [Deploy](/roles/deploy_code) + - [Deploy container](/roles/deploy_container) - [Init](/roles/_init) - [LHCI run](/roles/lhci_run) - ["Meta"](/roles/_meta) diff --git a/docs/roles/deploy_container.md b/docs/roles/deploy_container.md new file mode 100644 index 00000000..46285580 --- /dev/null +++ b/docs/roles/deploy_container.md @@ -0,0 +1,28 @@ +# Deploy container +Step that deploys the codebase in a Docker container image. + + + + + +## Default variables +```yaml +--- +deploy_container: + container_name: "example/example" + container_tag: latest # tag will take format container_name:container_tag + docker_registry_url: https://index.docker.io/v1/ + docker_registry_user: example + docker_registry_pass: asdf1234 + docker_base_command: "docker image build" + docker_build_dir: "{{ _ce_deploy_build_dir }}" + environment_vars: {} # dictionary you can populate for use in a custom Dockerfile template + # Requires the deploy IAM user to have the managed EC2InstanceProfileForImageBuilderECRContainerBuilds policy attached + aws_ecr: + enabled: false # set to true if using AWS ECR + region: eu-west-1 + profile: example + +``` + + diff --git a/roles/deploy_container/README.md b/roles/deploy_container/README.md new file mode 100644 index 00000000..0647af50 --- /dev/null +++ b/roles/deploy_container/README.md @@ -0,0 +1,30 @@ +# Deploy container +Step that deploys the codebase in a Docker container image. Requires Docker and the `community.docker` collection for Ansible to be installed on your deploy server. This can be handled by [`ce-provision`](https://github.com/codeenigma/ce-provision) using the `ce_deploy` and `docker_ce` roles. + +AWS ECR registries require the AWS CLI user provided for `ce-deploy` to have the managed AWS `EC2InstanceProfileForImageBuilderECRContainerBuilds` policy attached via IAM to allow access to fetch credentials and push containers. 
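For reference, the ECR path in this role boils down to the same flow you would run by hand with the AWS CLI and Docker; a sketch only, with the account ID, region and profile as placeholders:

```
aws ecr get-login-password --region eu-west-1 --profile example \
  | docker login --username AWS --password-stdin 123456789012.dkr.ecr.eu-west-1.amazonaws.com
docker push 123456789012.dkr.ecr.eu-west-1.amazonaws.com/example:latest
```

The token returned by `aws ecr get-login-password` is only valid for 12 hours, so the role fetches a fresh one on every run rather than storing it.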
+ + + + + +## Default variables +```yaml +--- +deploy_container: + container_name: "example/example" + container_tag: latest # tag will take format container_name:container_tag + docker_registry_url: https://index.docker.io/v1/ + docker_registry_user: example + docker_registry_pass: asdf1234 + docker_base_command: "docker image build" + docker_build_dir: "{{ _ce_deploy_build_dir }}" + environment_vars: {} # dictionary you can populate for use in a custom Dockerfile template + # Requires the deploy IAM user to have the managed EC2InstanceProfileForImageBuilderECRContainerBuilds policy attached + aws_ecr: + enabled: false # set to true if using AWS ECR + region: eu-west-1 + profile: example + +``` + + diff --git a/roles/deploy_container/defaults/main.yml b/roles/deploy_container/defaults/main.yml new file mode 100644 index 00000000..4e4d30c3 --- /dev/null +++ b/roles/deploy_container/defaults/main.yml @@ -0,0 +1,15 @@ +--- +deploy_container: + container_name: "example/example" + container_tag: latest # tag will take format container_name:container_tag + docker_registry_url: https://index.docker.io/v1/ + docker_registry_user: example + docker_registry_pass: asdf1234 + docker_base_command: "docker image build" + docker_build_dir: "{{ _ce_deploy_build_dir }}" + environment_vars: {} # dictionary you can populate for use in a custom Dockerfile template + # Requires the deploy IAM user to have the managed EC2InstanceProfileForImageBuilderECRContainerBuilds policy attached + aws_ecr: + enabled: false # set to true if using AWS ECR + region: eu-west-1 + profile: example diff --git a/roles/deploy_container/tasks/main.yml b/roles/deploy_container/tasks/main.yml new file mode 100644 index 00000000..4eb9d87a --- /dev/null +++ b/roles/deploy_container/tasks/main.yml @@ -0,0 +1,49 @@ +--- +# @TODO - for AWS ECR we'll need certain policies attaching to the deploy IAM user +- name: Create Dockerfile from template. + local_action: + module: ansible.builtin.template + src: Dockerfile.j2 + dest: "{{ deploy_container.docker_build_dir }}/Dockerfile" + +- name: Set Docker registry username and password. + ansible.builtin.set_fact: + _docker_registry_username: "{{ deploy_container.docker_registry_user }}" + _docker_registry_password: "{{ deploy_container.docker_registry_pass }}" + +# Token valid for 12 hours +- name: Fetch AWS ECR registry login token. + ansible.builtin.command: + command: "aws ecr get-login-password --region {{ deploy_container.aws_ecr.region }} --profile {{ deploy_container.aws_ecr.profile }}" + when: deploy_container.aws_ecr.enabled + register: _docker_registry_ecr_token + +- name: Set AWS ECR registry password. + ansible.builtin.set_fact: + _docker_registry_password: "{{ _docker_registry_ecr_token.stdout }}" + when: deploy_container.aws_ecr.enabled + +- name: Set AWS ECR registry username. + ansible.builtin.set_fact: + _docker_registry_username: "AWS" + when: deploy_container.aws_ecr.enabled + +- name: Log into Docker registry. + community.docker.docker_login: + registry_url: "{{ deploy_container.docker_registry_url }}" + username: "{{ _docker_registry_username }}" + password: "{{ _docker_registry_password }}" + reauthorize: true + delegate_to: localhost + when: deploy_container.docker_registry_login + +- name: Build and push container image. 
+ community.docker.docker_image: + build: + path: "{{ deploy_container.docker_build_dir }}" + repository: "{{ deploy_container.docker_registry_url }}" + name: "{{ deploy_container.container_name }}" + tag: "{{ deploy_container.container_tag | default('latest') }}" + push: true + source: build + delegate_to: localhost diff --git a/roles/deploy_container/templates/Dockerfile.j2 b/roles/deploy_container/templates/Dockerfile.j2 new file mode 100644 index 00000000..d0f5e81c --- /dev/null +++ b/roles/deploy_container/templates/Dockerfile.j2 @@ -0,0 +1,7 @@ +# Basic Dockerfile example +FROM debian:bullseye-slim +MAINTAINER sysadm@codeenigma.com + +RUN apt-get update +RUN apt-get install –y nginx +CMD ["echo","Image created"] \ No newline at end of file From b45c182875d1984ae6c2f7d02fe5d783ea5971a1 Mon Sep 17 00:00:00 2001 From: gregharvey Date: Thu, 22 Jun 2023 17:54:10 +0200 Subject: [PATCH 02/28] Fixing some minor issues. --- roles/deploy_container/tasks/main.yml | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/roles/deploy_container/tasks/main.yml b/roles/deploy_container/tasks/main.yml index 4eb9d87a..37013df2 100644 --- a/roles/deploy_container/tasks/main.yml +++ b/roles/deploy_container/tasks/main.yml @@ -10,23 +10,27 @@ ansible.builtin.set_fact: _docker_registry_username: "{{ deploy_container.docker_registry_user }}" _docker_registry_password: "{{ deploy_container.docker_registry_pass }}" + delegate_to: localhost # Token valid for 12 hours - name: Fetch AWS ECR registry login token. ansible.builtin.command: - command: "aws ecr get-login-password --region {{ deploy_container.aws_ecr.region }} --profile {{ deploy_container.aws_ecr.profile }}" + cmd: "aws ecr get-login-password --region {{ deploy_container.aws_ecr.region }} --profile {{ deploy_container.aws_ecr.profile }}" when: deploy_container.aws_ecr.enabled + delegate_to: localhost register: _docker_registry_ecr_token - name: Set AWS ECR registry password. ansible.builtin.set_fact: _docker_registry_password: "{{ _docker_registry_ecr_token.stdout }}" when: deploy_container.aws_ecr.enabled + delegate_to: localhost - name: Set AWS ECR registry username. ansible.builtin.set_fact: _docker_registry_username: "AWS" when: deploy_container.aws_ecr.enabled + delegate_to: localhost - name: Log into Docker registry. community.docker.docker_login: @@ -35,7 +39,6 @@ password: "{{ _docker_registry_password }}" reauthorize: true delegate_to: localhost - when: deploy_container.docker_registry_login - name: Build and push container image. community.docker.docker_image: From 621176754b0be6bb4cd306f5b827c91dd53342a7 Mon Sep 17 00:00:00 2001 From: gregharvey Date: Thu, 22 Jun 2023 18:11:39 +0200 Subject: [PATCH 03/28] Docs updates and template flexibility improvement. --- docs/roles/deploy_container.md | 12 +++++++++++- roles/deploy_container/README.md | 10 +++++++++- roles/deploy_container/defaults/main.yml | 1 + roles/deploy_container/tasks/main.yml | 2 +- .../templates/{Dockerfile.j2 => example.j2} | 2 +- 5 files changed, 23 insertions(+), 4 deletions(-) rename roles/deploy_container/templates/{Dockerfile.j2 => example.j2} (80%) diff --git a/docs/roles/deploy_container.md b/docs/roles/deploy_container.md index 46285580..e34d028c 100644 --- a/docs/roles/deploy_container.md +++ b/docs/roles/deploy_container.md @@ -1,5 +1,14 @@ # Deploy container -Step that deploys the codebase in a Docker container image. +Step that deploys the codebase in a Docker container image. 
Requires Docker and the `community.docker` collection for Ansible to be installed on your deploy server. You will also need to add a `docker` group and make sure your local deploy user is in that group, for example: + +``` +sudo groupadd docker +sudo usermod -aG docker deploy +``` + +This can be handled automatically by [`ce-provision`](https://github.com/codeenigma/ce-provision) using the `ce_deploy` and `docker_ce` roles. + +AWS ECR registries require the AWS CLI user provided for `ce-deploy` to have the managed AWS `EC2InstanceProfileForImageBuilderECRContainerBuilds` policy attached via IAM to allow access to fetch credentials and push containers. @@ -16,6 +25,7 @@ deploy_container: docker_registry_pass: asdf1234 docker_base_command: "docker image build" docker_build_dir: "{{ _ce_deploy_build_dir }}" + dockerfile_template: example.j2 # provide a templates directory next to your playbook and change this to match your Dockerfile template name environment_vars: {} # dictionary you can populate for use in a custom Dockerfile template # Requires the deploy IAM user to have the managed EC2InstanceProfileForImageBuilderECRContainerBuilds policy attached aws_ecr: diff --git a/roles/deploy_container/README.md b/roles/deploy_container/README.md index 0647af50..e34d028c 100644 --- a/roles/deploy_container/README.md +++ b/roles/deploy_container/README.md @@ -1,5 +1,12 @@ # Deploy container -Step that deploys the codebase in a Docker container image. Requires Docker and the `community.docker` collection for Ansible to be installed on your deploy server. This can be handled by [`ce-provision`](https://github.com/codeenigma/ce-provision) using the `ce_deploy` and `docker_ce` roles. +Step that deploys the codebase in a Docker container image. Requires Docker and the `community.docker` collection for Ansible to be installed on your deploy server. You will also need to add a `docker` group and make sure your local deploy user is in that group, for example: + +``` +sudo groupadd docker +sudo usermod -aG docker deploy +``` + +This can be handled automatically by [`ce-provision`](https://github.com/codeenigma/ce-provision) using the `ce_deploy` and `docker_ce` roles. AWS ECR registries require the AWS CLI user provided for `ce-deploy` to have the managed AWS `EC2InstanceProfileForImageBuilderECRContainerBuilds` policy attached via IAM to allow access to fetch credentials and push containers. 
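With `dockerfile_template` you can point the role at your own template, kept in a `templates/` directory next to your playbook. A minimal sketch of such a template, assuming you want the `environment_vars` dictionary rendered as `ENV` instructions (the base image and packages are illustrative only):

```
# templates/my-app.j2
FROM debian:bullseye-slim
{% for key, value in deploy_container.environment_vars.items() %}
ENV {{ key }}="{{ value }}"
{% endfor %}
RUN apt-get update && apt-get install -y nginx
CMD ["nginx", "-g", "daemon off;"]
```

Because the file is rendered with `ansible.builtin.template`, any variable available to the deploy play can be interpolated here, not just `environment_vars`.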
@@ -18,6 +25,7 @@ deploy_container: docker_registry_pass: asdf1234 docker_base_command: "docker image build" docker_build_dir: "{{ _ce_deploy_build_dir }}" + dockerfile_template: example.j2 # provide a templates directory next to your playbook and change this to match your Dockerfile template name environment_vars: {} # dictionary you can populate for use in a custom Dockerfile template # Requires the deploy IAM user to have the managed EC2InstanceProfileForImageBuilderECRContainerBuilds policy attached aws_ecr: diff --git a/roles/deploy_container/defaults/main.yml b/roles/deploy_container/defaults/main.yml index 4e4d30c3..8b1e8e69 100644 --- a/roles/deploy_container/defaults/main.yml +++ b/roles/deploy_container/defaults/main.yml @@ -7,6 +7,7 @@ deploy_container: docker_registry_pass: asdf1234 docker_base_command: "docker image build" docker_build_dir: "{{ _ce_deploy_build_dir }}" + dockerfile_template: example.j2 # provide a templates directory next to your playbook and change this to match your Dockerfile template name environment_vars: {} # dictionary you can populate for use in a custom Dockerfile template # Requires the deploy IAM user to have the managed EC2InstanceProfileForImageBuilderECRContainerBuilds policy attached aws_ecr: diff --git a/roles/deploy_container/tasks/main.yml b/roles/deploy_container/tasks/main.yml index 37013df2..2b037a40 100644 --- a/roles/deploy_container/tasks/main.yml +++ b/roles/deploy_container/tasks/main.yml @@ -3,7 +3,7 @@ - name: Create Dockerfile from template. local_action: module: ansible.builtin.template - src: Dockerfile.j2 + src: "{{ deploy_container.dockerfile_template }}" dest: "{{ deploy_container.docker_build_dir }}/Dockerfile" - name: Set Docker registry username and password. diff --git a/roles/deploy_container/templates/Dockerfile.j2 b/roles/deploy_container/templates/example.j2 similarity index 80% rename from roles/deploy_container/templates/Dockerfile.j2 rename to roles/deploy_container/templates/example.j2 index d0f5e81c..5a155197 100644 --- a/roles/deploy_container/templates/Dockerfile.j2 +++ b/roles/deploy_container/templates/example.j2 @@ -3,5 +3,5 @@ FROM debian:bullseye-slim MAINTAINER sysadm@codeenigma.com RUN apt-get update -RUN apt-get install –y nginx +RUN apt-get install -y nginx CMD ["echo","Image created"] \ No newline at end of file From 3eecd60fd7e5e9109ac83dcba0a55cd194e05a3e Mon Sep 17 00:00:00 2001 From: gregharvey Date: Thu, 22 Jun 2023 18:20:07 +0200 Subject: [PATCH 04/28] Creating separate registry name for handling docker push. 
--- roles/deploy_container/defaults/main.yml | 1 + roles/deploy_container/tasks/main.yml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/roles/deploy_container/defaults/main.yml b/roles/deploy_container/defaults/main.yml index 8b1e8e69..77fedec4 100644 --- a/roles/deploy_container/defaults/main.yml +++ b/roles/deploy_container/defaults/main.yml @@ -2,6 +2,7 @@ deploy_container: container_name: "example/example" container_tag: latest # tag will take format container_name:container_tag + docker_registry_name: index.docker.io docker_registry_url: https://index.docker.io/v1/ docker_registry_user: example docker_registry_pass: asdf1234 diff --git a/roles/deploy_container/tasks/main.yml b/roles/deploy_container/tasks/main.yml index 2b037a40..f8984bcc 100644 --- a/roles/deploy_container/tasks/main.yml +++ b/roles/deploy_container/tasks/main.yml @@ -44,7 +44,7 @@ community.docker.docker_image: build: path: "{{ deploy_container.docker_build_dir }}" - repository: "{{ deploy_container.docker_registry_url }}" + repository: "{{ deploy_container.docker_registry_name }}" name: "{{ deploy_container.container_name }}" tag: "{{ deploy_container.container_tag | default('latest') }}" push: true From a7948552f6df758182022f2acd6b9a12688b2db7 Mon Sep 17 00:00:00 2001 From: gregharvey Date: Thu, 22 Jun 2023 18:33:24 +0200 Subject: [PATCH 05/28] Trying to get the registry name right. --- docs/roles/deploy_container.md | 4 ++-- roles/deploy_container/README.md | 4 ++-- roles/deploy_container/defaults/main.yml | 5 ++--- roles/deploy_container/tasks/main.yml | 3 +-- 4 files changed, 7 insertions(+), 9 deletions(-) diff --git a/docs/roles/deploy_container.md b/docs/roles/deploy_container.md index e34d028c..9730b01e 100644 --- a/docs/roles/deploy_container.md +++ b/docs/roles/deploy_container.md @@ -18,9 +18,9 @@ AWS ECR registries require the AWS CLI user provided for `ce-deploy` to have the ```yaml --- deploy_container: - container_name: "example/example" + container_name: example container_tag: latest # tag will take format container_name:container_tag - docker_registry_url: https://index.docker.io/v1/ + docker_registry_name: index.docker.io/example # combines with container_name to make the full registry name, docker_registry_name/container_name docker_registry_user: example docker_registry_pass: asdf1234 docker_base_command: "docker image build" diff --git a/roles/deploy_container/README.md b/roles/deploy_container/README.md index e34d028c..9730b01e 100644 --- a/roles/deploy_container/README.md +++ b/roles/deploy_container/README.md @@ -18,9 +18,9 @@ AWS ECR registries require the AWS CLI user provided for `ce-deploy` to have the ```yaml --- deploy_container: - container_name: "example/example" + container_name: example container_tag: latest # tag will take format container_name:container_tag - docker_registry_url: https://index.docker.io/v1/ + docker_registry_name: index.docker.io/example # combines with container_name to make the full registry name, docker_registry_name/container_name docker_registry_user: example docker_registry_pass: asdf1234 docker_base_command: "docker image build" diff --git a/roles/deploy_container/defaults/main.yml b/roles/deploy_container/defaults/main.yml index 77fedec4..3578e12a 100644 --- a/roles/deploy_container/defaults/main.yml +++ b/roles/deploy_container/defaults/main.yml @@ -1,9 +1,8 @@ --- deploy_container: - container_name: "example/example" + container_name: example container_tag: latest # tag will take format container_name:container_tag - 
docker_registry_name: index.docker.io - docker_registry_url: https://index.docker.io/v1/ + docker_registry_name: index.docker.io/example # combines with container_name to make the full registry name, docker_registry_name/container_name docker_registry_user: example docker_registry_pass: asdf1234 docker_base_command: "docker image build" diff --git a/roles/deploy_container/tasks/main.yml b/roles/deploy_container/tasks/main.yml index f8984bcc..c58bb8b1 100644 --- a/roles/deploy_container/tasks/main.yml +++ b/roles/deploy_container/tasks/main.yml @@ -44,8 +44,7 @@ community.docker.docker_image: build: path: "{{ deploy_container.docker_build_dir }}" - repository: "{{ deploy_container.docker_registry_name }}" - name: "{{ deploy_container.container_name }}" + name: "{{ deploy_container.docker_registry_name }}/{{ deploy_container.container_name }}" tag: "{{ deploy_container.container_tag | default('latest') }}" push: true source: build From 6c6db266edf776558533fefd26ad3a273f94d36a Mon Sep 17 00:00:00 2001 From: gregharvey Date: Tue, 27 Jun 2023 12:46:16 +0200 Subject: [PATCH 06/28] Adding AWS stack components to ECS builds. --- docs/roles/deploy_container.md | 64 ++++- roles/deploy_container/README.md | 64 ++++- roles/deploy_container/defaults/main.yml | 64 ++++- roles/deploy_container/tasks/main.yml | 297 ++++++++++++++++++++++- roles/deploy_container/tasks/subnet.yml | 13 + 5 files changed, 495 insertions(+), 7 deletions(-) create mode 100644 roles/deploy_container/tasks/subnet.yml diff --git a/docs/roles/deploy_container.md b/docs/roles/deploy_container.md index 9730b01e..0e4fe6fc 100644 --- a/docs/roles/deploy_container.md +++ b/docs/roles/deploy_container.md @@ -31,7 +31,69 @@ deploy_container: aws_ecr: enabled: false # set to true if using AWS ECR region: eu-west-1 - profile: example + aws_profile: example + # Requires the deploy IAM user to have the managed AmazonECS_FullAccess and ElasticLoadBalancingFullAccess policies attached + # Note, you can if you wish make more restrictive roles and policies + aws_ecs: + enabled: false + region: eu-west-1 + aws_profile: example + tags: {} + domain_name: www.example.com + route_53: + zone: example.com + aws_profile: example2 # might not be the same account + vpc_name: example + #vpc_id: vpc-XXXXXXX # optionally specify VPC ID to use + subnets: # list of public subnet names + - example-dev-a + - example-dev-b + security_groups: [] # list of security groups, accepts names or IDs + cluster_name: example + family_name: example + task_definition_revision: "" # integer, but must be presented as a string for Jinja2 + task_count: 1 + task_minimum_count: 1 + task_maximum_count: 4 + # See docs for values: https://docs.aws.amazon.com/autoscaling/application/APIReference/API_TargetTrackingScalingPolicyConfiguration.html + service_autoscale_metric_type: ECSServiceAverageCPUUtilization + service_autoscale_up_cooldown: 120 + service_autoscale_down_cooldown: 120 + service_autoscale_target_value: 70 # the value to trigger a scaling event at + service_elb_container_name: example # the name of the container to be load balanced + execution_role_arn: "arn:aws:iam::000000000000:role/ecsTaskExecutionRole" # ARN of the IAM role to run the task as, must have access to the ECR repository if applicable + containers: [] # list of container definitions, see docs: https://docs.ansible.com/ansible/latest/collections/community/aws/ecs_taskdefinition_module.html#parameter-containers + #cpu: 512 # these values can be set globally or per container + #memory: 1024 + launch_type: 
FARGATE + network_mode: awsvpc + #volumes: [] # list of additional volumes to attach + target_group_name: example # 32 character limit + target_group_protocol: http + target_group_port: 80 + targets: [] # typically we do not specify targets at this point, this will be handled automatically by the ECS service + #- Id: 10.0.0.2 + # Port: 80 + # AvailabilityZone: all + health_check: + protocol: http + path: / + response_codes: "200" + # Requires the deploy IAM user to have the managed AWSCertificateManagerFullAccess and AmazonRoute53FullAccess policies attached + acm: # see https://github.com/codeenigma/ce-provision/tree/1.x/roles/aws/aws_acm + create_cert: false + extra_domains: [] # list of Subject Alternative Name domains and zones + ssl_certificate_ARN: "" # optional SSL cert ARN if you imported one into AWS Certificate Manager + elb_security_groups: [] # default SG is used if none provided - module supports names or IDs + elb_http_port: 80 + elb_https_port: 443 + elb_ssl_policy: ELBSecurityPolicy-TLS13-1-2-2021-06 # see https://docs.aws.amazon.com/elasticloadbalancing/latest/application/create-https-listener.html#describe-ssl-policies + elb_listener_http_rules: [] + elb_listener_https_rules: [] + # Add custom listeners. See https://docs.ansible.com/ansible/latest/collections/amazon/aws/elb_application_lb_module.html + elb_listeners: [] + elb_idle_timeout: 60 + elb_ip_address_type: "ipv4" # Can be 'ipv4' or 'dualstack' (the latter includes IPv4 and IPv6 addresses). ``` diff --git a/roles/deploy_container/README.md b/roles/deploy_container/README.md index 9730b01e..0e4fe6fc 100644 --- a/roles/deploy_container/README.md +++ b/roles/deploy_container/README.md @@ -31,7 +31,69 @@ deploy_container: aws_ecr: enabled: false # set to true if using AWS ECR region: eu-west-1 - profile: example + aws_profile: example + # Requires the deploy IAM user to have the managed AmazonECS_FullAccess and ElasticLoadBalancingFullAccess policies attached + # Note, you can if you wish make more restrictive roles and policies + aws_ecs: + enabled: false + region: eu-west-1 + aws_profile: example + tags: {} + domain_name: www.example.com + route_53: + zone: example.com + aws_profile: example2 # might not be the same account + vpc_name: example + #vpc_id: vpc-XXXXXXX # optionally specify VPC ID to use + subnets: # list of public subnet names + - example-dev-a + - example-dev-b + security_groups: [] # list of security groups, accepts names or IDs + cluster_name: example + family_name: example + task_definition_revision: "" # integer, but must be presented as a string for Jinja2 + task_count: 1 + task_minimum_count: 1 + task_maximum_count: 4 + # See docs for values: https://docs.aws.amazon.com/autoscaling/application/APIReference/API_TargetTrackingScalingPolicyConfiguration.html + service_autoscale_metric_type: ECSServiceAverageCPUUtilization + service_autoscale_up_cooldown: 120 + service_autoscale_down_cooldown: 120 + service_autoscale_target_value: 70 # the value to trigger a scaling event at + service_elb_container_name: example # the name of the container to be load balanced + execution_role_arn: "arn:aws:iam::000000000000:role/ecsTaskExecutionRole" # ARN of the IAM role to run the task as, must have access to the ECR repository if applicable + containers: [] # list of container definitions, see docs: https://docs.ansible.com/ansible/latest/collections/community/aws/ecs_taskdefinition_module.html#parameter-containers + #cpu: 512 # these values can be set globally or per container + #memory: 1024 + launch_type: 
FARGATE + network_mode: awsvpc + #volumes: [] # list of additional volumes to attach + target_group_name: example # 32 character limit + target_group_protocol: http + target_group_port: 80 + targets: [] # typically we do not specify targets at this point, this will be handled automatically by the ECS service + #- Id: 10.0.0.2 + # Port: 80 + # AvailabilityZone: all + health_check: + protocol: http + path: / + response_codes: "200" + # Requires the deploy IAM user to have the managed AWSCertificateManagerFullAccess and AmazonRoute53FullAccess policies attached + acm: # see https://github.com/codeenigma/ce-provision/tree/1.x/roles/aws/aws_acm + create_cert: false + extra_domains: [] # list of Subject Alternative Name domains and zones + ssl_certificate_ARN: "" # optional SSL cert ARN if you imported one into AWS Certificate Manager + elb_security_groups: [] # default SG is used if none provided - module supports names or IDs + elb_http_port: 80 + elb_https_port: 443 + elb_ssl_policy: ELBSecurityPolicy-TLS13-1-2-2021-06 # see https://docs.aws.amazon.com/elasticloadbalancing/latest/application/create-https-listener.html#describe-ssl-policies + elb_listener_http_rules: [] + elb_listener_https_rules: [] + # Add custom listeners. See https://docs.ansible.com/ansible/latest/collections/amazon/aws/elb_application_lb_module.html + elb_listeners: [] + elb_idle_timeout: 60 + elb_ip_address_type: "ipv4" # Can be 'ipv4' or 'dualstack' (the latter includes IPv4 and IPv6 addresses). ``` diff --git a/roles/deploy_container/defaults/main.yml b/roles/deploy_container/defaults/main.yml index 3578e12a..d84b0d72 100644 --- a/roles/deploy_container/defaults/main.yml +++ b/roles/deploy_container/defaults/main.yml @@ -13,4 +13,66 @@ deploy_container: aws_ecr: enabled: false # set to true if using AWS ECR region: eu-west-1 - profile: example + aws_profile: example + # Requires the deploy IAM user to have the managed AmazonECS_FullAccess and ElasticLoadBalancingFullAccess policies attached + # Note, you can if you wish make more restrictive roles and policies + aws_ecs: + enabled: false + region: eu-west-1 + aws_profile: example + tags: {} + domain_name: www.example.com + route_53: + zone: example.com + aws_profile: example2 # might not be the same account + vpc_name: example + #vpc_id: vpc-XXXXXXX # optionally specify VPC ID to use + subnets: # list of public subnet names + - example-dev-a + - example-dev-b + security_groups: [] # list of security groups, accepts names or IDs + cluster_name: example + family_name: example + task_definition_revision: "" # integer, but must be presented as a string for Jinja2 + task_count: 1 + task_minimum_count: 1 + task_maximum_count: 4 + # See docs for values: https://docs.aws.amazon.com/autoscaling/application/APIReference/API_TargetTrackingScalingPolicyConfiguration.html + service_autoscale_metric_type: ECSServiceAverageCPUUtilization + service_autoscale_up_cooldown: 120 + service_autoscale_down_cooldown: 120 + service_autoscale_target_value: 70 # the value to trigger a scaling event at + service_elb_container_name: example # the name of the container to be load balanced + execution_role_arn: "arn:aws:iam::000000000000:role/ecsTaskExecutionRole" # ARN of the IAM role to run the task as, must have access to the ECR repository if applicable + containers: [] # list of container definitions, see docs: https://docs.ansible.com/ansible/latest/collections/community/aws/ecs_taskdefinition_module.html#parameter-containers + #cpu: 512 # these values can be set globally or per container + 
#memory: 1024 + launch_type: FARGATE + network_mode: awsvpc + #volumes: [] # list of additional volumes to attach + target_group_name: example # 32 character limit + target_group_protocol: http + target_group_port: 80 + targets: [] # typically we do not specify targets at this point, this will be handled automatically by the ECS service + #- Id: 10.0.0.2 + # Port: 80 + # AvailabilityZone: all + health_check: + protocol: http + path: / + response_codes: "200" + # Requires the deploy IAM user to have the managed AWSCertificateManagerFullAccess and AmazonRoute53FullAccess policies attached + acm: # see https://github.com/codeenigma/ce-provision/tree/1.x/roles/aws/aws_acm + create_cert: false + extra_domains: [] # list of Subject Alternative Name domains and zones + ssl_certificate_ARN: "" # optional SSL cert ARN if you imported one into AWS Certificate Manager + elb_security_groups: [] # default SG is used if none provided - module supports names or IDs + elb_http_port: 80 + elb_https_port: 443 + elb_ssl_policy: ELBSecurityPolicy-TLS13-1-2-2021-06 # see https://docs.aws.amazon.com/elasticloadbalancing/latest/application/create-https-listener.html#describe-ssl-policies + elb_listener_http_rules: [] + elb_listener_https_rules: [] + # Add custom listeners. See https://docs.ansible.com/ansible/latest/collections/amazon/aws/elb_application_lb_module.html + elb_listeners: [] + elb_idle_timeout: 60 + elb_ip_address_type: "ipv4" # Can be 'ipv4' or 'dualstack' (the latter includes IPv4 and IPv6 addresses). diff --git a/roles/deploy_container/tasks/main.yml b/roles/deploy_container/tasks/main.yml index c58bb8b1..dd64309f 100644 --- a/roles/deploy_container/tasks/main.yml +++ b/roles/deploy_container/tasks/main.yml @@ -1,5 +1,5 @@ --- -# @TODO - for AWS ECR we'll need certain policies attaching to the deploy IAM user +# Build and ship a container image - name: Create Dockerfile from template. local_action: module: ansible.builtin.template @@ -12,10 +12,9 @@ _docker_registry_password: "{{ deploy_container.docker_registry_pass }}" delegate_to: localhost -# Token valid for 12 hours -- name: Fetch AWS ECR registry login token. +- name: Fetch AWS ECR registry login token. # token valid for 12 hours ansible.builtin.command: - cmd: "aws ecr get-login-password --region {{ deploy_container.aws_ecr.region }} --profile {{ deploy_container.aws_ecr.profile }}" + cmd: "aws ecr get-login-password --region {{ deploy_container.aws_ecr.region }} --profile {{ deploy_container.aws_ecr.aws_profile }}" when: deploy_container.aws_ecr.enabled delegate_to: localhost register: _docker_registry_ecr_token @@ -49,3 +48,293 @@ push: true source: build delegate_to: localhost + +# Fetch the ACM role from ce-provision +- name: Ensure the aws_acm directory exists. + ansible.builtin.file: + path: "{{ _ce_deploy_base_dir }}/roles/aws_acm/{{ item }}" + state: directory + mode: '0755' + delegate_to: localhost + with_items: + - tasks + - defaults + +- name: Fetch the aws_acm files. + ansible.builtin.get_url: + url: "https://raw.githubusercontent.com/codeenigma/ce-provision/1.x/roles/aws/aws_acm/{{ item }}/main.yml" + dest: "{{ _ce_deploy_base_dir }}/roles/aws_acm/{{ item }}/main.yml" + delegate_to: localhost + with_items: + - tasks + - defaults + +- name: Fetch the aws_acm tasks. 
+ ansible.builtin.get_url: + url: https://raw.githubusercontent.com/codeenigma/ce-provision/1.x/roles/aws/aws_acm/tasks/main.yml + dest: "{{ _ce_deploy_base_dir }}/roles/aws_acm/tasks/main.yml" + delegate_to: localhost + +# Gather all network information +- name: Gather VPC information. + amazon.aws.ec2_vpc_net_info: + profile: "{{ deploy_container.aws_ecs.aws_profile }}" + region: "{{ deploy_container.aws_ecs.region }}" + filters: + "tag:Name": "{{ deploy_container.aws_ecs.vpc_name }}" + register: _aws_ecs_cluster_vpc + delegate_to: localhost + when: + - deploy_container.aws_ecs.enabled + - deploy_container.aws_ecs.vpc_name is defined + - deploy_container.aws_ecs.vpc_name | length > 0 + +- name: Set the VPC id from name. + ansible.builtin.set_fact: + _aws_ecs_cluster_vpc_id: "{{ _aws_ecs_cluster_vpc.vpcs[0].vpc_id }}" + when: + - deploy_container.aws_ecs.enabled + - deploy_container.aws_ecs.vpc_name is defined + - deploy_container.aws_ecs.vpc_name | length > 0 + +- name: Use provided VPC id. + ansible.builtin.set_fact: + _aws_ecs_cluster_vpc_id: "{{ deploy_container.aws_ecs.vpc_id }}" + when: + - deploy_container.aws_ecs.enabled + - (deploy_container.aws_ecs.vpc_name is not defined or deploy_container.aws_ecs.vpc_name | length < 0) + +- name: Reset subnets lists. + ansible.builtin.set_fact: + _aws_ecs_cluster_public_subnets_ids: [] + when: deploy_container.aws_ecs.enabled + +- name: Construct list of public subnet IDs. + ansible.builtin.include_tasks: subnet.yml + with_items: "{{ deploy_container.aws_ecs.subnets }}" + loop_control: + loop_var: subnet + when: deploy_container.aws_ecs.enabled + +# Construct AWS supporting assets +- name: Create task definition. + community.aws.ecs_taskdefinition: + region: "{{ deploy_container.aws_ecs.region }}" + profile: "{{ deploy_container.aws_ecs.aws_profile }}" + family: "{{ deploy_container.aws_ecs.family_name }}" + execution_role_arn: "{{ deploy_container.aws_ecs.execution_role_arn }}" + containers: "{{ deploy_container.aws_ecs.containers }}" + launch_type: "{{ deploy_container.aws_ecs.launch_type }}" + cpu: "{{ deploy_container.aws_ecs.cpu | default(omit) }}" + memory: "{{ deploy_container.aws_ecs.memory | default(omit) }}" + state: present + network_mode: "{{ deploy_container.aws_ecs.network_mode }}" + volumes: "{{ deploy_container.aws_ecs.volumes | default(omit) }}" + delegate_to: localhost + when: deploy_container.aws_ecs.enabled + +- name: Create a target group with IP address targets. + community.aws.elb_target_group: + region: "{{ deploy_container.aws_ecs.region }}" + profile: "{{ deploy_container.aws_ecs.aws_profile }}" + name: "{{ deploy_container.aws_ecs.target_group_name | truncate(32, true, '', 0) }}" # 32 char limit + protocol: "{{ deploy_container.aws_ecs.target_group_protocol }}" + port: "{{ deploy_container.aws_ecs.target_group_port }}" + vpc_id: "{{ _aws_ecs_cluster_vpc_id }}" + health_check_protocol: "{{ deploy_container.aws_ecs.health_check.protocol }}" + health_check_path: "{{ deploy_container.aws_ecs.health_check.path }}" + successful_response_codes: "{{ deploy_container.aws_ecs.health_check.response_codes }}" + target_type: ip + targets: "{{ deploy_container.aws_ecs.targets }}" + state: present + wait_timeout: 200 + wait: true + register: _aws_ecs_target_group + delegate_to: localhost + when: deploy_container.aws_ecs.enabled + +- name: Create SSL certificate for load balancer. 
+ ansible.builtin.include_role: + name: aws_acm + vars: + aws_acm: + export: false + domain_name: "{{ deploy_container.aws_ecs.domain_name }}" + extra_domains: "{{ deploy_container.aws_ecs.acm.extra_domains }}" + route_53: + aws_profile: "{{ deploy_container.aws_ecs.route_53.aws_profile }}" + zone: "{{ deploy_container.aws_ecs.route_53.zone }}" + when: + - deploy_container.aws_ecs.acm.create_cert + - deploy_container.aws_ecs.enabled + +- name: Default to provided SSL certificate ARN. + ansible.builtin.set_fact: + _ssl_certificate_ARN: "{{ deploy_container.aws_ecs.ssl_certificate_ARN }}" + when: deploy_container.aws_ecs.enabled + +- name: If provided, override SSL certificate ARN with the one received from ACM. + ansible.builtin.set_fact: + _ssl_certificate_ARN: "{{ aws_acm_certificate_arn }}" + when: + - deploy_container.aws_ecs.acm.create_cert + - deploy_container.aws_ecs.enabled + +- name: Define default ALB listeners. + ansible.builtin.set_fact: + _aws_ecs_cluster_listeners_http: + Protocol: HTTP + Port: "{{ deploy_container.aws_ecs.elb_http_port }}" + DefaultActions: + - Type: forward + TargetGroupName: "{{ deploy_container.aws_ecs.target_group_name | truncate(32, true, '', 0) }}" + Rules: "{{ deploy_container.aws_ecs.elb_listener_http_rules }}" + _aws_ecs_cluster_listeners_redirect: + Protocol: HTTP + Port: "{{ deploy_container.aws_ecs.elb_http_port }}" + DefaultActions: + - Type: redirect + RedirectConfig: + Protocol: HTTPS + Host: "#{host}" + Query: "#{query}" + Path: "/#{path}" + Port: "{{ deploy_container.aws_ecs.elb_https_port }}" + StatusCode: HTTP_301 + _aws_ecs_cluster_listeners_https: + Protocol: HTTPS + Port: "{{ deploy_container.aws_ecs.elb_https_port }}" + SslPolicy: "{{ deploy_container.aws_ecs.elb_ssl_policy }}" + Certificates: + - CertificateArn: "{{ _ssl_certificate_ARN }}" + DefaultActions: + - Type: forward + TargetGroupName: "{{ deploy_container.aws_ecs.target_group_name | truncate(32, true, '', 0) }}" + Rules: "{{ deploy_container.aws_ecs.elb_listener_https_rules }}" + when: deploy_container.aws_ecs.enabled + +- name: Add HTTP listeners. + ansible.builtin.set_fact: + _aws_ecs_cluster_listeners: "{{ [ _aws_ecs_cluster_listeners_http ] }}" + when: + - _ssl_certificate_ARN | length < 1 + - deploy_container.aws_ecs.enabled + +- name: Add HTTPS Listener. + ansible.builtin.set_fact: + _aws_ecs_cluster_listeners: "{{ [ _aws_ecs_cluster_listeners_redirect, _aws_ecs_cluster_listeners_https ] }}" + when: + - _ssl_certificate_ARN | length > 1 + - deploy_container.aws_ecs.enabled + +- name: Add custom Listeners. + ansible.builtin.set_fact: + _aws_ecs_cluster_listeners: "{{ _aws_ecs_cluster_listeners + deploy_container.aws_ecs.elb_listeners }}" + when: + - deploy_container.aws_ecs.elb_listeners is defined + - deploy_container.aws_ecs.elb_listeners | length + - deploy_container.aws_ecs.enabled + +- name: Create an ALB. 
+ amazon.aws.elb_application_lb: + region: "{{ deploy_container.aws_ecs.region }}" + profile: "{{ deploy_container.aws_ecs.aws_profile }}" + name: "{{ deploy_container.aws_ecs.target_group_name | truncate(32, true, '', 0) }}" # 32 char limit + state: present + tags: "{{ deploy_container.aws_ecs.tags }}" + subnets: "{{ _aws_ecs_cluster_public_subnets_ids }}" + security_groups: "{{ deploy_container.aws_ecs.elb_security_groups }}" + listeners: "{{ _aws_ecs_cluster_listeners }}" + idle_timeout: "{{ deploy_container.aws_ecs.elb_idle_timeout }}" + ip_address_type: "{{ aws_elb.ip_address_type }}" + register: _aws_ecs_cluster_alb + delegate_to: localhost + when: deploy_container.aws_ecs.enabled + +- name: Set task definition name. + ansible.builtin.set_fact: + _aws_ecs_service_task_definition: "{{ deploy_container.aws_ecs.family_name }}" + when: deploy_container.aws_ecs.enabled + +- name: Set task definition revision if applicable. + ansible.builtin.set_fact: + _aws_ecs_service_task_definition: "{{ deploy_container.aws_ecs.family_name }}:{{ deploy_container.aws_ecs.task_definition_revision }}" + when: + - deploy_container.aws_ecs.task_definition_revision | length > 0 + - deploy_container.aws_ecs.enabled + +- name: Create ECS service. + community.aws.ecs_service: + region: "{{ deploy_container.aws_ecs.region }}" + profile: "{{ deploy_container.aws_ecs.aws_profile }}" + state: present + name: "{{ deploy_container.aws_ecs.family_name }}" + cluster: "{{ deploy_container.aws_ecs.cluster_name }}" + task_definition: "{{ _aws_ecs_service_task_definition }}" + desired_count: "{{ deploy_container.aws_ecs.task_count }}" + launch_type: "{{ deploy_container.aws_ecs.launch_type }}" + platform_version: LATEST + load_balancers: # see https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_LoadBalancer.html + - containerName: "{{ deploy_container.aws_ecs.service_elb_container_name }}" + containerPort: "{{ deploy_container.aws_ecs.target_group_port }}" + targetGroupArn: "{{ _aws_ecs_target_group.target_group_arn }}" + network_configuration: + subnets: "{{ _aws_ecs_cluster_public_subnets_ids }}" + security_groups: "{{ deploy_container.aws_ecs.security_groups }}" + assign_public_ip: true # must be true for now - details: https://stackoverflow.com/a/66802973 + tags: "{{ deploy_container.aws_ecs.tags }}" + wait: true + delegate_to: localhost + when: deploy_container.aws_ecs.enabled + +- name: Create target tracking scaling policy for ECS service. 
+ community.aws.application_autoscaling_policy: + region: "{{ deploy_container.aws_ecs.region }}" + profile: "{{ deploy_container.aws_ecs.aws_profile }}" + state: present + policy_name: "{{ deploy_container.aws_ecs.family_name }}" + service_namespace: ecs + resource_id: "service/{{ deploy_container.aws_ecs.cluster_name }}/{{ deploy_container.aws_ecs.family_name }}" + scalable_dimension: ecs:service:DesiredCount + minimum_tasks: "{{ deploy_container.aws_ecs.task_minimum_count }}" + maximum_tasks: "{{ deploy_container.aws_ecs.task_maximum_count }}" + policy_type: TargetTrackingScaling + target_tracking_scaling_policy_configuration: + PredefinedMetricSpecification: + PredefinedMetricType: "{{ deploy_container.aws_ecs.service_autoscale_metric_type }}" + ScaleInCooldown: "{{ deploy_container.aws_ecs.service_autoscale_up_cooldown }}" + ScaleOutCooldown: "{{ deploy_container.aws_ecs.service_autoscale_down_cooldown }}" + DisableScaleIn: false + TargetValue: "{{ deploy_container.aws_ecs.service_autoscale_target_value }}" + delegate_to: localhost + when: deploy_container.aws_ecs.enabled + +- name: Initialise the domains loop var with main domain entry DNS settings. + ansible.builtin.set_fact: + _aws_ecs_cluster_dns_all_domains: + - domain: "{{ deploy_container.aws_ecs.domain_name }}" + zone: "{{ deploy_container.aws_ecs.route_53.zone }}" + aws_profile: "{{ deploy_container.aws_ecs.route_53.aws_profile }}" + when: deploy_container.aws_ecs.enabled + +- name: Add extra_domains so we can loop through DNS records. + ansible.builtin.set_fact: + _aws_ecs_cluster_dns_all_domains: "{{ _aws_ecs_cluster_dns_all_domains + [{'domain': item.domain, 'zone': item.zone, 'aws_profile': item.aws_profile}] }}" + loop: "{{ deploy_container.aws_ecs.acm.extra_domains }}" + when: + - deploy_container.aws_ecs.acm.extra_domains | length > 0 + - deploy_container.aws_ecs.enabled + +- name: Add DNS records in Route 53. + amazon.aws.route53: + state: present + profile: "{{ item.aws_profile }}" + zone: "{{ item.zone }}" + record: "{{ item.domain }}" + type: CNAME + value: "{{ _aws_ecs_cluster_alb.dns_name }}" + overwrite: true + loop: "{{ _aws_ecs_cluster_dns_all_domains }}" + when: + - deploy_container.aws_ecs.route_53.zone | length > 0 + - deploy_container.aws_ecs.enabled \ No newline at end of file diff --git a/roles/deploy_container/tasks/subnet.yml b/roles/deploy_container/tasks/subnet.yml new file mode 100644 index 00000000..3cb3a06a --- /dev/null +++ b/roles/deploy_container/tasks/subnet.yml @@ -0,0 +1,13 @@ +- name: Gather public subnet information. + amazon.aws.ec2_vpc_subnet_info: + profile: "{{ deploy_container.aws_ecs.profile }}" + region: "{{ deploy_container.aws_ecs.region }}" + filters: + vpc-id: "{{ _aws_ecs_cluster_vpc_id }}" + tag:Name: "{{ subnet }}" + register: _aws_ecs_cluster_public_subnet + delegate_to: localhost + +- name: Add public subnet to the list. + ansible.builtin.set_fact: + _aws_ecs_cluster_public_subnets_ids: "{{ _aws_ecs_cluster_public_subnets_ids + [ _aws_ecs_cluster_public_subnet.subnets[0].subnet_id ] }}" From 21312cf9793c085a2e8485b76c05ab96f13fea61 Mon Sep 17 00:00:00 2001 From: gregharvey Date: Tue, 27 Jun 2023 13:11:56 +0200 Subject: [PATCH 07/28] Force a docker logout. 
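Clearing the cached registry credentials before logging in means `community.docker.docker_login` always re-authenticates instead of reusing a stale session, which matters for ECR where the login token expires after 12 hours. On the controller this is roughly equivalent to running (registry host shown only as an example):

```
docker logout index.docker.io
```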
--- roles/deploy_container/defaults/main.yml | 1 - roles/deploy_container/tasks/main.yml | 6 +++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/roles/deploy_container/defaults/main.yml b/roles/deploy_container/defaults/main.yml index d84b0d72..cee7ce12 100644 --- a/roles/deploy_container/defaults/main.yml +++ b/roles/deploy_container/defaults/main.yml @@ -42,7 +42,6 @@ deploy_container: service_autoscale_up_cooldown: 120 service_autoscale_down_cooldown: 120 service_autoscale_target_value: 70 # the value to trigger a scaling event at - service_elb_container_name: example # the name of the container to be load balanced execution_role_arn: "arn:aws:iam::000000000000:role/ecsTaskExecutionRole" # ARN of the IAM role to run the task as, must have access to the ECR repository if applicable containers: [] # list of container definitions, see docs: https://docs.ansible.com/ansible/latest/collections/community/aws/ecs_taskdefinition_module.html#parameter-containers #cpu: 512 # these values can be set globally or per container diff --git a/roles/deploy_container/tasks/main.yml b/roles/deploy_container/tasks/main.yml index dd64309f..17f339be 100644 --- a/roles/deploy_container/tasks/main.yml +++ b/roles/deploy_container/tasks/main.yml @@ -31,6 +31,10 @@ when: deploy_container.aws_ecr.enabled delegate_to: localhost +- name: Log out of Docker registry to force a login. + community.docker.docker_login: + state: absent + - name: Log into Docker registry. community.docker.docker_login: registry_url: "{{ deploy_container.docker_registry_url }}" @@ -275,7 +279,7 @@ launch_type: "{{ deploy_container.aws_ecs.launch_type }}" platform_version: LATEST load_balancers: # see https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_LoadBalancer.html - - containerName: "{{ deploy_container.aws_ecs.service_elb_container_name }}" + - containerName: "{{ deploy_container.container_name }}" containerPort: "{{ deploy_container.aws_ecs.target_group_port }}" targetGroupArn: "{{ _aws_ecs_target_group.target_group_arn }}" network_configuration: From 9005d12d6ae34961de06b94da9aa048417510a5a Mon Sep 17 00:00:00 2001 From: gregharvey Date: Tue, 27 Jun 2023 13:26:17 +0200 Subject: [PATCH 08/28] Need to delegate docker commands to the controller. 
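The container build happens in the build directory on the deploy server itself, so every Docker and fact-setting task needs `delegate_to: localhost` to run on the Ansible controller rather than on the target host, for example:

```yaml
- name: Log out of Docker registry to force a login.
  community.docker.docker_login:
    state: absent
  delegate_to: localhost
```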
--- docs/roles/deploy_container.md | 1 - roles/deploy_container/README.md | 1 - roles/deploy_container/tasks/main.yml | 1 + 3 files changed, 1 insertion(+), 2 deletions(-) diff --git a/docs/roles/deploy_container.md b/docs/roles/deploy_container.md index 0e4fe6fc..10292af6 100644 --- a/docs/roles/deploy_container.md +++ b/docs/roles/deploy_container.md @@ -60,7 +60,6 @@ deploy_container: service_autoscale_up_cooldown: 120 service_autoscale_down_cooldown: 120 service_autoscale_target_value: 70 # the value to trigger a scaling event at - service_elb_container_name: example # the name of the container to be load balanced execution_role_arn: "arn:aws:iam::000000000000:role/ecsTaskExecutionRole" # ARN of the IAM role to run the task as, must have access to the ECR repository if applicable containers: [] # list of container definitions, see docs: https://docs.ansible.com/ansible/latest/collections/community/aws/ecs_taskdefinition_module.html#parameter-containers #cpu: 512 # these values can be set globally or per container diff --git a/roles/deploy_container/README.md b/roles/deploy_container/README.md index 0e4fe6fc..10292af6 100644 --- a/roles/deploy_container/README.md +++ b/roles/deploy_container/README.md @@ -60,7 +60,6 @@ deploy_container: service_autoscale_up_cooldown: 120 service_autoscale_down_cooldown: 120 service_autoscale_target_value: 70 # the value to trigger a scaling event at - service_elb_container_name: example # the name of the container to be load balanced execution_role_arn: "arn:aws:iam::000000000000:role/ecsTaskExecutionRole" # ARN of the IAM role to run the task as, must have access to the ECR repository if applicable containers: [] # list of container definitions, see docs: https://docs.ansible.com/ansible/latest/collections/community/aws/ecs_taskdefinition_module.html#parameter-containers #cpu: 512 # these values can be set globally or per container diff --git a/roles/deploy_container/tasks/main.yml b/roles/deploy_container/tasks/main.yml index 17f339be..05e1ffc0 100644 --- a/roles/deploy_container/tasks/main.yml +++ b/roles/deploy_container/tasks/main.yml @@ -34,6 +34,7 @@ - name: Log out of Docker registry to force a login. community.docker.docker_login: state: absent + delegate_to: localhost - name: Log into Docker registry. community.docker.docker_login: From a609bfb2e62a7b88cb627279b0021d9e4aadadff Mon Sep 17 00:00:00 2001 From: gregharvey Date: Tue, 27 Jun 2023 13:31:07 +0200 Subject: [PATCH 09/28] Trying deleting the credentials file instead. --- roles/deploy_container/tasks/main.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/roles/deploy_container/tasks/main.yml b/roles/deploy_container/tasks/main.yml index 05e1ffc0..83cb959a 100644 --- a/roles/deploy_container/tasks/main.yml +++ b/roles/deploy_container/tasks/main.yml @@ -31,9 +31,10 @@ when: deploy_container.aws_ecr.enabled delegate_to: localhost -- name: Log out of Docker registry to force a login. - community.docker.docker_login: +- name: Remove Docker credentials file. + ansible.builtin.file: state: absent + path: "/home/{{ deploy_user }}/.docker/config.json" delegate_to: localhost - name: Log into Docker registry. From 5990127c29d61c7510731dbf240908f5f1220a6f Mon Sep 17 00:00:00 2001 From: gregharvey Date: Tue, 27 Jun 2023 13:38:06 +0200 Subject: [PATCH 10/28] Fixing var name in subnet.yml. 
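The role's defaults expose the ECS credentials profile as `deploy_container.aws_ecs.aws_profile`, so the earlier `deploy_container.aws_ecs.profile` reference in `subnet.yml` pointed at a key that is never defined. Both files now agree on:

```yaml
profile: "{{ deploy_container.aws_ecs.aws_profile }}"
```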
--- roles/deploy_container/tasks/subnet.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/deploy_container/tasks/subnet.yml b/roles/deploy_container/tasks/subnet.yml index 3cb3a06a..a0701a36 100644 --- a/roles/deploy_container/tasks/subnet.yml +++ b/roles/deploy_container/tasks/subnet.yml @@ -1,6 +1,6 @@ - name: Gather public subnet information. amazon.aws.ec2_vpc_subnet_info: - profile: "{{ deploy_container.aws_ecs.profile }}" + profile: "{{ deploy_container.aws_ecs.aws_profile }}" region: "{{ deploy_container.aws_ecs.region }}" filters: vpc-id: "{{ _aws_ecs_cluster_vpc_id }}" From 40b26871170f20da2c3ad1f0448a0b0fcb7b268e Mon Sep 17 00:00:00 2001 From: gregharvey Date: Tue, 27 Jun 2023 13:51:08 +0200 Subject: [PATCH 11/28] Missing some ACM variables. --- roles/deploy_container/tasks/main.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/roles/deploy_container/tasks/main.yml b/roles/deploy_container/tasks/main.yml index 83cb959a..af1164eb 100644 --- a/roles/deploy_container/tasks/main.yml +++ b/roles/deploy_container/tasks/main.yml @@ -164,6 +164,9 @@ name: aws_acm vars: aws_acm: + region: "{{ deploy_container.aws_ecs.region }}" + aws_profile: "{{ deploy_container.aws_ecs.aws_profile }}" + tags: "{{ deploy_container.aws_ecs.tags }}" export: false domain_name: "{{ deploy_container.aws_ecs.domain_name }}" extra_domains: "{{ deploy_container.aws_ecs.acm.extra_domains }}" From d32257d068646ababd3d59031649a5e71e5d1305 Mon Sep 17 00:00:00 2001 From: gregharvey Date: Tue, 27 Jun 2023 16:35:00 +0200 Subject: [PATCH 12/28] Allowing us to set wait timeout for TG creation. --- docs/roles/deploy_container.md | 1 + roles/deploy_container/README.md | 1 + roles/deploy_container/defaults/main.yml | 1 + roles/deploy_container/tasks/main.yml | 2 +- 4 files changed, 4 insertions(+), 1 deletion(-) diff --git a/docs/roles/deploy_container.md b/docs/roles/deploy_container.md index 10292af6..1ebaa80f 100644 --- a/docs/roles/deploy_container.md +++ b/docs/roles/deploy_container.md @@ -70,6 +70,7 @@ deploy_container: target_group_name: example # 32 character limit target_group_protocol: http target_group_port: 80 + target_group_wait_timeout: 200 # how long to wait for target group events to complete targets: [] # typically we do not specify targets at this point, this will be handled automatically by the ECS service #- Id: 10.0.0.2 # Port: 80 diff --git a/roles/deploy_container/README.md b/roles/deploy_container/README.md index 10292af6..1ebaa80f 100644 --- a/roles/deploy_container/README.md +++ b/roles/deploy_container/README.md @@ -70,6 +70,7 @@ deploy_container: target_group_name: example # 32 character limit target_group_protocol: http target_group_port: 80 + target_group_wait_timeout: 200 # how long to wait for target group events to complete targets: [] # typically we do not specify targets at this point, this will be handled automatically by the ECS service #- Id: 10.0.0.2 # Port: 80 diff --git a/roles/deploy_container/defaults/main.yml b/roles/deploy_container/defaults/main.yml index cee7ce12..00195f6a 100644 --- a/roles/deploy_container/defaults/main.yml +++ b/roles/deploy_container/defaults/main.yml @@ -52,6 +52,7 @@ deploy_container: target_group_name: example # 32 character limit target_group_protocol: http target_group_port: 80 + target_group_wait_timeout: 200 # how long to wait for target group events to complete targets: [] # typically we do not specify targets at this point, this will be handled automatically by the ECS service #- Id: 10.0.0.2 # Port: 80 diff --git 
a/roles/deploy_container/tasks/main.yml b/roles/deploy_container/tasks/main.yml index af1164eb..053e7ab9 100644 --- a/roles/deploy_container/tasks/main.yml +++ b/roles/deploy_container/tasks/main.yml @@ -153,7 +153,7 @@ target_type: ip targets: "{{ deploy_container.aws_ecs.targets }}" state: present - wait_timeout: 200 + wait_timeout: "{{ deploy_container.aws_ecs.target_group_wait_timeout }}" wait: true register: _aws_ecs_target_group delegate_to: localhost From 687ff1349556539331247ca94e6ceb2e3ebb9c5b Mon Sep 17 00:00:00 2001 From: gregharvey Date: Tue, 27 Jun 2023 16:42:31 +0200 Subject: [PATCH 13/28] Uncommenting cpu and memory vars for ECS with Fargate. --- docs/roles/deploy_container.md | 4 ++-- roles/deploy_container/README.md | 4 ++-- roles/deploy_container/defaults/main.yml | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/roles/deploy_container.md b/docs/roles/deploy_container.md index 1ebaa80f..d1befb03 100644 --- a/docs/roles/deploy_container.md +++ b/docs/roles/deploy_container.md @@ -62,8 +62,8 @@ deploy_container: service_autoscale_target_value: 70 # the value to trigger a scaling event at execution_role_arn: "arn:aws:iam::000000000000:role/ecsTaskExecutionRole" # ARN of the IAM role to run the task as, must have access to the ECR repository if applicable containers: [] # list of container definitions, see docs: https://docs.ansible.com/ansible/latest/collections/community/aws/ecs_taskdefinition_module.html#parameter-containers - #cpu: 512 # these values can be set globally or per container - #memory: 1024 + cpu: 512 # these values can be set globally or per container + memory: 1024 launch_type: FARGATE network_mode: awsvpc #volumes: [] # list of additional volumes to attach diff --git a/roles/deploy_container/README.md b/roles/deploy_container/README.md index 1ebaa80f..d1befb03 100644 --- a/roles/deploy_container/README.md +++ b/roles/deploy_container/README.md @@ -62,8 +62,8 @@ deploy_container: service_autoscale_target_value: 70 # the value to trigger a scaling event at execution_role_arn: "arn:aws:iam::000000000000:role/ecsTaskExecutionRole" # ARN of the IAM role to run the task as, must have access to the ECR repository if applicable containers: [] # list of container definitions, see docs: https://docs.ansible.com/ansible/latest/collections/community/aws/ecs_taskdefinition_module.html#parameter-containers - #cpu: 512 # these values can be set globally or per container - #memory: 1024 + cpu: 512 # these values can be set globally or per container + memory: 1024 launch_type: FARGATE network_mode: awsvpc #volumes: [] # list of additional volumes to attach diff --git a/roles/deploy_container/defaults/main.yml b/roles/deploy_container/defaults/main.yml index 00195f6a..22380049 100644 --- a/roles/deploy_container/defaults/main.yml +++ b/roles/deploy_container/defaults/main.yml @@ -44,8 +44,8 @@ deploy_container: service_autoscale_target_value: 70 # the value to trigger a scaling event at execution_role_arn: "arn:aws:iam::000000000000:role/ecsTaskExecutionRole" # ARN of the IAM role to run the task as, must have access to the ECR repository if applicable containers: [] # list of container definitions, see docs: https://docs.ansible.com/ansible/latest/collections/community/aws/ecs_taskdefinition_module.html#parameter-containers - #cpu: 512 # these values can be set globally or per container - #memory: 1024 + cpu: 512 # these values can be set globally or per container + memory: 1024 launch_type: FARGATE network_mode: awsvpc #volumes: [] # list of 
additional volumes to attach From 3113716ca7c1e201f562b97ce088b68faf97b500 Mon Sep 17 00:00:00 2001 From: gregharvey Date: Tue, 27 Jun 2023 16:53:04 +0200 Subject: [PATCH 14/28] Missed a variable in ELB creation. --- roles/deploy_container/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/deploy_container/tasks/main.yml b/roles/deploy_container/tasks/main.yml index 053e7ab9..0fafdfc2 100644 --- a/roles/deploy_container/tasks/main.yml +++ b/roles/deploy_container/tasks/main.yml @@ -255,7 +255,7 @@ security_groups: "{{ deploy_container.aws_ecs.elb_security_groups }}" listeners: "{{ _aws_ecs_cluster_listeners }}" idle_timeout: "{{ deploy_container.aws_ecs.elb_idle_timeout }}" - ip_address_type: "{{ aws_elb.ip_address_type }}" + ip_address_type: "{{ deploy_container.aws_ecs.elb_ip_address_type }}" register: _aws_ecs_cluster_alb delegate_to: localhost when: deploy_container.aws_ecs.enabled From a78b57a6cc2ab47057ac57e9dfd538cbb11aadeb Mon Sep 17 00:00:00 2001 From: gregharvey Date: Tue, 27 Jun 2023 18:14:18 +0200 Subject: [PATCH 15/28] Couple of linting fixes. --- roles/deploy_container/defaults/main.yml | 2 +- roles/deploy_container/tasks/main.yml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/roles/deploy_container/defaults/main.yml b/roles/deploy_container/defaults/main.yml index 22380049..77be7a6a 100644 --- a/roles/deploy_container/defaults/main.yml +++ b/roles/deploy_container/defaults/main.yml @@ -8,7 +8,7 @@ deploy_container: docker_base_command: "docker image build" docker_build_dir: "{{ _ce_deploy_build_dir }}" dockerfile_template: example.j2 # provide a templates directory next to your playbook and change this to match your Dockerfile template name - environment_vars: {} # dictionary you can populate for use in a custom Dockerfile template + environment_vars: {} # dictionary you can populate for use in a custom Dockerfile template # Requires the deploy IAM user to have the managed EC2InstanceProfileForImageBuilderECRContainerBuilds policy attached aws_ecr: enabled: false # set to true if using AWS ECR diff --git a/roles/deploy_container/tasks/main.yml b/roles/deploy_container/tasks/main.yml index 0fafdfc2..23f4d996 100644 --- a/roles/deploy_container/tasks/main.yml +++ b/roles/deploy_container/tasks/main.yml @@ -1,10 +1,10 @@ --- # Build and ship a container image - name: Create Dockerfile from template. - local_action: - module: ansible.builtin.template + ansible.builtin.template: src: "{{ deploy_container.dockerfile_template }}" dest: "{{ deploy_container.docker_build_dir }}/Dockerfile" + delegate_to: localhost - name: Set Docker registry username and password. ansible.builtin.set_fact: From a9162bab3cd3a5481eab58b20a8353627d6a7708 Mon Sep 17 00:00:00 2001 From: gregharvey Date: Tue, 27 Jun 2023 18:24:26 +0200 Subject: [PATCH 16/28] Force building and tagging of containers as an option. 
--- docs/roles/deploy_container.md | 3 ++- roles/deploy_container/README.md | 3 ++- roles/deploy_container/defaults/main.yml | 1 + roles/deploy_container/tasks/main.yml | 2 ++ 4 files changed, 7 insertions(+), 2 deletions(-) diff --git a/docs/roles/deploy_container.md b/docs/roles/deploy_container.md index d1befb03..8f7dca28 100644 --- a/docs/roles/deploy_container.md +++ b/docs/roles/deploy_container.md @@ -20,13 +20,14 @@ AWS ECR registries require the AWS CLI user provided for `ce-deploy` to have the deploy_container: container_name: example container_tag: latest # tag will take format container_name:container_tag + container_force_build: true # force Docker to build and tag a new image docker_registry_name: index.docker.io/example # combines with container_name to make the full registry name, docker_registry_name/container_name docker_registry_user: example docker_registry_pass: asdf1234 docker_base_command: "docker image build" docker_build_dir: "{{ _ce_deploy_build_dir }}" dockerfile_template: example.j2 # provide a templates directory next to your playbook and change this to match your Dockerfile template name - environment_vars: {} # dictionary you can populate for use in a custom Dockerfile template + environment_vars: {} # dictionary you can populate for use in a custom Dockerfile template # Requires the deploy IAM user to have the managed EC2InstanceProfileForImageBuilderECRContainerBuilds policy attached aws_ecr: enabled: false # set to true if using AWS ECR diff --git a/roles/deploy_container/README.md b/roles/deploy_container/README.md index d1befb03..8f7dca28 100644 --- a/roles/deploy_container/README.md +++ b/roles/deploy_container/README.md @@ -20,13 +20,14 @@ AWS ECR registries require the AWS CLI user provided for `ce-deploy` to have the deploy_container: container_name: example container_tag: latest # tag will take format container_name:container_tag + container_force_build: true # force Docker to build and tag a new image docker_registry_name: index.docker.io/example # combines with container_name to make the full registry name, docker_registry_name/container_name docker_registry_user: example docker_registry_pass: asdf1234 docker_base_command: "docker image build" docker_build_dir: "{{ _ce_deploy_build_dir }}" dockerfile_template: example.j2 # provide a templates directory next to your playbook and change this to match your Dockerfile template name - environment_vars: {} # dictionary you can populate for use in a custom Dockerfile template + environment_vars: {} # dictionary you can populate for use in a custom Dockerfile template # Requires the deploy IAM user to have the managed EC2InstanceProfileForImageBuilderECRContainerBuilds policy attached aws_ecr: enabled: false # set to true if using AWS ECR diff --git a/roles/deploy_container/defaults/main.yml b/roles/deploy_container/defaults/main.yml index 77be7a6a..7605f796 100644 --- a/roles/deploy_container/defaults/main.yml +++ b/roles/deploy_container/defaults/main.yml @@ -2,6 +2,7 @@ deploy_container: container_name: example container_tag: latest # tag will take format container_name:container_tag + container_force_build: true # force Docker to build and tag a new image docker_registry_name: index.docker.io/example # combines with container_name to make the full registry name, docker_registry_name/container_name docker_registry_user: example docker_registry_pass: asdf1234 diff --git a/roles/deploy_container/tasks/main.yml b/roles/deploy_container/tasks/main.yml index 23f4d996..34866aa1 100644 --- 
a/roles/deploy_container/tasks/main.yml +++ b/roles/deploy_container/tasks/main.yml @@ -53,6 +53,8 @@ tag: "{{ deploy_container.container_tag | default('latest') }}" push: true source: build + force_source: "{{ deploy_container.container_force_build }}" + force_tag: "{{ deploy_container.container_force_build }}" delegate_to: localhost # Fetch the ACM role from ce-provision From 2bfcb363b5fdc43f6e5133a4d42f402e585d88e2 Mon Sep 17 00:00:00 2001 From: gregharvey Date: Tue, 27 Jun 2023 19:09:00 +0200 Subject: [PATCH 17/28] Improving docs for deploy_container role. --- docs/roles/deploy_container.md | 22 ++++++++++++++++++- .../sync/database_sync/database_sync-mysql.md | 5 +++-- roles/deploy_container/README.md | 22 ++++++++++++++++++- .../database_sync-mysql/README.md | 5 +++-- 4 files changed, 48 insertions(+), 6 deletions(-) diff --git a/docs/roles/deploy_container.md b/docs/roles/deploy_container.md index 8f7dca28..3e5a9c30 100644 --- a/docs/roles/deploy_container.md +++ b/docs/roles/deploy_container.md @@ -8,7 +8,27 @@ sudo usermod -aG docker deploy This can be handled automatically by [`ce-provision`](https://github.com/codeenigma/ce-provision) using the `ce_deploy` and `docker_ce` roles. -AWS ECR registries require the AWS CLI user provided for `ce-deploy` to have the managed AWS `EC2InstanceProfileForImageBuilderECRContainerBuilds` policy attached via IAM to allow access to fetch credentials and push containers. +## AWS IAM requirements +AWS integration requires the AWS CLI user provided for `ce-deploy` to have certain managed AWS policies attached. + +If you enable AWS ECR registry integration by setting `deploy_container.aws_ecr.enabled` to `true` then you will need the `EC2InstanceProfileForImageBuilderECRContainerBuilds` policy attached via IAM to allow access to fetch credentials and push containers. + +Similarly, if you set `deploy_container.aws_ecs.acm.create_cert` to `true` then you will need the `AWSCertificateManagerFullAccess` policy attaching to create SSL certificates. + +If you enable full AWS ECS integration by setting `deploy_container.aws_ecs.enabled` to `true` then this requires the following policies to be attached to the AWS CLI user: +* `AmazonECS_FullAccess` - to create task definitions and services +* `ElasticLoadBalancingFullAccess` - to create load balancers and target groups + +Finally, if you set `deploy_container.aws_ecs.route_53.zone` to anything other than an empty string then you will also need `AmazonRoute53FullAccess` attaching to manipulate DNS entries in Route 53. + +The full list is: +* `EC2InstanceProfileForImageBuilderECRContainerBuilds` - to manipulate images in AWS ECR +* `AWSCertificateManagerFullAccess` - to manage SSL certificates +* `AmazonECS_FullAccess` - to create task definitions and services +* `ElasticLoadBalancingFullAccess` - to create load balancers and target groups +* `AmazonRoute53FullAccess` - to manage DNS entries + +Naturally you can always create custom policies and roles to have tighter access control. This document simply gives you the broad strokes AWS managed policies you can use in conjunction with this Ansible role. diff --git a/docs/roles/sync/database_sync/database_sync-mysql.md b/docs/roles/sync/database_sync/database_sync-mysql.md index cbe1a353..d70d770e 100644 --- a/docs/roles/sync/database_sync/database_sync-mysql.md +++ b/docs/roles/sync/database_sync/database_sync-mysql.md @@ -5,8 +5,9 @@ Sync MySQL databases between environments. 
```yaml --- mysql_sync: - mysqldump_params: "{{ _mysqldump_params }}" # set in _init but you can override here - cleanup: true # if false leaves tmp database dump on deploy server for debugging purposes + mysqldump_params: "{{ _mysqldump_params }}" # set in _init but you can override here. + cleanup: true # if false leaves tmp database dump on deploy server for debugging purposes. + archival_method: "bzip2" # options are "bzip2" or "gzip". databases: - source: # Name of the database to take a dump from. diff --git a/roles/deploy_container/README.md b/roles/deploy_container/README.md index 8f7dca28..3e5a9c30 100644 --- a/roles/deploy_container/README.md +++ b/roles/deploy_container/README.md @@ -8,7 +8,27 @@ sudo usermod -aG docker deploy This can be handled automatically by [`ce-provision`](https://github.com/codeenigma/ce-provision) using the `ce_deploy` and `docker_ce` roles. -AWS ECR registries require the AWS CLI user provided for `ce-deploy` to have the managed AWS `EC2InstanceProfileForImageBuilderECRContainerBuilds` policy attached via IAM to allow access to fetch credentials and push containers. +## AWS IAM requirements +AWS integration requires the AWS CLI user provided for `ce-deploy` to have certain managed AWS policies attached. + +If you enable AWS ECR registry integration by setting `deploy_container.aws_ecr.enabled` to `true` then you will need the `EC2InstanceProfileForImageBuilderECRContainerBuilds` policy attached via IAM to allow access to fetch credentials and push containers. + +Similarly, if you set `deploy_container.aws_ecs.acm.create_cert` to `true` then you will need the `AWSCertificateManagerFullAccess` policy attaching to create SSL certificates. + +If you enable full AWS ECS integration by setting `deploy_container.aws_ecs.enabled` to `true` then this requires the following policies to be attached to the AWS CLI user: +* `AmazonECS_FullAccess` - to create task definitions and services +* `ElasticLoadBalancingFullAccess` - to create load balancers and target groups + +Finally, if you set `deploy_container.aws_ecs.route_53.zone` to anything other than an empty string then you will also need `AmazonRoute53FullAccess` attaching to manipulate DNS entries in Route 53. + +The full list is: +* `EC2InstanceProfileForImageBuilderECRContainerBuilds` - to manipulate images in AWS ECR +* `AWSCertificateManagerFullAccess` - to manage SSL certificates +* `AmazonECS_FullAccess` - to create task definitions and services +* `ElasticLoadBalancingFullAccess` - to create load balancers and target groups +* `AmazonRoute53FullAccess` - to manage DNS entries + +Naturally you can always create custom policies and roles to have tighter access control. This document simply gives you the broad strokes AWS managed policies you can use in conjunction with this Ansible role. diff --git a/roles/sync/database_sync/database_sync-mysql/README.md b/roles/sync/database_sync/database_sync-mysql/README.md index cbe1a353..d70d770e 100644 --- a/roles/sync/database_sync/database_sync-mysql/README.md +++ b/roles/sync/database_sync/database_sync-mysql/README.md @@ -5,8 +5,9 @@ Sync MySQL databases between environments. ```yaml --- mysql_sync: - mysqldump_params: "{{ _mysqldump_params }}" # set in _init but you can override here - cleanup: true # if false leaves tmp database dump on deploy server for debugging purposes + mysqldump_params: "{{ _mysqldump_params }}" # set in _init but you can override here. + cleanup: true # if false leaves tmp database dump on deploy server for debugging purposes. 
+ archival_method: "bzip2" # options are "bzip2" or "gzip". databases: - source: # Name of the database to take a dump from. From 00e04f9bb4305987b8ebfba89dd017c9cf0d6b78 Mon Sep 17 00:00:00 2001 From: gregharvey Date: Thu, 29 Jun 2023 14:32:13 +0200 Subject: [PATCH 18/28] Providing container description example for ECS. --- docs/roles/deploy_container.md | 22 +++++++++++++++++----- roles/deploy_container/README.md | 22 +++++++++++++++++----- roles/deploy_container/defaults/main.yml | 22 +++++++++++++++++----- 3 files changed, 51 insertions(+), 15 deletions(-) diff --git a/docs/roles/deploy_container.md b/docs/roles/deploy_container.md index 3e5a9c30..41a87e88 100644 --- a/docs/roles/deploy_container.md +++ b/docs/roles/deploy_container.md @@ -38,7 +38,7 @@ Naturally you can always create custom policies and roles to have tighter access ```yaml --- deploy_container: - container_name: example + container_name: example-container container_tag: latest # tag will take format container_name:container_tag container_force_build: true # force Docker to build and tag a new image docker_registry_name: index.docker.io/example # combines with container_name to make the full registry name, docker_registry_name/container_name @@ -70,8 +70,8 @@ deploy_container: - example-dev-a - example-dev-b security_groups: [] # list of security groups, accepts names or IDs - cluster_name: example - family_name: example + cluster_name: example-cluster + family_name: example-task-definition task_definition_revision: "" # integer, but must be presented as a string for Jinja2 task_count: 1 task_minimum_count: 1 @@ -82,7 +82,19 @@ deploy_container: service_autoscale_down_cooldown: 120 service_autoscale_target_value: 70 # the value to trigger a scaling event at execution_role_arn: "arn:aws:iam::000000000000:role/ecsTaskExecutionRole" # ARN of the IAM role to run the task as, must have access to the ECR repository if applicable - containers: [] # list of container definitions, see docs: https://docs.ansible.com/ansible/latest/collections/community/aws/ecs_taskdefinition_module.html#parameter-containers + containers: # list of container definitions, see docs: https://docs.ansible.com/ansible/latest/collections/community/aws/ecs_taskdefinition_module.html#parameter-containers + - name: example-container + essential: true + image: index.docker.io/example:latest + portMappings: + - containerPort: 8080 # should match target_group_port + hostPort: 8080 + logConfiguration: + logDriver: awslogs + options: + awslogs-group: /ecs/example-cluster + awslogs-region: eu-west-1 + awslogs-stream-prefix: "ecs-example-task" cpu: 512 # these values can be set globally or per container memory: 1024 launch_type: FARGATE @@ -90,7 +102,7 @@ deploy_container: #volumes: [] # list of additional volumes to attach target_group_name: example # 32 character limit target_group_protocol: http - target_group_port: 80 + target_group_port: 8080 # ports lower than 1024 will require the app to be configured to run as a privileged user in the Dockerfile target_group_wait_timeout: 200 # how long to wait for target group events to complete targets: [] # typically we do not specify targets at this point, this will be handled automatically by the ECS service #- Id: 10.0.0.2 diff --git a/roles/deploy_container/README.md b/roles/deploy_container/README.md index 3e5a9c30..41a87e88 100644 --- a/roles/deploy_container/README.md +++ b/roles/deploy_container/README.md @@ -38,7 +38,7 @@ Naturally you can always create custom policies and roles to have tighter access ```yaml 
--- deploy_container: - container_name: example + container_name: example-container container_tag: latest # tag will take format container_name:container_tag container_force_build: true # force Docker to build and tag a new image docker_registry_name: index.docker.io/example # combines with container_name to make the full registry name, docker_registry_name/container_name @@ -70,8 +70,8 @@ deploy_container: - example-dev-a - example-dev-b security_groups: [] # list of security groups, accepts names or IDs - cluster_name: example - family_name: example + cluster_name: example-cluster + family_name: example-task-definition task_definition_revision: "" # integer, but must be presented as a string for Jinja2 task_count: 1 task_minimum_count: 1 @@ -82,7 +82,19 @@ deploy_container: service_autoscale_down_cooldown: 120 service_autoscale_target_value: 70 # the value to trigger a scaling event at execution_role_arn: "arn:aws:iam::000000000000:role/ecsTaskExecutionRole" # ARN of the IAM role to run the task as, must have access to the ECR repository if applicable - containers: [] # list of container definitions, see docs: https://docs.ansible.com/ansible/latest/collections/community/aws/ecs_taskdefinition_module.html#parameter-containers + containers: # list of container definitions, see docs: https://docs.ansible.com/ansible/latest/collections/community/aws/ecs_taskdefinition_module.html#parameter-containers + - name: example-container + essential: true + image: index.docker.io/example:latest + portMappings: + - containerPort: 8080 # should match target_group_port + hostPort: 8080 + logConfiguration: + logDriver: awslogs + options: + awslogs-group: /ecs/example-cluster + awslogs-region: eu-west-1 + awslogs-stream-prefix: "ecs-example-task" cpu: 512 # these values can be set globally or per container memory: 1024 launch_type: FARGATE @@ -90,7 +102,7 @@ deploy_container: #volumes: [] # list of additional volumes to attach target_group_name: example # 32 character limit target_group_protocol: http - target_group_port: 80 + target_group_port: 8080 # ports lower than 1024 will require the app to be configured to run as a privileged user in the Dockerfile target_group_wait_timeout: 200 # how long to wait for target group events to complete targets: [] # typically we do not specify targets at this point, this will be handled automatically by the ECS service #- Id: 10.0.0.2 diff --git a/roles/deploy_container/defaults/main.yml b/roles/deploy_container/defaults/main.yml index 7605f796..c660243a 100644 --- a/roles/deploy_container/defaults/main.yml +++ b/roles/deploy_container/defaults/main.yml @@ -1,6 +1,6 @@ --- deploy_container: - container_name: example + container_name: example-container container_tag: latest # tag will take format container_name:container_tag container_force_build: true # force Docker to build and tag a new image docker_registry_name: index.docker.io/example # combines with container_name to make the full registry name, docker_registry_name/container_name @@ -32,8 +32,8 @@ deploy_container: - example-dev-a - example-dev-b security_groups: [] # list of security groups, accepts names or IDs - cluster_name: example - family_name: example + cluster_name: example-cluster + family_name: example-task-definition task_definition_revision: "" # integer, but must be presented as a string for Jinja2 task_count: 1 task_minimum_count: 1 @@ -44,7 +44,19 @@ deploy_container: service_autoscale_down_cooldown: 120 service_autoscale_target_value: 70 # the value to trigger a scaling event at 
execution_role_arn: "arn:aws:iam::000000000000:role/ecsTaskExecutionRole" # ARN of the IAM role to run the task as, must have access to the ECR repository if applicable - containers: [] # list of container definitions, see docs: https://docs.ansible.com/ansible/latest/collections/community/aws/ecs_taskdefinition_module.html#parameter-containers + containers: # list of container definitions, see docs: https://docs.ansible.com/ansible/latest/collections/community/aws/ecs_taskdefinition_module.html#parameter-containers + - name: example-container + essential: true + image: index.docker.io/example:latest + portMappings: + - containerPort: 8080 # should match target_group_port + hostPort: 8080 + logConfiguration: + logDriver: awslogs + options: + awslogs-group: /ecs/example-cluster + awslogs-region: eu-west-1 + awslogs-stream-prefix: "ecs-example-task" cpu: 512 # these values can be set globally or per container memory: 1024 launch_type: FARGATE @@ -52,7 +64,7 @@ deploy_container: #volumes: [] # list of additional volumes to attach target_group_name: example # 32 character limit target_group_protocol: http - target_group_port: 80 + target_group_port: 8080 # ports lower than 1024 will require the app to be configured to run as a privileged user in the Dockerfile target_group_wait_timeout: 200 # how long to wait for target group events to complete targets: [] # typically we do not specify targets at this point, this will be handled automatically by the ECS service #- Id: 10.0.0.2 From 5574faa182f023c20ec34ae6dc7125c92b4c7dcb Mon Sep 17 00:00:00 2001 From: gregharvey Date: Thu, 29 Jun 2023 14:32:28 +0200 Subject: [PATCH 19/28] Adding 'force' option to docker builds. --- roles/deploy_container/tasks/main.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/roles/deploy_container/tasks/main.yml b/roles/deploy_container/tasks/main.yml index 34866aa1..17ef63cf 100644 --- a/roles/deploy_container/tasks/main.yml +++ b/roles/deploy_container/tasks/main.yml @@ -55,6 +55,7 @@ source: build force_source: "{{ deploy_container.container_force_build }}" force_tag: "{{ deploy_container.container_force_build }}" + force: "{{ deploy_container.container_force_build }}" delegate_to: localhost # Fetch the ACM role from ce-provision From d0f02ed8c3e2767e6f59c6775c939a3c1b8d99c7 Mon Sep 17 00:00:00 2001 From: gregharvey Date: Thu, 29 Jun 2023 14:38:15 +0200 Subject: [PATCH 20/28] Revert "Adding 'force' option to docker builds." This reverts commit 5574faa182f023c20ec34ae6dc7125c92b4c7dcb. --- roles/deploy_container/tasks/main.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/roles/deploy_container/tasks/main.yml b/roles/deploy_container/tasks/main.yml index 17ef63cf..34866aa1 100644 --- a/roles/deploy_container/tasks/main.yml +++ b/roles/deploy_container/tasks/main.yml @@ -55,7 +55,6 @@ source: build force_source: "{{ deploy_container.container_force_build }}" force_tag: "{{ deploy_container.container_force_build }}" - force: "{{ deploy_container.container_force_build }}" delegate_to: localhost # Fetch the ACM role from ce-provision From 47a4c7f2207ab4d890f53520d907838abf84ab81 Mon Sep 17 00:00:00 2001 From: gregharvey Date: Thu, 29 Jun 2023 14:53:01 +0200 Subject: [PATCH 21/28] Adding options to force ECS component refreshes. 
--- docs/roles/deploy_container.md | 2 ++ roles/deploy_container/README.md | 2 ++ roles/deploy_container/defaults/main.yml | 2 ++ roles/deploy_container/tasks/main.yml | 2 ++ 4 files changed, 8 insertions(+) diff --git a/docs/roles/deploy_container.md b/docs/roles/deploy_container.md index 41a87e88..8e5872cb 100644 --- a/docs/roles/deploy_container.md +++ b/docs/roles/deploy_container.md @@ -73,6 +73,7 @@ deploy_container: cluster_name: example-cluster family_name: example-task-definition task_definition_revision: "" # integer, but must be presented as a string for Jinja2 + task_definition_force_create: false # creates a task definition revision every time if set to true task_count: 1 task_minimum_count: 1 task_maximum_count: 4 @@ -81,6 +82,7 @@ deploy_container: service_autoscale_up_cooldown: 120 service_autoscale_down_cooldown: 120 service_autoscale_target_value: 70 # the value to trigger a scaling event at + service_force_refresh: false # forces a refresh of all containers if set to true execution_role_arn: "arn:aws:iam::000000000000:role/ecsTaskExecutionRole" # ARN of the IAM role to run the task as, must have access to the ECR repository if applicable containers: # list of container definitions, see docs: https://docs.ansible.com/ansible/latest/collections/community/aws/ecs_taskdefinition_module.html#parameter-containers - name: example-container diff --git a/roles/deploy_container/README.md b/roles/deploy_container/README.md index 41a87e88..8e5872cb 100644 --- a/roles/deploy_container/README.md +++ b/roles/deploy_container/README.md @@ -73,6 +73,7 @@ deploy_container: cluster_name: example-cluster family_name: example-task-definition task_definition_revision: "" # integer, but must be presented as a string for Jinja2 + task_definition_force_create: false # creates a task definition revision every time if set to true task_count: 1 task_minimum_count: 1 task_maximum_count: 4 @@ -81,6 +82,7 @@ deploy_container: service_autoscale_up_cooldown: 120 service_autoscale_down_cooldown: 120 service_autoscale_target_value: 70 # the value to trigger a scaling event at + service_force_refresh: false # forces a refresh of all containers if set to true execution_role_arn: "arn:aws:iam::000000000000:role/ecsTaskExecutionRole" # ARN of the IAM role to run the task as, must have access to the ECR repository if applicable containers: # list of container definitions, see docs: https://docs.ansible.com/ansible/latest/collections/community/aws/ecs_taskdefinition_module.html#parameter-containers - name: example-container diff --git a/roles/deploy_container/defaults/main.yml b/roles/deploy_container/defaults/main.yml index c660243a..0ca73b42 100644 --- a/roles/deploy_container/defaults/main.yml +++ b/roles/deploy_container/defaults/main.yml @@ -35,6 +35,7 @@ deploy_container: cluster_name: example-cluster family_name: example-task-definition task_definition_revision: "" # integer, but must be presented as a string for Jinja2 + task_definition_force_create: false # creates a task definition revision every time if set to true task_count: 1 task_minimum_count: 1 task_maximum_count: 4 @@ -43,6 +44,7 @@ deploy_container: service_autoscale_up_cooldown: 120 service_autoscale_down_cooldown: 120 service_autoscale_target_value: 70 # the value to trigger a scaling event at + service_force_refresh: false # forces a refresh of all containers if set to true execution_role_arn: "arn:aws:iam::000000000000:role/ecsTaskExecutionRole" # ARN of the IAM role to run the task as, must have access to the ECR repository if applicable 
containers: # list of container definitions, see docs: https://docs.ansible.com/ansible/latest/collections/community/aws/ecs_taskdefinition_module.html#parameter-containers - name: example-container diff --git a/roles/deploy_container/tasks/main.yml b/roles/deploy_container/tasks/main.yml index 34866aa1..8238e080 100644 --- a/roles/deploy_container/tasks/main.yml +++ b/roles/deploy_container/tasks/main.yml @@ -138,6 +138,7 @@ state: present network_mode: "{{ deploy_container.aws_ecs.network_mode }}" volumes: "{{ deploy_container.aws_ecs.volumes | default(omit) }}" + force_create: "{{ deploy_container.aws_ecs.task_definition_force_create }}" delegate_to: localhost when: deploy_container.aws_ecs.enabled @@ -294,6 +295,7 @@ security_groups: "{{ deploy_container.aws_ecs.security_groups }}" assign_public_ip: true # must be true for now - details: https://stackoverflow.com/a/66802973 tags: "{{ deploy_container.aws_ecs.tags }}" + force_new_deployment: "{{ deploy_container.aws_ecs.service_force_refresh }}" wait: true delegate_to: localhost when: deploy_container.aws_ecs.enabled From 177d689caeb138c0a2918f8191bc928eb65ea4e8 Mon Sep 17 00:00:00 2001 From: gregharvey Date: Thu, 29 Jun 2023 18:51:29 +0200 Subject: [PATCH 22/28] Allowing users to toggle public IP and execute command mode. --- docs/roles/deploy_container.md | 2 ++ roles/deploy_container/README.md | 2 ++ roles/deploy_container/defaults/main.yml | 2 ++ roles/deploy_container/tasks/main.yml | 3 ++- 4 files changed, 8 insertions(+), 1 deletion(-) diff --git a/docs/roles/deploy_container.md b/docs/roles/deploy_container.md index 8e5872cb..9970dc39 100644 --- a/docs/roles/deploy_container.md +++ b/docs/roles/deploy_container.md @@ -82,6 +82,8 @@ deploy_container: service_autoscale_up_cooldown: 120 service_autoscale_down_cooldown: 120 service_autoscale_target_value: 70 # the value to trigger a scaling event at + service_public_container_ip: false # set to true to make containers appear on an EIP - more details: https://stackoverflow.com/a/66802973 + service_enable_ssm: false # set to true to allow arbitrary command execution on containers via the AWS API service_force_refresh: false # forces a refresh of all containers if set to true execution_role_arn: "arn:aws:iam::000000000000:role/ecsTaskExecutionRole" # ARN of the IAM role to run the task as, must have access to the ECR repository if applicable containers: # list of container definitions, see docs: https://docs.ansible.com/ansible/latest/collections/community/aws/ecs_taskdefinition_module.html#parameter-containers diff --git a/roles/deploy_container/README.md b/roles/deploy_container/README.md index 8e5872cb..9970dc39 100644 --- a/roles/deploy_container/README.md +++ b/roles/deploy_container/README.md @@ -82,6 +82,8 @@ deploy_container: service_autoscale_up_cooldown: 120 service_autoscale_down_cooldown: 120 service_autoscale_target_value: 70 # the value to trigger a scaling event at + service_public_container_ip: false # set to true to make containers appear on an EIP - more details: https://stackoverflow.com/a/66802973 + service_enable_ssm: false # set to true to allow arbitrary command execution on containers via the AWS API service_force_refresh: false # forces a refresh of all containers if set to true execution_role_arn: "arn:aws:iam::000000000000:role/ecsTaskExecutionRole" # ARN of the IAM role to run the task as, must have access to the ECR repository if applicable containers: # list of container definitions, see docs: 
https://docs.ansible.com/ansible/latest/collections/community/aws/ecs_taskdefinition_module.html#parameter-containers diff --git a/roles/deploy_container/defaults/main.yml b/roles/deploy_container/defaults/main.yml index 0ca73b42..a9b7e9d9 100644 --- a/roles/deploy_container/defaults/main.yml +++ b/roles/deploy_container/defaults/main.yml @@ -44,6 +44,8 @@ deploy_container: service_autoscale_up_cooldown: 120 service_autoscale_down_cooldown: 120 service_autoscale_target_value: 70 # the value to trigger a scaling event at + service_public_container_ip: false # set to true to make containers appear on an EIP - more details: https://stackoverflow.com/a/66802973 + service_enable_ssm: false # set to true to allow arbitrary command execution on containers via the AWS API service_force_refresh: false # forces a refresh of all containers if set to true execution_role_arn: "arn:aws:iam::000000000000:role/ecsTaskExecutionRole" # ARN of the IAM role to run the task as, must have access to the ECR repository if applicable containers: # list of container definitions, see docs: https://docs.ansible.com/ansible/latest/collections/community/aws/ecs_taskdefinition_module.html#parameter-containers diff --git a/roles/deploy_container/tasks/main.yml b/roles/deploy_container/tasks/main.yml index 8238e080..de12e5bc 100644 --- a/roles/deploy_container/tasks/main.yml +++ b/roles/deploy_container/tasks/main.yml @@ -293,8 +293,9 @@ network_configuration: subnets: "{{ _aws_ecs_cluster_public_subnets_ids }}" security_groups: "{{ deploy_container.aws_ecs.security_groups }}" - assign_public_ip: true # must be true for now - details: https://stackoverflow.com/a/66802973 + assign_public_ip: "{{ deploy_container.aws_ecs.service_public_container_ip }}" tags: "{{ deploy_container.aws_ecs.tags }}" + enable_execute_command: "{{ deploy_container.aws_ecs.service_enable_ssm }}" force_new_deployment: "{{ deploy_container.aws_ecs.service_force_refresh }}" wait: true delegate_to: localhost From 6ac9b6ad2ee0ff02df1b7e672a492172a2271133 Mon Sep 17 00:00:00 2001 From: gregharvey Date: Fri, 30 Jun 2023 09:57:48 +0200 Subject: [PATCH 23/28] The ALB needs to be on public subnets, not the same private ones as the cluster. 
--- roles/deploy_container/defaults/main.yml | 13 +++++++++---- roles/deploy_container/tasks/main.yml | 17 +++++++++++++---- roles/deploy_container/tasks/subnet-private.yml | 13 +++++++++++++ .../tasks/{subnet.yml => subnet-public.yml} | 0 4 files changed, 35 insertions(+), 8 deletions(-) create mode 100644 roles/deploy_container/tasks/subnet-private.yml rename roles/deploy_container/tasks/{subnet.yml => subnet-public.yml} (100%) diff --git a/roles/deploy_container/defaults/main.yml b/roles/deploy_container/defaults/main.yml index a9b7e9d9..be178aca 100644 --- a/roles/deploy_container/defaults/main.yml +++ b/roles/deploy_container/defaults/main.yml @@ -28,17 +28,20 @@ deploy_container: aws_profile: example2 # might not be the same account vpc_name: example #vpc_id: vpc-XXXXXXX # optionally specify VPC ID to use - subnets: # list of public subnet names - - example-dev-a - - example-dev-b security_groups: [] # list of security groups, accepts names or IDs cluster_name: example-cluster family_name: example-task-definition task_definition_revision: "" # integer, but must be presented as a string for Jinja2 task_definition_force_create: false # creates a task definition revision every time if set to true + task_execution_role_arn: "arn:aws:iam::000000000000:role/ecsTaskExecutionRole" # ARN of the IAM role to run the task as, must have access to the ECR repository if applicable + #task_role_arn: "" # required if you set service_enable_ssm to true task_count: 1 task_minimum_count: 1 task_maximum_count: 4 + # These subnets are usually the subnets created by ce-provision when you made your ECS cluster and must have a NAT gateway for ECR access. + service_subnets: # list of private subnet names + - example-cluster-dev-a + - example-cluster-dev-b # See docs for values: https://docs.aws.amazon.com/autoscaling/application/APIReference/API_TargetTrackingScalingPolicyConfiguration.html service_autoscale_metric_type: ECSServiceAverageCPUUtilization service_autoscale_up_cooldown: 120 @@ -47,7 +50,6 @@ deploy_container: service_public_container_ip: false # set to true to make containers appear on an EIP - more details: https://stackoverflow.com/a/66802973 service_enable_ssm: false # set to true to allow arbitrary command execution on containers via the AWS API service_force_refresh: false # forces a refresh of all containers if set to true - execution_role_arn: "arn:aws:iam::000000000000:role/ecsTaskExecutionRole" # ARN of the IAM role to run the task as, must have access to the ECR repository if applicable containers: # list of container definitions, see docs: https://docs.ansible.com/ansible/latest/collections/community/aws/ecs_taskdefinition_module.html#parameter-containers - name: example-container essential: true @@ -84,6 +86,9 @@ deploy_container: extra_domains: [] # list of Subject Alternative Name domains and zones ssl_certificate_ARN: "" # optional SSL cert ARN if you imported one into AWS Certificate Manager elb_security_groups: [] # default SG is used if none provided - module supports names or IDs + elb_subnets: # must be public subnets for public facing applications + - example-dev-a + - example-dev-b elb_http_port: 80 elb_https_port: 443 elb_ssl_policy: ELBSecurityPolicy-TLS13-1-2-2021-06 # see https://docs.aws.amazon.com/elasticloadbalancing/latest/application/create-https-listener.html#describe-ssl-policies diff --git a/roles/deploy_container/tasks/main.yml b/roles/deploy_container/tasks/main.yml index de12e5bc..6c68bebd 100644 --- a/roles/deploy_container/tasks/main.yml +++ 
b/roles/deploy_container/tasks/main.yml @@ -115,11 +115,19 @@ - name: Reset subnets lists. ansible.builtin.set_fact: _aws_ecs_cluster_public_subnets_ids: [] + _aws_ecs_cluster_private_subnets_ids: [] when: deploy_container.aws_ecs.enabled - name: Construct list of public subnet IDs. - ansible.builtin.include_tasks: subnet.yml - with_items: "{{ deploy_container.aws_ecs.subnets }}" + ansible.builtin.include_tasks: subnet-public.yml + with_items: "{{ deploy_container.aws_ecs.elb_subnets }}" + loop_control: + loop_var: subnet + when: deploy_container.aws_ecs.enabled + +- name: Construct list of private subnet IDs. + ansible.builtin.include_tasks: subnet-private.yml + with_items: "{{ deploy_container.aws_ecs.service_subnets }}" loop_control: loop_var: subnet when: deploy_container.aws_ecs.enabled @@ -130,7 +138,8 @@ region: "{{ deploy_container.aws_ecs.region }}" profile: "{{ deploy_container.aws_ecs.aws_profile }}" family: "{{ deploy_container.aws_ecs.family_name }}" - execution_role_arn: "{{ deploy_container.aws_ecs.execution_role_arn }}" + execution_role_arn: "{{ deploy_container.aws_ecs.task_execution_role_arn }}" + task_role_arn: "{{ deploy_container.aws_ecs.task_role_arn | default(omit) }}" containers: "{{ deploy_container.aws_ecs.containers }}" launch_type: "{{ deploy_container.aws_ecs.launch_type }}" cpu: "{{ deploy_container.aws_ecs.cpu | default(omit) }}" @@ -291,7 +300,7 @@ containerPort: "{{ deploy_container.aws_ecs.target_group_port }}" targetGroupArn: "{{ _aws_ecs_target_group.target_group_arn }}" network_configuration: - subnets: "{{ _aws_ecs_cluster_public_subnets_ids }}" + subnets: "{{ _aws_ecs_cluster_private_subnets_ids }}" # internal private subnet security_groups: "{{ deploy_container.aws_ecs.security_groups }}" assign_public_ip: "{{ deploy_container.aws_ecs.service_public_container_ip }}" tags: "{{ deploy_container.aws_ecs.tags }}" diff --git a/roles/deploy_container/tasks/subnet-private.yml b/roles/deploy_container/tasks/subnet-private.yml new file mode 100644 index 00000000..2335cb89 --- /dev/null +++ b/roles/deploy_container/tasks/subnet-private.yml @@ -0,0 +1,13 @@ +- name: Gather private subnet information. + amazon.aws.ec2_vpc_subnet_info: + profile: "{{ deploy_container.aws_ecs.aws_profile }}" + region: "{{ deploy_container.aws_ecs.region }}" + filters: + vpc-id: "{{ _aws_ecs_cluster_vpc_id }}" + tag:Name: "{{ subnet }}" + register: _aws_ecs_cluster_private_subnet + delegate_to: localhost + +- name: Add private subnet to the list. + ansible.builtin.set_fact: + _aws_ecs_cluster_private_subnets_ids: "{{ _aws_ecs_cluster_private_subnets_ids + [ _aws_ecs_cluster_private_subnet.subnets[0].subnet_id ] }}" diff --git a/roles/deploy_container/tasks/subnet.yml b/roles/deploy_container/tasks/subnet-public.yml similarity index 100% rename from roles/deploy_container/tasks/subnet.yml rename to roles/deploy_container/tasks/subnet-public.yml From e4bdb44ccca46e529f6e08339a2798ba251af0cd Mon Sep 17 00:00:00 2001 From: gregharvey Date: Fri, 30 Jun 2023 09:58:12 +0200 Subject: [PATCH 24/28] Updating documentation. 
--- docs/roles/deploy_container.md | 13 +++++++++---- roles/deploy_container/README.md | 13 +++++++++---- 2 files changed, 18 insertions(+), 8 deletions(-) diff --git a/docs/roles/deploy_container.md b/docs/roles/deploy_container.md index 9970dc39..8152cedc 100644 --- a/docs/roles/deploy_container.md +++ b/docs/roles/deploy_container.md @@ -66,17 +66,20 @@ deploy_container: aws_profile: example2 # might not be the same account vpc_name: example #vpc_id: vpc-XXXXXXX # optionally specify VPC ID to use - subnets: # list of public subnet names - - example-dev-a - - example-dev-b security_groups: [] # list of security groups, accepts names or IDs cluster_name: example-cluster family_name: example-task-definition task_definition_revision: "" # integer, but must be presented as a string for Jinja2 task_definition_force_create: false # creates a task definition revision every time if set to true + task_execution_role_arn: "arn:aws:iam::000000000000:role/ecsTaskExecutionRole" # ARN of the IAM role to run the task as, must have access to the ECR repository if applicable + #task_role_arn: "" # required if you set service_enable_ssm to true task_count: 1 task_minimum_count: 1 task_maximum_count: 4 + # These subnets are usually the subnets created by ce-provision when you made your ECS cluster and must have a NAT gateway for ECR access. + service_subnets: # list of private subnet names + - example-cluster-dev-a + - example-cluster-dev-b # See docs for values: https://docs.aws.amazon.com/autoscaling/application/APIReference/API_TargetTrackingScalingPolicyConfiguration.html service_autoscale_metric_type: ECSServiceAverageCPUUtilization service_autoscale_up_cooldown: 120 @@ -85,7 +88,6 @@ deploy_container: service_public_container_ip: false # set to true to make containers appear on an EIP - more details: https://stackoverflow.com/a/66802973 service_enable_ssm: false # set to true to allow arbitrary command execution on containers via the AWS API service_force_refresh: false # forces a refresh of all containers if set to true - execution_role_arn: "arn:aws:iam::000000000000:role/ecsTaskExecutionRole" # ARN of the IAM role to run the task as, must have access to the ECR repository if applicable containers: # list of container definitions, see docs: https://docs.ansible.com/ansible/latest/collections/community/aws/ecs_taskdefinition_module.html#parameter-containers - name: example-container essential: true @@ -122,6 +124,9 @@ deploy_container: extra_domains: [] # list of Subject Alternative Name domains and zones ssl_certificate_ARN: "" # optional SSL cert ARN if you imported one into AWS Certificate Manager elb_security_groups: [] # default SG is used if none provided - module supports names or IDs + elb_subnets: # must be public subnets for public facing applications + - example-dev-a + - example-dev-b elb_http_port: 80 elb_https_port: 443 elb_ssl_policy: ELBSecurityPolicy-TLS13-1-2-2021-06 # see https://docs.aws.amazon.com/elasticloadbalancing/latest/application/create-https-listener.html#describe-ssl-policies diff --git a/roles/deploy_container/README.md b/roles/deploy_container/README.md index 9970dc39..8152cedc 100644 --- a/roles/deploy_container/README.md +++ b/roles/deploy_container/README.md @@ -66,17 +66,20 @@ deploy_container: aws_profile: example2 # might not be the same account vpc_name: example #vpc_id: vpc-XXXXXXX # optionally specify VPC ID to use - subnets: # list of public subnet names - - example-dev-a - - example-dev-b security_groups: [] # list of security groups, accepts names or IDs 
cluster_name: example-cluster family_name: example-task-definition task_definition_revision: "" # integer, but must be presented as a string for Jinja2 task_definition_force_create: false # creates a task definition revision every time if set to true + task_execution_role_arn: "arn:aws:iam::000000000000:role/ecsTaskExecutionRole" # ARN of the IAM role to run the task as, must have access to the ECR repository if applicable + #task_role_arn: "" # required if you set service_enable_ssm to true task_count: 1 task_minimum_count: 1 task_maximum_count: 4 + # These subnets are usually the subnets created by ce-provision when you made your ECS cluster and must have a NAT gateway for ECR access. + service_subnets: # list of private subnet names + - example-cluster-dev-a + - example-cluster-dev-b # See docs for values: https://docs.aws.amazon.com/autoscaling/application/APIReference/API_TargetTrackingScalingPolicyConfiguration.html service_autoscale_metric_type: ECSServiceAverageCPUUtilization service_autoscale_up_cooldown: 120 @@ -85,7 +88,6 @@ deploy_container: service_public_container_ip: false # set to true to make containers appear on an EIP - more details: https://stackoverflow.com/a/66802973 service_enable_ssm: false # set to true to allow arbitrary command execution on containers via the AWS API service_force_refresh: false # forces a refresh of all containers if set to true - execution_role_arn: "arn:aws:iam::000000000000:role/ecsTaskExecutionRole" # ARN of the IAM role to run the task as, must have access to the ECR repository if applicable containers: # list of container definitions, see docs: https://docs.ansible.com/ansible/latest/collections/community/aws/ecs_taskdefinition_module.html#parameter-containers - name: example-container essential: true @@ -122,6 +124,9 @@ deploy_container: extra_domains: [] # list of Subject Alternative Name domains and zones ssl_certificate_ARN: "" # optional SSL cert ARN if you imported one into AWS Certificate Manager elb_security_groups: [] # default SG is used if none provided - module supports names or IDs + elb_subnets: # must be public subnets for public facing applications + - example-dev-a + - example-dev-b elb_http_port: 80 elb_https_port: 443 elb_ssl_policy: ELBSecurityPolicy-TLS13-1-2-2021-06 # see https://docs.aws.amazon.com/elasticloadbalancing/latest/application/create-https-listener.html#describe-ssl-policies From 65fb103fcdc7ccb13295b295703f1ac28e0766e0 Mon Sep 17 00:00:00 2001 From: gregharvey Date: Fri, 30 Jun 2023 10:51:27 +0200 Subject: [PATCH 25/28] Updating ECS docs. --- docs/roles/deploy_container.md | 3 +++ roles/deploy_container/README.md | 3 +++ 2 files changed, 6 insertions(+) diff --git a/docs/roles/deploy_container.md b/docs/roles/deploy_container.md index 8152cedc..f127bb61 100644 --- a/docs/roles/deploy_container.md +++ b/docs/roles/deploy_container.md @@ -30,6 +30,9 @@ The full list is: Naturally you can always create custom policies and roles to have tighter access control. This document simply gives you the broad strokes AWS managed policies you can use in conjunction with this Ansible role. +# Peculiarities of AWS ECS +It is worth noting that even if you put your containers on private subnets and configure your apps to use internal addressing, traffic will pass via the public interface. Therefore any safelisting of IP addresses needs to include the IP addresses of the NAT gateways of your private subnets. 
[More on how this works here.](https://docs.aws.amazon.com/AmazonECS/latest/bestpracticesguide/networking-connecting-vpc.html) + diff --git a/roles/deploy_container/README.md b/roles/deploy_container/README.md index 8152cedc..f127bb61 100644 --- a/roles/deploy_container/README.md +++ b/roles/deploy_container/README.md @@ -30,6 +30,9 @@ The full list is: Naturally you can always create custom policies and roles to have tighter access control. This document simply gives you the broad strokes AWS managed policies you can use in conjunction with this Ansible role. +# Peculiarities of AWS ECS +It is worth noting that even if you put your containers on private subnets and configure your apps to use internal addressing, traffic will pass via the public interface. Therefore any safelisting of IP addresses needs to include the IP addresses of the NAT gateways of your private subnets. [More on how this works here.](https://docs.aws.amazon.com/AmazonECS/latest/bestpracticesguide/networking-connecting-vpc.html) + From 4acd1da461f57977d56f512f0cb7ed679edeab1a Mon Sep 17 00:00:00 2001 From: gregharvey Date: Wed, 20 Dec 2023 18:32:42 +0100 Subject: [PATCH 26/28] New deploy_container destroy action. --- roles/deploy_container/defaults/main.yml | 1 + .../deploy_container/tasks/action-cleanup.yml | 3 + .../deploy_container/tasks/action-create.yml | 363 +++++++++++++++++ .../deploy_container/tasks/action-destroy.yml | 254 ++++++++++++ roles/deploy_container/tasks/main.yml | 371 +----------------- 5 files changed, 630 insertions(+), 362 deletions(-) create mode 100644 roles/deploy_container/tasks/action-cleanup.yml create mode 100644 roles/deploy_container/tasks/action-create.yml create mode 100644 roles/deploy_container/tasks/action-destroy.yml diff --git a/roles/deploy_container/defaults/main.yml b/roles/deploy_container/defaults/main.yml index f01dcb15..ff99d2b9 100644 --- a/roles/deploy_container/defaults/main.yml +++ b/roles/deploy_container/defaults/main.yml @@ -1,5 +1,6 @@ --- deploy_container: + action: create # can also be destroy container_name: example-container container_tag: latest # tag will take format container_name:container_tag container_force_build: true # force Docker to build and tag a new image diff --git a/roles/deploy_container/tasks/action-cleanup.yml b/roles/deploy_container/tasks/action-cleanup.yml new file mode 100644 index 00000000..57312dbe --- /dev/null +++ b/roles/deploy_container/tasks/action-cleanup.yml @@ -0,0 +1,3 @@ +--- +# @TODO we need container image cleanup here and potentially ECS Task Definition cleanup too. +# Possibly useful link: https://stackoverflow.com/a/40949364 diff --git a/roles/deploy_container/tasks/action-create.yml b/roles/deploy_container/tasks/action-create.yml new file mode 100644 index 00000000..cc06163d --- /dev/null +++ b/roles/deploy_container/tasks/action-create.yml @@ -0,0 +1,363 @@ +--- +# Build and ship a container image +- name: Create Dockerfile from template. + ansible.builtin.template: + src: "{{ deploy_container.dockerfile_template }}" + dest: "{{ deploy_container.docker_build_dir }}/Dockerfile" + delegate_to: localhost + +- name: Set Docker registry username and password. + ansible.builtin.set_fact: + _docker_registry_username: "{{ deploy_container.docker_registry_user }}" + _docker_registry_password: "{{ deploy_container.docker_registry_pass }}" + delegate_to: localhost + +- name: Fetch AWS ECR registry login token. 
# token valid for 12 hours + ansible.builtin.command: + cmd: "aws ecr get-login-password --region {{ deploy_container.aws_ecr.region }} --profile {{ deploy_container.aws_ecr.aws_profile }}" + when: deploy_container.aws_ecr.enabled + delegate_to: localhost + register: _docker_registry_ecr_token + +- name: Set AWS ECR registry password. + ansible.builtin.set_fact: + _docker_registry_password: "{{ _docker_registry_ecr_token.stdout }}" + when: deploy_container.aws_ecr.enabled + delegate_to: localhost + +- name: Set AWS ECR registry username. + ansible.builtin.set_fact: + _docker_registry_username: "AWS" + when: deploy_container.aws_ecr.enabled + delegate_to: localhost + +- name: Remove Docker credentials file. + ansible.builtin.file: + state: absent + path: "/home/{{ deploy_user }}/.docker/config.json" + delegate_to: localhost + +- name: Log into Docker registry. + community.docker.docker_login: + registry_url: "{{ deploy_container.docker_registry_url }}" + username: "{{ _docker_registry_username }}" + password: "{{ _docker_registry_password }}" + reauthorize: true + delegate_to: localhost + +- name: Build and push container image. + community.docker.docker_image: + build: + path: "{{ deploy_container.docker_build_dir }}" + name: "{{ deploy_container.docker_registry_name }}/{{ deploy_container.container_name }}" + tag: "{{ deploy_container.container_tag | default('latest') }}" + push: true + source: build + force_source: "{{ deploy_container.container_force_build }}" + force_tag: "{{ deploy_container.container_force_build }}" + delegate_to: localhost + +# Fetch the ACM role from ce-provision +- name: Ensure the aws_acm directory exists. + ansible.builtin.file: + path: "{{ _ce_deploy_base_dir }}/roles/aws_acm/{{ item }}" + state: directory + mode: '0755' + delegate_to: localhost + with_items: + - tasks + - defaults + +- name: Fetch the aws_acm files. + ansible.builtin.get_url: + url: "https://raw.githubusercontent.com/codeenigma/ce-provision/1.x/roles/aws/aws_acm/{{ item }}/main.yml" + dest: "{{ _ce_deploy_base_dir }}/roles/aws_acm/{{ item }}/main.yml" + delegate_to: localhost + with_items: + - tasks + - defaults + +- name: Fetch the aws_acm tasks. + ansible.builtin.get_url: + url: https://raw.githubusercontent.com/codeenigma/ce-provision/1.x/roles/aws/aws_acm/tasks/main.yml + dest: "{{ _ce_deploy_base_dir }}/roles/aws_acm/tasks/main.yml" + delegate_to: localhost + +# Gather all network information +- name: Gather VPC information. + amazon.aws.ec2_vpc_net_info: + profile: "{{ deploy_container.aws_ecs.aws_profile }}" + region: "{{ deploy_container.aws_ecs.region }}" + filters: + "tag:Name": "{{ deploy_container.aws_ecs.vpc_name }}" + register: _aws_ecs_cluster_vpc + delegate_to: localhost + when: + - deploy_container.aws_ecs.enabled + - deploy_container.aws_ecs.vpc_name is defined + - deploy_container.aws_ecs.vpc_name | length > 0 + +- name: Set the VPC id from name. + ansible.builtin.set_fact: + _aws_ecs_cluster_vpc_id: "{{ _aws_ecs_cluster_vpc.vpcs[0].vpc_id }}" + when: + - deploy_container.aws_ecs.enabled + - deploy_container.aws_ecs.vpc_name is defined + - deploy_container.aws_ecs.vpc_name | length > 0 + +- name: Use provided VPC id. + ansible.builtin.set_fact: + _aws_ecs_cluster_vpc_id: "{{ deploy_container.aws_ecs.vpc_id }}" + when: + - deploy_container.aws_ecs.enabled + - (deploy_container.aws_ecs.vpc_name is not defined or deploy_container.aws_ecs.vpc_name | length < 0) + +- name: Reset subnets lists. 
+ ansible.builtin.set_fact: + _aws_ecs_cluster_public_subnets_ids: [] + _aws_ecs_cluster_private_subnets_ids: [] + when: deploy_container.aws_ecs.enabled + +- name: Construct list of public subnet IDs. + ansible.builtin.include_tasks: subnet-public.yml + with_items: "{{ deploy_container.aws_ecs.elb_subnets }}" + loop_control: + loop_var: subnet + when: deploy_container.aws_ecs.enabled + +- name: Construct list of private subnet IDs. + ansible.builtin.include_tasks: subnet-private.yml + with_items: "{{ deploy_container.aws_ecs.service_subnets }}" + loop_control: + loop_var: subnet + when: deploy_container.aws_ecs.enabled + +# Construct AWS supporting assets +- name: Create task definition. + community.aws.ecs_taskdefinition: + region: "{{ deploy_container.aws_ecs.region }}" + profile: "{{ deploy_container.aws_ecs.aws_profile }}" + family: "{{ deploy_container.aws_ecs.family_name }}" + execution_role_arn: "{{ deploy_container.aws_ecs.task_execution_role_arn }}" + task_role_arn: "{{ deploy_container.aws_ecs.task_role_arn | default(omit) }}" + containers: "{{ deploy_container.aws_ecs.containers }}" + launch_type: "{{ deploy_container.aws_ecs.launch_type }}" + cpu: "{{ deploy_container.aws_ecs.cpu | default(omit) }}" + memory: "{{ deploy_container.aws_ecs.memory | default(omit) }}" + state: present + network_mode: "{{ deploy_container.aws_ecs.network_mode }}" + volumes: "{{ deploy_container.aws_ecs.volumes | default(omit) }}" + force_create: "{{ deploy_container.aws_ecs.task_definition_force_create }}" + delegate_to: localhost + when: deploy_container.aws_ecs.enabled + +- name: Create a target group with IP address targets. + community.aws.elb_target_group: + region: "{{ deploy_container.aws_ecs.region }}" + profile: "{{ deploy_container.aws_ecs.aws_profile }}" + name: "{{ deploy_container.aws_ecs.target_group_name | truncate(32, true, '', 0) }}" # 32 char limit + protocol: "{{ deploy_container.aws_ecs.target_group_protocol }}" + port: "{{ deploy_container.aws_ecs.target_group_port }}" + vpc_id: "{{ _aws_ecs_cluster_vpc_id }}" + health_check_protocol: "{{ deploy_container.aws_ecs.health_check.protocol }}" + health_check_path: "{{ deploy_container.aws_ecs.health_check.path }}" + successful_response_codes: "{{ deploy_container.aws_ecs.health_check.response_codes }}" + target_type: ip + targets: "{{ deploy_container.aws_ecs.targets }}" + state: present + wait_timeout: "{{ deploy_container.aws_ecs.target_group_wait_timeout }}" + wait: true + register: _aws_ecs_target_group + delegate_to: localhost + when: deploy_container.aws_ecs.enabled + +- name: Create SSL certificate for load balancer. + ansible.builtin.include_role: + name: aws_acm + vars: + aws_acm: + region: "{{ deploy_container.aws_ecs.region }}" + aws_profile: "{{ deploy_container.aws_ecs.aws_profile }}" + tags: "{{ deploy_container.aws_ecs.tags }}" + export: false + domain_name: "{{ deploy_container.aws_ecs.domain_name }}" + extra_domains: "{{ deploy_container.aws_ecs.acm.extra_domains }}" + route_53: + aws_profile: "{{ deploy_container.aws_ecs.route_53.aws_profile }}" + zone: "{{ deploy_container.aws_ecs.route_53.zone }}" + when: + - deploy_container.aws_ecs.acm.create_cert + - deploy_container.aws_ecs.enabled + +- name: Default to provided SSL certificate ARN. + ansible.builtin.set_fact: + _ssl_certificate_ARN: "{{ deploy_container.aws_ecs.ssl_certificate_ARN }}" + when: deploy_container.aws_ecs.enabled + +- name: If provided, override SSL certificate ARN with the one received from ACM. 
+ ansible.builtin.set_fact: + _ssl_certificate_ARN: "{{ aws_acm_certificate_arn }}" + when: + - deploy_container.aws_ecs.acm.create_cert + - deploy_container.aws_ecs.enabled + +- name: Define default ALB listeners. + ansible.builtin.set_fact: + _aws_ecs_cluster_listeners_http: + Protocol: HTTP + Port: "{{ deploy_container.aws_ecs.elb_http_port }}" + DefaultActions: + - Type: forward + TargetGroupName: "{{ deploy_container.aws_ecs.target_group_name | truncate(32, true, '', 0) }}" + Rules: "{{ deploy_container.aws_ecs.elb_listener_http_rules }}" + _aws_ecs_cluster_listeners_redirect: + Protocol: HTTP + Port: "{{ deploy_container.aws_ecs.elb_http_port }}" + DefaultActions: + - Type: redirect + RedirectConfig: + Protocol: HTTPS + Host: "#{host}" + Query: "#{query}" + Path: "/#{path}" + Port: "{{ deploy_container.aws_ecs.elb_https_port }}" + StatusCode: HTTP_301 + _aws_ecs_cluster_listeners_https: + Protocol: HTTPS + Port: "{{ deploy_container.aws_ecs.elb_https_port }}" + SslPolicy: "{{ deploy_container.aws_ecs.elb_ssl_policy }}" + Certificates: + - CertificateArn: "{{ _ssl_certificate_ARN }}" + DefaultActions: + - Type: forward + TargetGroupName: "{{ deploy_container.aws_ecs.target_group_name | truncate(32, true, '', 0) }}" + Rules: "{{ deploy_container.aws_ecs.elb_listener_https_rules }}" + when: deploy_container.aws_ecs.enabled + +- name: Add HTTP listeners. + ansible.builtin.set_fact: + _aws_ecs_cluster_listeners: "{{ [ _aws_ecs_cluster_listeners_http ] }}" + when: + - _ssl_certificate_ARN | length < 1 + - deploy_container.aws_ecs.enabled + +- name: Add HTTPS Listener. + ansible.builtin.set_fact: + _aws_ecs_cluster_listeners: "{{ [ _aws_ecs_cluster_listeners_redirect, _aws_ecs_cluster_listeners_https ] }}" + when: + - _ssl_certificate_ARN | length > 1 + - deploy_container.aws_ecs.enabled + +- name: Add custom Listeners. + ansible.builtin.set_fact: + _aws_ecs_cluster_listeners: "{{ _aws_ecs_cluster_listeners + deploy_container.aws_ecs.elb_listeners }}" + when: + - deploy_container.aws_ecs.elb_listeners is defined + - deploy_container.aws_ecs.elb_listeners | length + - deploy_container.aws_ecs.enabled + +- name: Create an ALB. + amazon.aws.elb_application_lb: + region: "{{ deploy_container.aws_ecs.region }}" + profile: "{{ deploy_container.aws_ecs.aws_profile }}" + name: "{{ deploy_container.aws_ecs.target_group_name | truncate(32, true, '', 0) }}" # 32 char limit + state: present + tags: "{{ deploy_container.aws_ecs.tags }}" + subnets: "{{ _aws_ecs_cluster_public_subnets_ids }}" + security_groups: "{{ deploy_container.aws_ecs.elb_security_groups }}" + listeners: "{{ _aws_ecs_cluster_listeners }}" + idle_timeout: "{{ deploy_container.aws_ecs.elb_idle_timeout }}" + ip_address_type: "{{ deploy_container.aws_ecs.elb_ip_address_type }}" + register: _aws_ecs_cluster_alb + delegate_to: localhost + when: deploy_container.aws_ecs.enabled + +- name: Set task definition name. + ansible.builtin.set_fact: + _aws_ecs_service_task_definition: "{{ deploy_container.aws_ecs.family_name }}" + when: deploy_container.aws_ecs.enabled + +- name: Set task definition revision if applicable. + ansible.builtin.set_fact: + _aws_ecs_service_task_definition: "{{ deploy_container.aws_ecs.family_name }}:{{ deploy_container.aws_ecs.task_definition_revision }}" + when: + - deploy_container.aws_ecs.task_definition_revision | length > 0 + - deploy_container.aws_ecs.enabled + +- name: Create ECS service. 
+ community.aws.ecs_service: + region: "{{ deploy_container.aws_ecs.region }}" + profile: "{{ deploy_container.aws_ecs.aws_profile }}" + state: present + name: "{{ deploy_container.aws_ecs.family_name }}" + cluster: "{{ deploy_container.aws_ecs.cluster_name }}" + task_definition: "{{ _aws_ecs_service_task_definition }}" + desired_count: "{{ deploy_container.aws_ecs.task_count }}" + launch_type: "{{ deploy_container.aws_ecs.launch_type }}" + platform_version: LATEST + load_balancers: # see https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_LoadBalancer.html + - containerName: "{{ deploy_container.container_name }}" + containerPort: "{{ deploy_container.aws_ecs.target_group_port }}" + targetGroupArn: "{{ _aws_ecs_target_group.target_group_arn }}" + network_configuration: + subnets: "{{ _aws_ecs_cluster_private_subnets_ids }}" # internal private subnet + security_groups: "{{ deploy_container.aws_ecs.security_groups }}" + assign_public_ip: "{{ deploy_container.aws_ecs.service_public_container_ip }}" + tags: "{{ deploy_container.aws_ecs.tags }}" + enable_execute_command: "{{ deploy_container.aws_ecs.service_enable_ssm }}" + force_new_deployment: "{{ deploy_container.aws_ecs.service_force_refresh }}" + wait: true + delegate_to: localhost + when: deploy_container.aws_ecs.enabled + +- name: Create target tracking scaling policy for ECS service. + community.aws.application_autoscaling_policy: + region: "{{ deploy_container.aws_ecs.region }}" + profile: "{{ deploy_container.aws_ecs.aws_profile }}" + state: present + policy_name: "{{ deploy_container.aws_ecs.family_name }}" + service_namespace: ecs + resource_id: "service/{{ deploy_container.aws_ecs.cluster_name }}/{{ deploy_container.aws_ecs.family_name }}" + scalable_dimension: ecs:service:DesiredCount + minimum_tasks: "{{ deploy_container.aws_ecs.task_minimum_count }}" + maximum_tasks: "{{ deploy_container.aws_ecs.task_maximum_count }}" + policy_type: TargetTrackingScaling + target_tracking_scaling_policy_configuration: + PredefinedMetricSpecification: + PredefinedMetricType: "{{ deploy_container.aws_ecs.service_autoscale_metric_type }}" + ScaleInCooldown: "{{ deploy_container.aws_ecs.service_autoscale_up_cooldown }}" + ScaleOutCooldown: "{{ deploy_container.aws_ecs.service_autoscale_down_cooldown }}" + DisableScaleIn: false + TargetValue: "{{ deploy_container.aws_ecs.service_autoscale_target_value }}" + delegate_to: localhost + when: deploy_container.aws_ecs.enabled + +- name: Initialise the domains loop var with main domain entry DNS settings. + ansible.builtin.set_fact: + _aws_ecs_cluster_dns_all_domains: + - domain: "{{ deploy_container.aws_ecs.domain_name }}" + zone: "{{ deploy_container.aws_ecs.route_53.zone }}" + aws_profile: "{{ deploy_container.aws_ecs.route_53.aws_profile }}" + when: deploy_container.aws_ecs.enabled + +- name: Add extra_domains so we can loop through DNS records. + ansible.builtin.set_fact: + _aws_ecs_cluster_dns_all_domains: "{{ _aws_ecs_cluster_dns_all_domains + [{'domain': item.domain, 'zone': item.zone, 'aws_profile': item.aws_profile}] }}" + loop: "{{ deploy_container.aws_ecs.acm.extra_domains }}" + when: + - deploy_container.aws_ecs.acm.extra_domains | length > 0 + - deploy_container.aws_ecs.enabled + +- name: Add DNS records in Route 53. 
+ amazon.aws.route53: + state: present + profile: "{{ item.aws_profile }}" + zone: "{{ item.zone }}" + record: "{{ item.domain }}" + type: CNAME + value: "{{ _aws_ecs_cluster_alb.dns_name }}" + overwrite: true + loop: "{{ _aws_ecs_cluster_dns_all_domains }}" + when: + - deploy_container.aws_ecs.route_53.zone | length > 0 + - deploy_container.aws_ecs.enabled diff --git a/roles/deploy_container/tasks/action-destroy.yml b/roles/deploy_container/tasks/action-destroy.yml new file mode 100644 index 00000000..b0b6bd99 --- /dev/null +++ b/roles/deploy_container/tasks/action-destroy.yml @@ -0,0 +1,254 @@ +--- +# Be sure to include your deploy_container variables in your playbook +- name: Set Docker registry username and password. + ansible.builtin.set_fact: + _docker_registry_username: "{{ deploy_container.docker_registry_user }}" + _docker_registry_password: "{{ deploy_container.docker_registry_pass }}" + delegate_to: localhost + +- name: Fetch AWS ECR registry login token. # token valid for 12 hours + ansible.builtin.command: + cmd: "aws ecr get-login-password --region {{ deploy_container.aws_ecr.region }} --profile {{ deploy_container.aws_ecr.aws_profile }}" + when: deploy_container.aws_ecr.enabled + delegate_to: localhost + register: _docker_registry_ecr_token + +- name: Set AWS ECR registry password. + ansible.builtin.set_fact: + _docker_registry_password: "{{ _docker_registry_ecr_token.stdout }}" + when: deploy_container.aws_ecr.enabled + delegate_to: localhost + +- name: Set AWS ECR registry username. + ansible.builtin.set_fact: + _docker_registry_username: "AWS" + when: deploy_container.aws_ecr.enabled + delegate_to: localhost + +- name: Remove Docker credentials file. + ansible.builtin.file: + state: absent + path: "/home/{{ deploy_user }}/.docker/config.json" + delegate_to: localhost + +- name: Log into Docker registry. + community.docker.docker_login: + registry_url: "{{ deploy_container.docker_registry_url }}" + username: "{{ _docker_registry_username }}" + password: "{{ _docker_registry_password }}" + reauthorize: true + delegate_to: localhost + +- name: Destroy matching container images. + community.docker.docker_image: + name: "{{ deploy_container.docker_registry_name }}/{{ deploy_container.container_name }}" + tag: "{{ deploy_container.container_tag | default('latest') }}" + force_absent: true + state: absent + delegate_to: localhost + +# Destroy AWS services +- name: Get minimal ALB information before we destroy it. + amazon.aws.elb_application_lb_info: + region: "{{ deploy_container.aws_ecs.region }}" + profile: "{{ deploy_container.aws_ecs.aws_profile }}" + names: "{{ deploy_container.aws_ecs.target_group_name | truncate(32, true, '', 0) }}" # 32 char limit + #include_attributes: false # @TODO - these attributes added in amazon.aws 7.0.0 + #include_listeners: false + #include_listener_rules: false + register: _aws_ecs_cluster_alb + delegate_to: localhost + when: + - deploy_container.aws_ecs.enabled + +- name: Destroy ALB. + amazon.aws.elb_application_lb: + region: "{{ deploy_container.aws_ecs.region }}" + profile: "{{ deploy_container.aws_ecs.aws_profile }}" + name: "{{ deploy_container.aws_ecs.target_group_name | truncate(32, true, '', 0) }}" # 32 char limit + state: absent + wait: true + when: + - deploy_container.aws_ecs.enabled + +- name: Destroy target group. 
+ community.aws.elb_target_group: + region: "{{ deploy_container.aws_ecs.region }}" + profile: "{{ deploy_container.aws_ecs.aws_profile }}" + name: "{{ deploy_container.aws_ecs.target_group_name | truncate(32, true, '', 0) }}" # 32 char limit + state: absent + wait: true + delegate_to: localhost + when: + - deploy_container.aws_ecs.enabled + +- name: Reduce task count to zero on ECS service. + community.aws.ecs_service: + region: "{{ deploy_container.aws_ecs.region }}" + profile: "{{ deploy_container.aws_ecs.aws_profile }}" + state: present + name: "{{ deploy_container.aws_ecs.family_name }}" + cluster: "{{ deploy_container.aws_ecs.cluster_name }}" + desired_count: 0 + force_new_deployment: true + wait: true + delegate_to: localhost + when: + - deploy_container.aws_ecs.enabled + +- name: Destroy ECS service. + community.aws.ecs_service: + region: "{{ deploy_container.aws_ecs.region }}" + profile: "{{ deploy_container.aws_ecs.aws_profile }}" + state: absent + name: "{{ deploy_container.aws_ecs.family_name }}" + cluster: "{{ deploy_container.aws_ecs.cluster_name }}" + wait: true + delegate_to: localhost + when: + - deploy_container.aws_ecs.enabled + +# @TODO: We cannot currently use the module for this +# See feature request: https://github.com/ansible-collections/community.aws/issues/2023 +#- name: Get task definition details. +# community.aws.ecs_taskdefinition_info: +# region: "{{ deploy_container.aws_ecs.region }}" +# profile: "{{ deploy_container.aws_ecs.aws_profile }}" +# task_definition: "{{ deploy_container.aws_ecs.family_name }}" +# register: _task_definition_info +# delegate_to: localhost + +- name: Ensure the task definitions ARN list variable is empty. + ansible.builtin.set_fact: + _task_definition_arns_list: [] + when: + - deploy_container.aws_ecs.enabled + +- name: Get active task definition details. + ansible.builtin.command: + cmd: "aws ecs list-task-definitions --status ACTIVE --family-prefix {{ deploy_container.aws_ecs.family_name }} --region {{ deploy_container.aws_ecs.region }} --profile {{ deploy_container.aws_ecs.aws_profile }}" + register: _task_definition_arns_raw + delegate_to: localhost + when: + - deploy_container.aws_ecs.enabled + +- name: Convert CLI output of active ARNs to a YAML variable. + ansible.builtin.set_fact: + _task_definition_arns: "{{ _task_definition_arns_raw.stdout | from_json }}" + when: + - deploy_container.aws_ecs.enabled + +- name: Create a clean list of task definition ARNs. + ansible.builtin.set_fact: + _task_definition_arns_list: "{{ _task_definition_arns.taskDefinitionArns }}" + when: + - deploy_container.aws_ecs.enabled + +- name: Get inactive task definition details. + ansible.builtin.command: + cmd: "aws ecs list-task-definitions --status INACTIVE --family-prefix {{ deploy_container.aws_ecs.family_name }} --region {{ deploy_container.aws_ecs.region }} --profile {{ deploy_container.aws_ecs.aws_profile }}" + register: _task_definition_arns_raw + delegate_to: localhost + when: + - deploy_container.aws_ecs.enabled + +- name: Convert CLI output of inactive ARNs to a YAML variable. + ansible.builtin.set_fact: + _task_definition_arns: "{{ _task_definition_arns_raw.stdout | from_json }}" + when: + - deploy_container.aws_ecs.enabled + +- name: Add inactive task definition ARNs to the YAML list. + ansible.builtin.set_fact: + _task_definition_arns_list: "{{ _task_definition_arns_list + _task_definition_arns.taskDefinitionArns }}" + when: + - deploy_container.aws_ecs.enabled + +- name: Deregister task definitions. 
+ community.aws.ecs_taskdefinition: + region: "{{ deploy_container.aws_ecs.region }}" + profile: "{{ deploy_container.aws_ecs.aws_profile }}" + arn: "{{ item }}" + containers: "{{ deploy_container.aws_ecs.containers }}" + state: absent + delegate_to: localhost + with_items: "{{ _task_definition_arns_list }}" + when: + - deploy_container.aws_ecs.enabled + +- name: Delete task definitions. + ansible.builtin.command: + cmd: "aws ecs delete-task-definitions --task-definitions {{ item }} --region {{ deploy_container.aws_ecs.region }} --profile {{ deploy_container.aws_ecs.aws_profile }}" + delegate_to: localhost + with_items: "{{ _task_definition_arns_list }}" + when: + - deploy_container.aws_ecs.enabled + +- name: Destroy scaling policy for ECS service. + community.aws.application_autoscaling_policy: + region: "{{ deploy_container.aws_ecs.region }}" + profile: "{{ deploy_container.aws_ecs.aws_profile }}" + state: absent + policy_name: "{{ deploy_container.aws_ecs.family_name }}" + service_namespace: ecs + policy_type: TargetTrackingScaling + resource_id: "service/{{ deploy_container.aws_ecs.cluster_name }}/{{ deploy_container.aws_ecs.family_name }}" + scalable_dimension: ecs:service:DesiredCount + delegate_to: localhost + when: + - deploy_container.aws_ecs.enabled + +# Clean up SSL certificates +- name: Delete the main ACM certificate. + community.aws.acm_certificate: + region: "{{ deploy_container.aws_ecs.region }}" + profile: "{{ deploy_container.aws_ecs.aws_profile }}" + domain_name: "{{ deploy_container.aws_ecs.domain_name }}" + state: absent + delegate_to: localhost + when: + - deploy_container.aws_ecs.enabled + - deploy_container.aws_ecs.acm.create_cert + +- name: Delete any extra ACM certificates. + community.aws.acm_certificate: + region: "{{ deploy_container.aws_ecs.region }}" + profile: "{{ deploy_container.aws_ecs.aws_profile }}" + domain_name: "{{ item.domain }}" + state: absent + with_items: "{{ deploy_container.aws_ecs.acm.extra_domains }}" + when: + - deploy_container.aws_ecs.acm.extra_domains | length > 0 + - deploy_container.aws_ecs.enabled + delegate_to: localhost + +# Clean up DNS +- name: Initialise the domains loop var with main domain entry DNS settings. + ansible.builtin.set_fact: + _aws_ecs_cluster_dns_all_domains: + - domain: "{{ deploy_container.aws_ecs.domain_name }}" + zone: "{{ deploy_container.aws_ecs.route_53.zone }}" + aws_profile: "{{ deploy_container.aws_ecs.route_53.aws_profile }}" + when: + - deploy_container.aws_ecs.enabled + +- name: Add extra_domains so we can loop through DNS records. + ansible.builtin.set_fact: + _aws_ecs_cluster_dns_all_domains: "{{ _aws_ecs_cluster_dns_all_domains + [{'domain': item.domain, 'zone': item.zone, 'aws_profile': item.aws_profile}] }}" + loop: "{{ deploy_container.aws_ecs.acm.extra_domains }}" + when: + - deploy_container.aws_ecs.enabled + - deploy_container.aws_ecs.acm.extra_domains | length > 0 + +- name: Remove DNS records in Route 53. 
+ amazon.aws.route53: + state: absent + profile: "{{ item.aws_profile }}" + zone: "{{ item.zone }}" + record: "{{ item.domain }}" + type: CNAME + value: "{{ _aws_ecs_cluster_alb.load_balancers[0].dns_name }}" + loop: "{{ _aws_ecs_cluster_dns_all_domains }}" + when: + - deploy_container.aws_ecs.enabled + - deploy_container.aws_ecs.route_53.zone | length > 0 diff --git a/roles/deploy_container/tasks/main.yml b/roles/deploy_container/tasks/main.yml index 6c68bebd..ce3e618f 100644 --- a/roles/deploy_container/tasks/main.yml +++ b/roles/deploy_container/tasks/main.yml @@ -1,363 +1,10 @@ --- -# Build and ship a container image -- name: Create Dockerfile from template. - ansible.builtin.template: - src: "{{ deploy_container.dockerfile_template }}" - dest: "{{ deploy_container.docker_build_dir }}/Dockerfile" - delegate_to: localhost - -- name: Set Docker registry username and password. - ansible.builtin.set_fact: - _docker_registry_username: "{{ deploy_container.docker_registry_user }}" - _docker_registry_password: "{{ deploy_container.docker_registry_pass }}" - delegate_to: localhost - -- name: Fetch AWS ECR registry login token. # token valid for 12 hours - ansible.builtin.command: - cmd: "aws ecr get-login-password --region {{ deploy_container.aws_ecr.region }} --profile {{ deploy_container.aws_ecr.aws_profile }}" - when: deploy_container.aws_ecr.enabled - delegate_to: localhost - register: _docker_registry_ecr_token - -- name: Set AWS ECR registry password. - ansible.builtin.set_fact: - _docker_registry_password: "{{ _docker_registry_ecr_token.stdout }}" - when: deploy_container.aws_ecr.enabled - delegate_to: localhost - -- name: Set AWS ECR registry username. - ansible.builtin.set_fact: - _docker_registry_username: "AWS" - when: deploy_container.aws_ecr.enabled - delegate_to: localhost - -- name: Remove Docker credentials file. - ansible.builtin.file: - state: absent - path: "/home/{{ deploy_user }}/.docker/config.json" - delegate_to: localhost - -- name: Log into Docker registry. - community.docker.docker_login: - registry_url: "{{ deploy_container.docker_registry_url }}" - username: "{{ _docker_registry_username }}" - password: "{{ _docker_registry_password }}" - reauthorize: true - delegate_to: localhost - -- name: Build and push container image. - community.docker.docker_image: - build: - path: "{{ deploy_container.docker_build_dir }}" - name: "{{ deploy_container.docker_registry_name }}/{{ deploy_container.container_name }}" - tag: "{{ deploy_container.container_tag | default('latest') }}" - push: true - source: build - force_source: "{{ deploy_container.container_force_build }}" - force_tag: "{{ deploy_container.container_force_build }}" - delegate_to: localhost - -# Fetch the ACM role from ce-provision -- name: Ensure the aws_acm directory exists. - ansible.builtin.file: - path: "{{ _ce_deploy_base_dir }}/roles/aws_acm/{{ item }}" - state: directory - mode: '0755' - delegate_to: localhost - with_items: - - tasks - - defaults - -- name: Fetch the aws_acm files. - ansible.builtin.get_url: - url: "https://raw.githubusercontent.com/codeenigma/ce-provision/1.x/roles/aws/aws_acm/{{ item }}/main.yml" - dest: "{{ _ce_deploy_base_dir }}/roles/aws_acm/{{ item }}/main.yml" - delegate_to: localhost - with_items: - - tasks - - defaults - -- name: Fetch the aws_acm tasks. 
- ansible.builtin.get_url: - url: https://raw.githubusercontent.com/codeenigma/ce-provision/1.x/roles/aws/aws_acm/tasks/main.yml - dest: "{{ _ce_deploy_base_dir }}/roles/aws_acm/tasks/main.yml" - delegate_to: localhost - -# Gather all network information -- name: Gather VPC information. - amazon.aws.ec2_vpc_net_info: - profile: "{{ deploy_container.aws_ecs.aws_profile }}" - region: "{{ deploy_container.aws_ecs.region }}" - filters: - "tag:Name": "{{ deploy_container.aws_ecs.vpc_name }}" - register: _aws_ecs_cluster_vpc - delegate_to: localhost - when: - - deploy_container.aws_ecs.enabled - - deploy_container.aws_ecs.vpc_name is defined - - deploy_container.aws_ecs.vpc_name | length > 0 - -- name: Set the VPC id from name. - ansible.builtin.set_fact: - _aws_ecs_cluster_vpc_id: "{{ _aws_ecs_cluster_vpc.vpcs[0].vpc_id }}" - when: - - deploy_container.aws_ecs.enabled - - deploy_container.aws_ecs.vpc_name is defined - - deploy_container.aws_ecs.vpc_name | length > 0 - -- name: Use provided VPC id. - ansible.builtin.set_fact: - _aws_ecs_cluster_vpc_id: "{{ deploy_container.aws_ecs.vpc_id }}" - when: - - deploy_container.aws_ecs.enabled - - (deploy_container.aws_ecs.vpc_name is not defined or deploy_container.aws_ecs.vpc_name | length < 0) - -- name: Reset subnets lists. - ansible.builtin.set_fact: - _aws_ecs_cluster_public_subnets_ids: [] - _aws_ecs_cluster_private_subnets_ids: [] - when: deploy_container.aws_ecs.enabled - -- name: Construct list of public subnet IDs. - ansible.builtin.include_tasks: subnet-public.yml - with_items: "{{ deploy_container.aws_ecs.elb_subnets }}" - loop_control: - loop_var: subnet - when: deploy_container.aws_ecs.enabled - -- name: Construct list of private subnet IDs. - ansible.builtin.include_tasks: subnet-private.yml - with_items: "{{ deploy_container.aws_ecs.service_subnets }}" - loop_control: - loop_var: subnet - when: deploy_container.aws_ecs.enabled - -# Construct AWS supporting assets -- name: Create task definition. - community.aws.ecs_taskdefinition: - region: "{{ deploy_container.aws_ecs.region }}" - profile: "{{ deploy_container.aws_ecs.aws_profile }}" - family: "{{ deploy_container.aws_ecs.family_name }}" - execution_role_arn: "{{ deploy_container.aws_ecs.task_execution_role_arn }}" - task_role_arn: "{{ deploy_container.aws_ecs.task_role_arn | default(omit) }}" - containers: "{{ deploy_container.aws_ecs.containers }}" - launch_type: "{{ deploy_container.aws_ecs.launch_type }}" - cpu: "{{ deploy_container.aws_ecs.cpu | default(omit) }}" - memory: "{{ deploy_container.aws_ecs.memory | default(omit) }}" - state: present - network_mode: "{{ deploy_container.aws_ecs.network_mode }}" - volumes: "{{ deploy_container.aws_ecs.volumes | default(omit) }}" - force_create: "{{ deploy_container.aws_ecs.task_definition_force_create }}" - delegate_to: localhost - when: deploy_container.aws_ecs.enabled - -- name: Create a target group with IP address targets. 
- community.aws.elb_target_group: - region: "{{ deploy_container.aws_ecs.region }}" - profile: "{{ deploy_container.aws_ecs.aws_profile }}" - name: "{{ deploy_container.aws_ecs.target_group_name | truncate(32, true, '', 0) }}" # 32 char limit - protocol: "{{ deploy_container.aws_ecs.target_group_protocol }}" - port: "{{ deploy_container.aws_ecs.target_group_port }}" - vpc_id: "{{ _aws_ecs_cluster_vpc_id }}" - health_check_protocol: "{{ deploy_container.aws_ecs.health_check.protocol }}" - health_check_path: "{{ deploy_container.aws_ecs.health_check.path }}" - successful_response_codes: "{{ deploy_container.aws_ecs.health_check.response_codes }}" - target_type: ip - targets: "{{ deploy_container.aws_ecs.targets }}" - state: present - wait_timeout: "{{ deploy_container.aws_ecs.target_group_wait_timeout }}" - wait: true - register: _aws_ecs_target_group - delegate_to: localhost - when: deploy_container.aws_ecs.enabled - -- name: Create SSL certificate for load balancer. - ansible.builtin.include_role: - name: aws_acm - vars: - aws_acm: - region: "{{ deploy_container.aws_ecs.region }}" - aws_profile: "{{ deploy_container.aws_ecs.aws_profile }}" - tags: "{{ deploy_container.aws_ecs.tags }}" - export: false - domain_name: "{{ deploy_container.aws_ecs.domain_name }}" - extra_domains: "{{ deploy_container.aws_ecs.acm.extra_domains }}" - route_53: - aws_profile: "{{ deploy_container.aws_ecs.route_53.aws_profile }}" - zone: "{{ deploy_container.aws_ecs.route_53.zone }}" - when: - - deploy_container.aws_ecs.acm.create_cert - - deploy_container.aws_ecs.enabled - -- name: Default to provided SSL certificate ARN. - ansible.builtin.set_fact: - _ssl_certificate_ARN: "{{ deploy_container.aws_ecs.ssl_certificate_ARN }}" - when: deploy_container.aws_ecs.enabled - -- name: If provided, override SSL certificate ARN with the one received from ACM. - ansible.builtin.set_fact: - _ssl_certificate_ARN: "{{ aws_acm_certificate_arn }}" - when: - - deploy_container.aws_ecs.acm.create_cert - - deploy_container.aws_ecs.enabled - -- name: Define default ALB listeners. - ansible.builtin.set_fact: - _aws_ecs_cluster_listeners_http: - Protocol: HTTP - Port: "{{ deploy_container.aws_ecs.elb_http_port }}" - DefaultActions: - - Type: forward - TargetGroupName: "{{ deploy_container.aws_ecs.target_group_name | truncate(32, true, '', 0) }}" - Rules: "{{ deploy_container.aws_ecs.elb_listener_http_rules }}" - _aws_ecs_cluster_listeners_redirect: - Protocol: HTTP - Port: "{{ deploy_container.aws_ecs.elb_http_port }}" - DefaultActions: - - Type: redirect - RedirectConfig: - Protocol: HTTPS - Host: "#{host}" - Query: "#{query}" - Path: "/#{path}" - Port: "{{ deploy_container.aws_ecs.elb_https_port }}" - StatusCode: HTTP_301 - _aws_ecs_cluster_listeners_https: - Protocol: HTTPS - Port: "{{ deploy_container.aws_ecs.elb_https_port }}" - SslPolicy: "{{ deploy_container.aws_ecs.elb_ssl_policy }}" - Certificates: - - CertificateArn: "{{ _ssl_certificate_ARN }}" - DefaultActions: - - Type: forward - TargetGroupName: "{{ deploy_container.aws_ecs.target_group_name | truncate(32, true, '', 0) }}" - Rules: "{{ deploy_container.aws_ecs.elb_listener_https_rules }}" - when: deploy_container.aws_ecs.enabled - -- name: Add HTTP listeners. - ansible.builtin.set_fact: - _aws_ecs_cluster_listeners: "{{ [ _aws_ecs_cluster_listeners_http ] }}" - when: - - _ssl_certificate_ARN | length < 1 - - deploy_container.aws_ecs.enabled - -- name: Add HTTPS Listener. 
- ansible.builtin.set_fact: - _aws_ecs_cluster_listeners: "{{ [ _aws_ecs_cluster_listeners_redirect, _aws_ecs_cluster_listeners_https ] }}" - when: - - _ssl_certificate_ARN | length > 1 - - deploy_container.aws_ecs.enabled - -- name: Add custom Listeners. - ansible.builtin.set_fact: - _aws_ecs_cluster_listeners: "{{ _aws_ecs_cluster_listeners + deploy_container.aws_ecs.elb_listeners }}" - when: - - deploy_container.aws_ecs.elb_listeners is defined - - deploy_container.aws_ecs.elb_listeners | length - - deploy_container.aws_ecs.enabled - -- name: Create an ALB. - amazon.aws.elb_application_lb: - region: "{{ deploy_container.aws_ecs.region }}" - profile: "{{ deploy_container.aws_ecs.aws_profile }}" - name: "{{ deploy_container.aws_ecs.target_group_name | truncate(32, true, '', 0) }}" # 32 char limit - state: present - tags: "{{ deploy_container.aws_ecs.tags }}" - subnets: "{{ _aws_ecs_cluster_public_subnets_ids }}" - security_groups: "{{ deploy_container.aws_ecs.elb_security_groups }}" - listeners: "{{ _aws_ecs_cluster_listeners }}" - idle_timeout: "{{ deploy_container.aws_ecs.elb_idle_timeout }}" - ip_address_type: "{{ deploy_container.aws_ecs.elb_ip_address_type }}" - register: _aws_ecs_cluster_alb - delegate_to: localhost - when: deploy_container.aws_ecs.enabled - -- name: Set task definition name. - ansible.builtin.set_fact: - _aws_ecs_service_task_definition: "{{ deploy_container.aws_ecs.family_name }}" - when: deploy_container.aws_ecs.enabled - -- name: Set task definition revision if applicable. - ansible.builtin.set_fact: - _aws_ecs_service_task_definition: "{{ deploy_container.aws_ecs.family_name }}:{{ deploy_container.aws_ecs.task_definition_revision }}" - when: - - deploy_container.aws_ecs.task_definition_revision | length > 0 - - deploy_container.aws_ecs.enabled - -- name: Create ECS service. - community.aws.ecs_service: - region: "{{ deploy_container.aws_ecs.region }}" - profile: "{{ deploy_container.aws_ecs.aws_profile }}" - state: present - name: "{{ deploy_container.aws_ecs.family_name }}" - cluster: "{{ deploy_container.aws_ecs.cluster_name }}" - task_definition: "{{ _aws_ecs_service_task_definition }}" - desired_count: "{{ deploy_container.aws_ecs.task_count }}" - launch_type: "{{ deploy_container.aws_ecs.launch_type }}" - platform_version: LATEST - load_balancers: # see https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_LoadBalancer.html - - containerName: "{{ deploy_container.container_name }}" - containerPort: "{{ deploy_container.aws_ecs.target_group_port }}" - targetGroupArn: "{{ _aws_ecs_target_group.target_group_arn }}" - network_configuration: - subnets: "{{ _aws_ecs_cluster_private_subnets_ids }}" # internal private subnet - security_groups: "{{ deploy_container.aws_ecs.security_groups }}" - assign_public_ip: "{{ deploy_container.aws_ecs.service_public_container_ip }}" - tags: "{{ deploy_container.aws_ecs.tags }}" - enable_execute_command: "{{ deploy_container.aws_ecs.service_enable_ssm }}" - force_new_deployment: "{{ deploy_container.aws_ecs.service_force_refresh }}" - wait: true - delegate_to: localhost - when: deploy_container.aws_ecs.enabled - -- name: Create target tracking scaling policy for ECS service. 
- community.aws.application_autoscaling_policy: - region: "{{ deploy_container.aws_ecs.region }}" - profile: "{{ deploy_container.aws_ecs.aws_profile }}" - state: present - policy_name: "{{ deploy_container.aws_ecs.family_name }}" - service_namespace: ecs - resource_id: "service/{{ deploy_container.aws_ecs.cluster_name }}/{{ deploy_container.aws_ecs.family_name }}" - scalable_dimension: ecs:service:DesiredCount - minimum_tasks: "{{ deploy_container.aws_ecs.task_minimum_count }}" - maximum_tasks: "{{ deploy_container.aws_ecs.task_maximum_count }}" - policy_type: TargetTrackingScaling - target_tracking_scaling_policy_configuration: - PredefinedMetricSpecification: - PredefinedMetricType: "{{ deploy_container.aws_ecs.service_autoscale_metric_type }}" - ScaleInCooldown: "{{ deploy_container.aws_ecs.service_autoscale_up_cooldown }}" - ScaleOutCooldown: "{{ deploy_container.aws_ecs.service_autoscale_down_cooldown }}" - DisableScaleIn: false - TargetValue: "{{ deploy_container.aws_ecs.service_autoscale_target_value }}" - delegate_to: localhost - when: deploy_container.aws_ecs.enabled - -- name: Initialise the domains loop var with main domain entry DNS settings. - ansible.builtin.set_fact: - _aws_ecs_cluster_dns_all_domains: - - domain: "{{ deploy_container.aws_ecs.domain_name }}" - zone: "{{ deploy_container.aws_ecs.route_53.zone }}" - aws_profile: "{{ deploy_container.aws_ecs.route_53.aws_profile }}" - when: deploy_container.aws_ecs.enabled - -- name: Add extra_domains so we can loop through DNS records. - ansible.builtin.set_fact: - _aws_ecs_cluster_dns_all_domains: "{{ _aws_ecs_cluster_dns_all_domains + [{'domain': item.domain, 'zone': item.zone, 'aws_profile': item.aws_profile}] }}" - loop: "{{ deploy_container.aws_ecs.acm.extra_domains }}" - when: - - deploy_container.aws_ecs.acm.extra_domains | length > 0 - - deploy_container.aws_ecs.enabled - -- name: Add DNS records in Route 53. - amazon.aws.route53: - state: present - profile: "{{ item.aws_profile }}" - zone: "{{ item.zone }}" - record: "{{ item.domain }}" - type: CNAME - value: "{{ _aws_ecs_cluster_alb.dns_name }}" - overwrite: true - loop: "{{ _aws_ecs_cluster_dns_all_domains }}" - when: - - deploy_container.aws_ecs.route_53.zone | length > 0 - - deploy_container.aws_ecs.enabled \ No newline at end of file +- name: Execute a container deployment. + ansible.builtin.include_tasks: + file: "action-{{ deploy_container.action }}.yml" + when: deploy_operation == 'deploy' + +- name: Cleanup dangling containers. + ansible.builtin.include_tasks: + file: action-cleanup.yml + when: deploy_operation == 'cleanup' From c15078a16e92af262e0d0a6107e83cbcc9415233 Mon Sep 17 00:00:00 2001 From: gregharvey Date: Wed, 20 Dec 2023 18:32:52 +0100 Subject: [PATCH 27/28] Documentation update. --- docs/roles/cache_clear/cache_clear-opcache.md | 2 +- docs/roles/database_backup/database_backup-mysql.md | 1 - docs/roles/deploy_code.md | 2 ++ docs/roles/deploy_container.md | 3 ++- roles/cache_clear/cache_clear-opcache/README.md | 2 +- roles/database_backup/database_backup-mysql/README.md | 1 - roles/deploy_code/README.md | 2 ++ roles/deploy_container/README.md | 3 ++- 8 files changed, 10 insertions(+), 6 deletions(-) diff --git a/docs/roles/cache_clear/cache_clear-opcache.md b/docs/roles/cache_clear/cache_clear-opcache.md index bb0bd05d..267485f4 100644 --- a/docs/roles/cache_clear/cache_clear-opcache.md +++ b/docs/roles/cache_clear/cache_clear-opcache.md @@ -14,7 +14,7 @@ cache_clear_opcache: # eg. 
# --fcgi=127.0.0.1:9000 # Leave blank to use /etc/cachetool.yml - # adapter: "127.0.0.1:9081" # Leave commented to automatically detect the adapter based on PHP version. + # adapter: "--fcgi=127.0.0.1:9081" # Leave commented to automatically detect the adapter based on PHP version. # Bins to clear. clear_opcache: true clear_apcu: false diff --git a/docs/roles/database_backup/database_backup-mysql.md b/docs/roles/database_backup/database_backup-mysql.md index 6637e2da..1200f97f 100644 --- a/docs/roles/database_backup/database_backup-mysql.md +++ b/docs/roles/database_backup/database_backup-mysql.md @@ -29,7 +29,6 @@ mysql_backup: credentials_file: "/home/{{ deploy_user }}/.mysql.creds" #handling: static # optional override to the main handling method on a per database basis - ``` diff --git a/docs/roles/deploy_code.md b/docs/roles/deploy_code.md index 377ca80c..5d4cc722 100644 --- a/docs/roles/deploy_code.md +++ b/docs/roles/deploy_code.md @@ -121,6 +121,8 @@ deploy_code: service_action: reload # Trigger an API call to rebuild infra after a deploy, e.g. if you need to repack an AMI. rebuild_infra: false + # Used to skip tasks to fix ownership and permissions, drupal needs this set to true by default + fix_cleanup_perms: true # Details of API call to trigger. See api_call role. api_call: type: gitlab diff --git a/docs/roles/deploy_container.md b/docs/roles/deploy_container.md index f127bb61..752e3f59 100644 --- a/docs/roles/deploy_container.md +++ b/docs/roles/deploy_container.md @@ -41,6 +41,7 @@ It is worth noting that even if you put your containers on private subnets and c ```yaml --- deploy_container: + action: create # can also be destroy container_name: example-container container_tag: latest # tag will take format container_name:container_tag container_force_build: true # force Docker to build and tag a new image @@ -109,7 +110,7 @@ deploy_container: launch_type: FARGATE network_mode: awsvpc #volumes: [] # list of additional volumes to attach - target_group_name: example # 32 character limit + target_group_name: example # can have a maximum of 32 characters, must contain only alphanumeric characters or hyphens, and must not begin or end with a hyphen target_group_protocol: http target_group_port: 8080 # ports lower than 1024 will require the app to be configured to run as a privileged user in the Dockerfile target_group_wait_timeout: 200 # how long to wait for target group events to complete diff --git a/roles/cache_clear/cache_clear-opcache/README.md b/roles/cache_clear/cache_clear-opcache/README.md index bb0bd05d..267485f4 100644 --- a/roles/cache_clear/cache_clear-opcache/README.md +++ b/roles/cache_clear/cache_clear-opcache/README.md @@ -14,7 +14,7 @@ cache_clear_opcache: # eg. # --fcgi=127.0.0.1:9000 # Leave blank to use /etc/cachetool.yml - # adapter: "127.0.0.1:9081" # Leave commented to automatically detect the adapter based on PHP version. + # adapter: "--fcgi=127.0.0.1:9081" # Leave commented to automatically detect the adapter based on PHP version. # Bins to clear. 
clear_opcache: true clear_apcu: false diff --git a/roles/database_backup/database_backup-mysql/README.md b/roles/database_backup/database_backup-mysql/README.md index 6637e2da..1200f97f 100644 --- a/roles/database_backup/database_backup-mysql/README.md +++ b/roles/database_backup/database_backup-mysql/README.md @@ -29,7 +29,6 @@ mysql_backup: credentials_file: "/home/{{ deploy_user }}/.mysql.creds" #handling: static # optional override to the main handling method on a per database basis - ``` diff --git a/roles/deploy_code/README.md b/roles/deploy_code/README.md index 377ca80c..5d4cc722 100644 --- a/roles/deploy_code/README.md +++ b/roles/deploy_code/README.md @@ -121,6 +121,8 @@ deploy_code: service_action: reload # Trigger an API call to rebuild infra after a deploy, e.g. if you need to repack an AMI. rebuild_infra: false + # Used to skip tasks to fix ownership and permissions, drupal needs this set to true by default + fix_cleanup_perms: true # Details of API call to trigger. See api_call role. api_call: type: gitlab diff --git a/roles/deploy_container/README.md b/roles/deploy_container/README.md index f127bb61..752e3f59 100644 --- a/roles/deploy_container/README.md +++ b/roles/deploy_container/README.md @@ -41,6 +41,7 @@ It is worth noting that even if you put your containers on private subnets and c ```yaml --- deploy_container: + action: create # can also be destroy container_name: example-container container_tag: latest # tag will take format container_name:container_tag container_force_build: true # force Docker to build and tag a new image @@ -109,7 +110,7 @@ deploy_container: launch_type: FARGATE network_mode: awsvpc #volumes: [] # list of additional volumes to attach - target_group_name: example # 32 character limit + target_group_name: example # can have a maximum of 32 characters, must contain only alphanumeric characters or hyphens, and must not begin or end with a hyphen target_group_protocol: http target_group_port: 8080 # ports lower than 1024 will require the app to be configured to run as a privileged user in the Dockerfile target_group_wait_timeout: 200 # how long to wait for target group events to complete From 2cc01f13af1b49da7fe45bab4907c17096e5dd19 Mon Sep 17 00:00:00 2001 From: gregharvey Date: Wed, 20 Dec 2023 18:48:36 +0100 Subject: [PATCH 28/28] Adding a note to docs about the container destroy action. --- docs/roles/deploy_container.md | 2 ++ roles/deploy_container/README.md | 2 ++ 2 files changed, 4 insertions(+) diff --git a/docs/roles/deploy_container.md b/docs/roles/deploy_container.md index 752e3f59..06847722 100644 --- a/docs/roles/deploy_container.md +++ b/docs/roles/deploy_container.md @@ -8,6 +8,8 @@ sudo usermod -aG docker deploy This can be handled automatically by [`ce-provision`](https://github.com/codeenigma/ce-provision) using the `ce_deploy` and `docker_ce` roles. +If you set the `deploy_container.action` to `destroy` then the role will also take care of tidying up containers. If it is an AWS ECS deployment then it will also tidy up the ECS service for you. + ## AWS IAM requirements AWS integration requires the AWS CLI user provided for `ce-deploy` to have certain managed AWS policies attached. 
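
For illustration, a teardown run as described in the note above might be driven by a variable override along these lines in a ce-deploy build playbook. This is a minimal sketch, assuming an ECR registry and an ECS-managed service; every concrete value (registry address, AWS profile, cluster, family and container names) is a placeholder rather than a shipped default, and a real run would also need the remaining `aws_ecs` keys (target group, ACM and Route 53 settings) populated as in the defaults shown earlier.

```yaml
# Hypothetical variables for a destroy run, illustrative values only.
deploy_container:
  action: destroy # makes main.yml include action-destroy.yml instead of the create tasks
  container_name: example-container
  container_tag: latest
  docker_registry_name: "000000000000.dkr.ecr.eu-west-1.amazonaws.com" # placeholder ECR registry
  docker_registry_url: "https://000000000000.dkr.ecr.eu-west-1.amazonaws.com"
  aws_ecr:
    enabled: true # fetch a short-lived ECR login token instead of using a static password
    region: eu-west-1
    aws_profile: example
  aws_ecs:
    enabled: true # also remove the ECS service, ALB, target group, certificates and DNS records
    region: eu-west-1
    aws_profile: example
    cluster_name: example-cluster
    family_name: example-service
```

With `aws_ecs.enabled` left at `false`, the same run should only log in to the registry and remove the matching container images.
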
diff --git a/roles/deploy_container/README.md b/roles/deploy_container/README.md index 752e3f59..06847722 100644 --- a/roles/deploy_container/README.md +++ b/roles/deploy_container/README.md @@ -8,6 +8,8 @@ sudo usermod -aG docker deploy This can be handled automatically by [`ce-provision`](https://github.com/codeenigma/ce-provision) using the `ce_deploy` and `docker_ce` roles. +If you set the `deploy_container.action` to `destroy` then the role will also take care of tidying up containers. If it is an AWS ECS deployment then it will also tidy up the ECS service for you. + ## AWS IAM requirements AWS integration requires the AWS CLI user provided for `ce-deploy` to have certain managed AWS policies attached.
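
As a footnote to the IAM requirement above, attaching a managed policy to the CLI user is normally a one-off administrative step handled outside this role. A rough sketch of doing it from a playbook with the AWS CLI is shown below; the user name, profile and policy ARN are placeholders to be replaced with whatever your setup actually requires.

```yaml
# Hypothetical one-off task, not part of this role: attach a managed policy
# to the IAM user that ce-deploy uses. All values below are placeholders.
- name: Attach a managed policy to the deploy CLI user.
  ansible.builtin.command:
    cmd: >-
      aws iam attach-user-policy
      --user-name ce-deploy
      --policy-arn arn:aws:iam::aws:policy/EC2InstanceProfileForImageBuilderECRContainerBuilds
      --profile example
  delegate_to: localhost
  run_once: true
```

The same attachment can equally be made once in the AWS console or by whatever provisioning tooling manages your IAM users.
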