diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 00000000..a987d842 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,29 @@ +# Build as: docker build -t aws-stack . +# Run as: docker run --rm -it --net=host \ +# -v $PWD:$PWD -w $PWD \ +# -v /tmp:/tmp -v ~/.aws:/root/.aws \ +# -e AWS_DEFAULT_REGION=us-east-1 aws-stack make amis + +FROM python:3.5-slim +RUN pip3 install mypy-lang==0.4 flake8==2.5.4 pyyaml boto3 +RUN apt-get update \ + && apt-get install -y curl unzip make \ + && apt-get clean + +COPY tools /usr/local/bin +RUN curl -sL "https://releases.hashicorp.com/terraform/0.7.2/terraform_0.7.2_linux_amd64.zip" > terraform.zip \ + && unzip terraform.zip \ + && mv terraform /usr/local/bin + +RUN curl -sL "https://releases.hashicorp.com/packer/0.10.1/packer_0.10.1_linux_amd64.zip" > packer.zip \ + && unzip packer.zip \ + && mv packer /usr/local/bin + +RUN curl -sL -o /usr/local/bin/dumb-init https://github.com/Yelp/dumb-init/releases/download/v1.1.3/dumb-init_1.1.3_amd64 && chmod +x /usr/local/bin/dumb-init + +ENTRYPOINT ["/usr/local/bin/dumb-init"] + +ADD . /src + +RUN cd /src && make install + diff --git a/Makefile b/Makefile index 80d3d819..e58e87f8 100644 --- a/Makefile +++ b/Makefile @@ -25,10 +25,10 @@ endif ifeq (${platform},Darwin) install-python-dependencies: - sudo -H pip install --upgrade ${pydeps} + sudo -H pip3 install --upgrade ${pydeps} else install-python-dependencies: - pip install --upgrade pyyaml boto3 + pip3 install --upgrade pyyaml boto3 endif install-tools: $(tools) diff --git a/Readme.md b/Readme.md index c68fe183..1f6da9a6 100644 --- a/Readme.md +++ b/Readme.md @@ -1,3 +1,7 @@ +# ⚠️ Unmaintained ⚠️ + +This repository is unmaintained, but left as a historical relic for any wishing to adapt it. Godspeed! + # Segment Stack [![CircleCI](https://circleci.com/gh/segmentio/stack.svg?style=shield&circle-token=21d1df0dfd7e405582403f65cd1a270f9f52d7a4)](https://circleci.com/gh/segmentio/stack) [terraform]: https://terraform.io @@ -53,7 +57,7 @@ This will automatically set up your basic networking configuration with an auto-s Now that we've got all the basics set up, how about adding a service? -Services pull images from Docker Hub and then run the images as contianers via ECS. They are automatically discoverable at `` and will run with zero-downtime deploys. +Services pull images from Docker Hub and then run the images as containers via ECS. They are automatically discoverable at `` and will run with zero-downtime deploys. We can use the `stack//service` module to automatically provision all of the required parts of the service, including a load balancer, ECS service, and Route53 DNS entry. Here's a sample service definition; try adding it to your `terraform.tf` file. @@ -155,12 +159,16 @@ traffic in and out of the different subnets. The Stack terraform will automatica Traffic from each internal subnet to the outside world will run through the associated NAT gateway. +Alternatively, setting the `use_nat_instances` VPC module variable to true will use [EC2 NAT instances][nat-instances] instead of the NAT gateway. NAT instances cost less than the NAT gateway, can be shut down when not in use, and may be preferred in development environments. By default, NAT instances will not use [Elastic IPs][elastic-ip] to avoid a small hourly charge if the NAT instances are not running full time. To use Elastic IPs for the NAT instances, set the `use_eip_with_nat_instances` VPC module variable to true, as sketched below. 
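To make the NAT-instance option concrete, here is a minimal sketch of how it could be set in a `terraform.tf` that uses the stack module (the variable names come from the stack input table later in this diff; the name, environment, and key values are placeholders):

```hcl
# Sketch: swap the managed NAT gateway for cheaper EC2 NAT instances.
# The name/environment/key_name values below are placeholders.
module "stack" {
  source      = "github.com/segmentio/stack"
  name        = "mystack"
  environment = "prod"
  key_name    = "internal-key"

  # Use EC2 NAT instances instead of the NAT gateway service.
  use_nat_instances = true

  # Optional: pin Elastic IPs to the NAT instances. Off by default to
  # avoid the small hourly EIP charge while the instances are stopped.
  use_eip_with_nat_instances = false
  nat_instance_type          = "t2.nano"
}
```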
+ For further reading, check out these sources: - [Recommended Address Space](http://serverfault.com/questions/630022/what-is-the-recommended-cidr-when-creating-vpc-on-aws) - [Practical VPC Design](https://medium.com/aws-activate-startup-blog/practical-vpc-design-8412e1a18dcc) [nat-gateway]: http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html +[nat-instances]: http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_NAT_Instance.html +[elastic-ip]: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html ### Instances diff --git a/circle.yml b/circle.yml index 04eab8e2..e0d3b56b 100644 --- a/circle.yml +++ b/circle.yml @@ -1,7 +1,7 @@ dependencies: override: - - sudo curl -L# https://releases.hashicorp.com/terraform/0.6.16/terraform_0.6.16_linux_amd64.zip -o /usr/local/bin/tf.zip + - sudo curl -L# https://releases.hashicorp.com/terraform/0.7.5/terraform_0.7.5_linux_amd64.zip -o /usr/local/bin/tf.zip - cd /usr/local/bin && sudo unzip tf.zip test: diff --git a/defaults/main.tf b/defaults/main.tf index bd0d6892..a312547c 100644 --- a/defaults/main.tf +++ b/defaults/main.tf @@ -24,16 +24,16 @@ variable "cidr" { variable "default_ecs_ami" { default = { - us-east-1 = "ami-5f3ff932" - us-west-1 = "ami-31c08551" - us-west-2 = "ami-f3985d93" - eu-west-1 = "ami-ab4bd5d8" - eu-central-1 = "ami-6c58b103" - ap-northeast-1 = "ami-a69d68c7" - ap-northeast-2 = "ami-7b2de615" - ap-southeast-1 = "ami-550dde36" - ap-southeast-2 = "ami-c799b0a4" - sa-east-1 = "ami-0274fe6e" + us-east-1 = "ami-dde4e6ca" + us-west-1 = "ami-6d21770d" + us-west-2 = "ami-97da70f7" + eu-west-1 = "ami-c41f3bb7" + eu-central-1 = "ami-4ba16024" + ap-northeast-1 = "ami-90ea86f7" + ap-northeast-2 = "ami-8a4b9ce4" + ap-southeast-1 = "ami-d603afb5" + ap-southeast-2 = "ami-1ddce47e" + sa-east-1 = "ami-29039a45" } } diff --git a/docs.md b/docs.md index 4398bb3d..89862249 100644 --- a/docs.md +++ b/docs.md @@ -1,11 +1,10 @@ - # Stack - The stack module combines sub modules to create a complete - stack with `vpc`, a default ecs cluster with auto scaling - and a bastion node that enables you to access all instances. +The stack module combines sub modules to create a complete +stack with `vpc`, a default ecs cluster with auto scaling +and a bastion node that enables you to access all instances. - Usage: +Usage: module "stack" { source = "github.com/segmentio/stack" @@ -13,7 +12,24 @@ environment = "prod" } - +## Available Modules + +* [stack](#stack) +* [bastion](#bastion) +* [defaults](#defaults) +* [dhcp](#dhcp) +* [dns](#dns) +* [ecs-cluster](#ecs-cluster) +* [elb](#elb) +* [iam-user](#iam-user) +* [rds-cluster](#rds-cluster) +* [s3-logs](#s3-logs) +* [security-groups](#security-groups) +* [service](#service) +* [task](#task) +* [vpc](#vpc) +* [web-service](#web-service) +* [worker](#worker) ## Inputs @@ -22,24 +38,32 @@ | name | the name of your stack, e.g. "segment" | - | yes | | environment | the name of your environment, e.g. "prod-west" | - | yes | | key_name | the name of the ssh key to use, e.g. 
"internal-key" | - | yes | -| domain_name | the internal DNS name to use with services | `"stack.local"` | no | -| domain_name_servers | the internal DNS servers, defaults to the internal route53 server of the VPC | `""` | no | -| region | the AWS region in which resources are created, you must set the availability_zones variable as well if you define this value to something other than the default | `"us-west-2"` | no | -| cidr | the CIDR block to provision for the VPC, if set to something other than the default, both internal_subnets and external_subnets have to be defined as well | `"10.30.0.0/16"` | no | -| internal_subnets | a comma-separated list of CIDRs for internal subnets in your VPC, must be set if the cidr variable is defined, needs to have as many elements as there are availability zones | `"10.30.0.0/19,10.30.64.0/19,10.30.128.0/19"` | no | -| external_subnets | a comma-separated list of CIDRs for external subnets in your VPC, must be set if the cidr variable is defined, needs to have as many elements as there are availability zones | `"10.30.32.0/20,10.30.96.0/20,10.30.160.0/20"` | no | -| availability_zones | a comma-separated list of availability zones, defaults to all AZ of the region, if set to something other than the defaults, both internal_subnets and external_subnets have to be defined as well | `"us-west-2a,us-west-2b,us-west-2c"` | no | -| ecs_instance_type | the instance type to use for your default ecs cluster | `"m4.large"` | no | -| ecs_instance_ebs_optimized | use EBS - not all instance types support EBS | `"true"` | no | +| domain_name | the internal DNS name to use with services | `stack.local` | no | +| domain_name_servers | the internal DNS servers, defaults to the internal route53 server of the VPC | `` | no | +| region | the AWS region in which resources are created, you must set the availability_zones variable as well if you define this value to something other than the default | `us-west-2` | no | +| cidr | the CIDR block to provision for the VPC, if set to something other than the default, both internal_subnets and external_subnets have to be defined as well | `10.30.0.0/16` | no | +| internal_subnets | a list of CIDRs for internal subnets in your VPC, must be set if the cidr variable is defined, needs to have as many elements as there are availability zones | `` | no | +| external_subnets | a list of CIDRs for external subnets in your VPC, must be set if the cidr variable is defined, needs to have as many elements as there are availability zones | `` | no | +| use_nat_instances | use NAT EC2 instances instead of the NAT gateway service | `false` | no | +| use_eip_with_nat_instances | use Elastic IPs with NAT instances if `use_nat_instances` is true | `false` | no | +| nat_instance_type | the EC2 instance type for NAT instances if `use_nat_instances` is true | `t2.nano` | no | +| nat_instance_ssh_key_name | the name of the ssh key to use with NAT instances if `use_nat_instances` is true | "" | no | +| availability_zones | a comma-separated list of availability zones, defaults to all AZ of the region, if set to something other than the defaults, both internal_subnets and external_subnets have to be defined as well | `` | no | +| bastion_instance_type | Instance type for the bastion | `t2.micro` | no | +| ecs_cluster_name | the name of the cluster, if not specified the variable name will be used | `` | no | +| ecs_instance_type | the instance type to use for your default ecs cluster | `m4.large` | no | +| ecs_instance_ebs_optimized | use EBS - not all 
instance types support EBS | `true` | no | | ecs_min_size | the minimum number of instances to use in the default ecs cluster | `3` | no | | ecs_max_size | the maximum number of instances to use in the default ecs cluster | `100` | no | | ecs_desired_capacity | the desired number of instances to use in the default ecs cluster | `3` | no | | ecs_root_volume_size | the size of the ecs instance root volume | `25` | no | | ecs_docker_volume_size | the size of the ecs instance docker volume | `25` | no | -| ecs_docker_auth_type | The docker auth type, see https://godoc.org/github.com/aws/amazon-ecs-agent/agent/engine/dockerauth for the possible values | `""` | no | -| ecs_docker_auth_data | A JSON object providing the docker auth data, see https://godoc.org/github.com/aws/amazon-ecs-agent/agent/engine/dockerauth for the supported formats | `""` | no | -| ecs_security_groups | A comma separated list of security groups from which ingest traffic will be allowed on the ECS cluster, it defaults to allowing ingress traffic on port 22 and coming grom the ELBs | `""` | no | -| ecs_ami | The AMI that will be used to launch EC2 instances in the ECS cluster | `""` | no | +| ecs_docker_auth_type | The docker auth type, see https://godoc.org/github.com/aws/amazon-ecs-agent/agent/engine/dockerauth for the possible values | `` | no | +| ecs_docker_auth_data | A JSON object providing the docker auth data, see https://godoc.org/github.com/aws/amazon-ecs-agent/agent/engine/dockerauth for the supported formats | `` | no | +| ecs_security_groups | A comma separated list of security groups from which ingress traffic will be allowed on the ECS cluster, it defaults to allowing ingress traffic on port 22 and coming from the ELBs | `` | no | +| ecs_ami | The AMI that will be used to launch EC2 instances in the ECS cluster | `` | no | +| extra_cloud_config_type | Extra cloud config type | `text/cloud-config` | no | +| extra_cloud_config_content | Extra cloud config content | `` | no | ## Outputs @@ -53,6 +77,7 @@ | internal_subnets | Comma separated list of internal subnet IDs. | | external_subnets | Comma separated list of external subnet IDs. | | iam_role | ECS Service IAM role. | +| iam_role_default_ecs_role_id | Default ECS role ID. Useful if you want to add a new policy to that role. | | log_bucket_id | S3 bucket ID for ELB logs. | | domain_name | The internal domain name, e.g "stack.local". | | environment | The environment of the stack, e.g "prod". | @@ -61,18 +86,20 @@ | vpc_security_group | The VPC security group ID. | | vpc_id | The VPC ID. | | ecs_cluster_security_group_id | The default ECS cluster security group ID. | +| internal_route_tables | Comma separated list of internal route table IDs. | +| external_route_tables | The external route table ID. | # bastion - The bastion host acts as the "jump point" for the rest of the infrastructure. - Since most of our instances aren't exposed to the external internet, the bastion acts as the gatekeeper for any direct SSH access. - The bastion is provisioned using the key name that you pass to the stack (and hopefully have stored somewhere). - If you ever need to access an instance directly, you can do it by "jumping through" the bastion. +The bastion host acts as the "jump point" for the rest of the infrastructure. +Since most of our instances aren't exposed to the external internet, the bastion acts as the gatekeeper for any direct SSH access. +The bastion is provisioned using the key name that you pass to the stack (and hopefully have stored somewhere). 
+If you ever need to access an instance directly, you can do it by "jumping through" the bastion. - $ terraform output # print the bastion ip - $ ssh -i ubuntu@ ssh ubuntu@ + $ terraform output # print the bastion ip + $ ssh -i ubuntu@ ssh ubuntu@ - Usage: +Usage: module "bastion" { source = "github.com/segmentio/stack/bastion" @@ -90,7 +117,7 @@ | Name | Description | Default | Required | |------|-------------|:-----:|:-----:| -| instance_type | Instance type, see a list at: https://aws.amazon.com/ec2/instance-types/ | `"t2.micro"` | no | +| instance_type | Instance type, see a list at: https://aws.amazon.com/ec2/instance-types/ | `t2.micro` | no | | region | AWS Region, e.g us-west-2 | - | yes | | security_groups | a comma separated lists of security group IDs | - | yes | | vpc_id | VPC ID | - | yes | @@ -106,18 +133,18 @@ # defaults - This module is used to set configuration defaults for the AWS infrastructure. - It doesn't provide much value when used on its own because terraform makes it - hard to do dynamic generations of things like subnets, for now it's used as - a helper module for the stack. +This module is used to set configuration defaults for the AWS infrastructure. +It doesn't provide much value when used on its own because terraform makes it +hard to do dynamic generations of things like subnets, for now it's used as +a helper module for the stack. - Usage: +Usage: - module "defaults" { - source = "github.com/segmentio/stack/defaults" - region = "us-east-1" - cidr = "10.0.0.0/16" - } + module "defaults" { + source = "github.com/segmentio/stack/defaults" + region = "us-east-1" + cidr = "10.0.0.0/16" + } @@ -127,8 +154,8 @@ |------|-------------|:-----:|:-----:| | region | The AWS region | - | yes | | cidr | The CIDR block to provision for the VPC | - | yes | -| default_ecs_ami | | - | yes | -| default_log_account_ids | | - | yes | +| default_ecs_ami | | `` | no | +| default_log_account_ids | # http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/enable-access-logs.html#attach-bucket-policy | `` | no | ## Outputs @@ -151,12 +178,12 @@ # dns - The dns module creates a local route53 zone that serves - as a service discovery utility. For example a service - resource with the name `auth` and a dns module - with the name `stack.local`, the service address will be `auth.stack.local`. +The dns module creates a local route53 zone that serves +as a service discovery utility. For example a service +resource with the name `auth` and a dns module +with the name `stack.local`, the service address will be `auth.stack.local`. 
- Usage: +Usage: module "dns" { source = "github.com/segment/stack" @@ -170,7 +197,7 @@ | Name | Description | Default | Required | |------|-------------|:-----:|:-----:| | name | Zone name, e.g stack.local | - | yes | -| vpc_id | The VPC ID (omit to create a public zone) | `""` | no | +| vpc_id | The VPC ID (omit to create a public zone) | `` | no | ## Outputs @@ -182,29 +209,29 @@ # ecs-cluster - ECS Cluster creates a cluster with the following features: +ECS Cluster creates a cluster with the following features: - - Autoscaling groups - - Instance tags for filtering - - EBS volume for docker resources + - Autoscaling groups + - Instance tags for filtering + - EBS volume for docker resources - Usage: +Usage: - module "cdn" { - source = "github.com/segmentio/stack/ecs-cluster" - environment = "prod" - name = "cdn" - vpc_id = "vpc-id" - image_id = "ami-id" - subnet_ids = "1,2" - key_name = "ssh-key" - security_groups = "1,2" - iam_instance_profile = "id" - region = "us-west-2" - availability_zones = "a,b" - instance_type = "t2.small" - } + module "cdn" { + source = "github.com/segmentio/stack/ecs-cluster" + environment = "prod" + name = "cdn" + vpc_id = "vpc-id" + image_id = "ami-id" + subnet_ids = ["1" ,"2"] + key_name = "ssh-key" + security_groups = "1,2" + iam_instance_profile = "id" + region = "us-west-2" + availability_zones = ["a", "b"] + instance_type = "t2.small" + } @@ -216,12 +243,12 @@ | environment | Environment tag, e.g prod | - | yes | | vpc_id | VPC ID | - | yes | | image_id | AMI Image ID | - | yes | -| subnet_ids | Comma separated list of subnet IDs | - | yes | +| subnet_ids | List of subnet IDs | - | yes | | key_name | SSH key name to use | - | yes | | security_groups | Comma separated list of security groups | - | yes | | iam_instance_profile | Instance profile ARN to use in the launch configuration | - | yes | | region | AWS Region | - | yes | -| availability_zones | Comma separated list of AZs | - | yes | +| availability_zones | List of AZs | - | yes | | instance_type | The instance type to use, e.g t2.small | - | yes | | instance_ebs_optimized | When set to true the instance will be launched with EBS optimized turned on | `true` | no | | min_size | Minimum instance count | `3` | no | @@ -230,8 +257,10 @@ | associate_public_ip_address | Should created instances be publicly accessible (if the SG allows) | `false` | no | | root_volume_size | Root volume size in GB | `25` | no | | docker_volume_size | Attached EBS volume size in GB | `25` | no | -| docker_auth_type | The docker auth type, see https://godoc.org/github.com/aws/amazon-ecs-agent/agent/engine/dockerauth for the possible values | `""` | no | -| docker_auth_data | A JSON object providing the docker auth data, see https://godoc.org/github.com/aws/amazon-ecs-agent/agent/engine/dockerauth for the supported formats | `""` | no | +| docker_auth_type | The docker auth type, see https://godoc.org/github.com/aws/amazon-ecs-agent/agent/engine/dockerauth for the possible values | `` | no | +| docker_auth_data | A JSON object providing the docker auth data, see https://godoc.org/github.com/aws/amazon-ecs-agent/agent/engine/dockerauth for the supported formats | `` | no | +| extra_cloud_config_type | Extra cloud config type | `text/cloud-config` | no | +| extra_cloud_config_content | Extra cloud config content | `` | no | ## Outputs @@ -242,9 +271,9 @@ # elb - The ELB module creates an ELB, security group - a route53 record and a service healthcheck. - It is used by the service module. 
+The ELB module creates an ELB, security group +a route53 record and a service healthcheck. +It is used by the service module. ## Inputs @@ -274,9 +303,9 @@ # iam-user - The module creates an IAM user. +The module creates an IAM user. - Usage: +Usage: module "my_user" { name = "user" @@ -313,19 +342,20 @@ | environment | The environment tag, e.g prod | - | yes | | vpc_id | The VPC ID to use | - | yes | | zone_id | The Route53 Zone ID where the DNS record will be created | - | yes | -| security_groups | A comma-separated list of security group IDs | - | yes | -| subnet_ids | A comma-separated list of subnet IDs | - | yes | -| availability_zones | A comma-separated list of availability zones | - | yes | +| security_groups | A list of security group IDs | - | yes | +| subnet_ids | A list of subnet IDs | - | yes | +| availability_zones | A list of availability zones | - | yes | | database_name | The database name | - | yes | | master_username | The master user username | - | yes | | master_password | The master user password | - | yes | -| instance_type | The type of instances that the RDS cluster will be running on | `"db.r3.large"` | no | +| instance_type | The type of instances that the RDS cluster will be running on | `db.r3.large` | no | | instance_count | How many instances will be provisioned in the RDS cluster | `1` | no | -| preferred_backup_window | The time window on which backups will be made (HH:mm-HH:mm) | `"07:00-09:00"` | no | +| preferred_backup_window | The time window on which backups will be made (HH:mm-HH:mm) | `07:00-09:00` | no | | backup_retention_period | The backup retention period | `5` | no | | publicly_accessible | When set to true the RDS cluster can be reached from outside the VPC | `false` | no | -| dns_name | Route53 record name for the RDS database, defaults to the database name if not set | `""` | no | +| dns_name | Route53 record name for the RDS database, defaults to the database name if not set | `` | no | | port | The port at which the database listens for incoming connections | `3306` | no | +| skip_final_snapshot | When set to false deletion will be delayed to take a snapshot from which the database can be recovered | `true` | no | ## Outputs @@ -355,7 +385,7 @@ # security-groups - Creates basic security groups to be used by instances and ELBs. +Creates basic security groups to be used by instances and ELBs. ## Inputs @@ -378,17 +408,17 @@ # service - The service module creates an ecs service, task definition - elb and a route53 record under the local service zone (see the dns module). +The service module creates an ecs service, task definition +elb and a route53 record under the local service zone (see the dns module). 
- Usage: +Usage: - module "auth_service" { - source = "github.com/segmentio/stack/service" - name = "auth-service" - image = "auth-service" - cluster = "default" - } + module "auth_service" { + source = "github.com/segmentio/stack/service" + name = "auth-service" + image = "auth-service" + cluster = "default" + } @@ -398,24 +428,26 @@ |------|-------------|:-----:|:-----:| | environment | Environment tag, e.g prod | - | yes | | image | The docker image name, e.g nginx | - | yes | -| name | The service name, if empty the service name is defaulted to the image name | `""` | no | -| version | The docker image version | `"latest"` | no | +| name | The service name, if empty the service name is defaulted to the image name | `` | no | +| version | The docker image version | `latest` | no | | subnet_ids | Comma separated list of subnet IDs that will be passed to the ELB module | - | yes | | security_groups | Comma separated list of security group IDs that will be passed to the ELB module | - | yes | | port | The container host port | - | yes | | cluster | The cluster name or ARN | - | yes | | dns_name | The DNS name to use, e.g nginx | - | yes | | log_bucket | The S3 bucket ID to use for the ELB | - | yes | -| healthcheck | Path to a healthcheck endpoint | `"/"` | no | +| healthcheck | Path to a healthcheck endpoint | `/` | no | | container_port | The container port | `3000` | no | -| command | The raw json of the task command | `"[]"` | no | -| env_vars | The raw json of the task env vars | `"[]"` | no | +| command | The raw json of the task command | `[]` | no | +| env_vars | The raw json of the task env vars | `[]` | no | | desired_count | The desired count | `2` | no | | memory | The number of MiB of memory to reserve for the container | `512` | no | | cpu | The number of cpu units to reserve for the container | `512` | no | -| protocol | The ELB protocol, HTTP or TCP | `"HTTP"` | no | +| protocol | The ELB protocol, HTTP or TCP | `HTTP` | no | | iam_role | IAM Role ARN to use | - | yes | | zone_id | The zone ID to create the record in | - | yes | +| deployment_minimum_healthy_percent | lower limit (% of desired_count) of # of running tasks during a deployment | `100` | no | +| deployment_maximum_percent | upper limit (% of desired_count) of # of running tasks during a deployment | `200` | no | ## Outputs @@ -429,15 +461,15 @@ # task - The task module creates an ECS task definition. +The task module creates an ECS task definition. 
- Usage: +Usage: - module "nginx" { - source = "github.com/segmentio/stack/task" - name = "nginx" - image = "nginx" - } + module "nginx" { + source = "github.com/segmentio/stack/task" + name = "nginx" + image = "nginx" + } @@ -448,11 +480,11 @@ | image | The docker image name, e.g nginx | - | yes | | name | The worker name, if empty the service name is defaulted to the image name | - | yes | | cpu | The number of cpu units to reserve for the container | `512` | no | -| env_vars | The raw json of the task env vars | `"[]"` | no | -| command | The raw json of the task command | `"[]"` | no | -| entry_point | The docker container entry point | `"[]"` | no | -| ports | The docker container ports | `"[]"` | no | -| image_version | The docker image version | `"latest"` | no | +| env_vars | The raw json of the task env vars | `[]` | no | +| command | The raw json of the task command | `[]` | no | +| entry_point | The docker container entry point | `[]` | no | +| ports | The docker container ports | `[]` | no | +| image_version | The docker image version | `latest` | no | | memory | The number of MiB of memory to reserve for the container | `512` | no | ## Outputs @@ -470,11 +502,11 @@ | Name | Description | Default | Required | |------|-------------|:-----:|:-----:| | cidr | The CIDR block for the VPC. | - | yes | -| external_subnets | Comma separated list of subnets | - | yes | -| internal_subnets | Comma separated list of subnets | - | yes | +| external_subnets | List of external subnets | - | yes | +| internal_subnets | List of internal subnets | - | yes | | environment | Environment tag, e.g prod | - | yes | -| availability_zones | Comma separated list of availability zones | - | yes | -| name | Name tag, e.g stack | `"stack"` | no | +| availability_zones | List of availability zones | - | yes | +| name | Name tag, e.g stack | `stack` | no | ## Outputs @@ -482,23 +514,26 @@ |------|-------------| | id | The VPC ID | | external_subnets | A comma-separated list of subnet IDs. | -| internal_subnets | A comma-separated list of subnet IDs. | +| internal_subnets | A list of subnet IDs. | | security_group | The default VPC security group ID. | | availability_zones | The list of availability zones of the VPC. | +| internal_rtb_id | The internal route table ID. | +| external_rtb_id | The external route table ID. | +| internal_nat_ips | The list of EIPs associated with the internal subnets. | # web-service - The web-service is similar to the `service` module, but the - it provides a __public__ ELB instead. +The web-service is similar to the `service` module, but the +it provides a __public__ ELB instead. 
- Usage: +Usage: - module "auth_service" { - source = "github.com/segmentio/stack/service" - name = "auth-service" - image = "auth-service" - cluster = "default" - } + module "auth_service" { + source = "github.com/segmentio/stack/service" + name = "auth-service" + image = "auth-service" + cluster = "default" + } @@ -508,8 +543,8 @@ |------|-------------|:-----:|:-----:| | environment | Environment tag, e.g prod | - | yes | | image | The docker image name, e.g nginx | - | yes | -| name | The service name, if empty the service name is defaulted to the image name | `""` | no | -| version | The docker image version | `"latest"` | no | +| name | The service name, if empty the service name is defaulted to the image name | `` | no | +| version | The docker image version | `latest` | no | | subnet_ids | Comma separated list of subnet IDs that will be passed to the ELB module | - | yes | | security_groups | Comma separated list of security group IDs that will be passed to the ELB module | - | yes | | port | The container host port | - | yes | @@ -517,17 +552,19 @@ | log_bucket | The S3 bucket ID to use for the ELB | - | yes | | ssl_certificate_id | SSL Certificate ID to use | - | yes | | iam_role | IAM Role ARN to use | - | yes | -| external_dns_name | The subdomain under which the ELB is exposed externally, defaults to the task name | `""` | no | -| internal_dns_name | The subdomain under which the ELB is exposed internally, defaults to the task name | `""` | no | +| external_dns_name | The subdomain under which the ELB is exposed externally, defaults to the task name | `` | no | +| internal_dns_name | The subdomain under which the ELB is exposed internally, defaults to the task name | `` | no | | external_zone_id | The zone ID to create the record in | - | yes | | internal_zone_id | The zone ID to create the record in | - | yes | -| healthcheck | Path to a healthcheck endpoint | `"/"` | no | +| healthcheck | Path to a healthcheck endpoint | `/` | no | | container_port | The container port | `3000` | no | -| command | The raw json of the task command | `"[]"` | no | -| env_vars | The raw json of the task env vars | `"[]"` | no | +| command | The raw json of the task command | `[]` | no | +| env_vars | The raw json of the task env vars | `[]` | no | | desired_count | The desired count | `2` | no | | memory | The number of MiB of memory to reserve for the container | `512` | no | | cpu | The number of cpu units to reserve for the container | `512` | no | +| deployment_minimum_healthy_percent | lower limit (% of desired_count) of # of running tasks during a deployment | `100` | no | +| deployment_maximum_percent | upper limit (% of desired_count) of # of running tasks during a deployment | `200` | no | ## Outputs @@ -542,17 +579,17 @@ # worker - The worker module creates an ECS service that has no ELB attached. +The worker module creates an ECS service that has no ELB attached. 
- Usage: +Usage: - module "my_worker" { - source = "github.com/segmentio/stack" - environment = "prod" - name = "worker" - image = "worker" - cluster = "default" - } + module "my_worker" { + source = "github.com/segmentio/stack" + environment = "prod" + name = "worker" + image = "worker" + cluster = "default" + } @@ -562,12 +599,13 @@ |------|-------------|:-----:|:-----:| | environment | Environment tag, e.g prod | - | yes | | image | The docker image name, e.g nginx | - | yes | -| name | The worker name, if empty the service name is defaulted to the image name | `""` | no | -| version | The docker image version | `"latest"` | no | +| name | The worker name, if empty the service name is defaulted to the image name | `` | no | +| version | The docker image version | `latest` | no | | cluster | The cluster name or ARN | - | yes | -| command | The raw json of the task command | `"[]"` | no | -| env_vars | The raw json of the task env vars | `"[]"` | no | +| command | The raw json of the task command | `[]` | no | +| env_vars | The raw json of the task env vars | `[]` | no | | desired_count | The desired count | `1` | no | | memory | The number of MiB of memory to reserve for the container | `512` | no | | cpu | The number of cpu units to reserve for the container | `512` | no | - +| deployment_minimum_healthy_percent | lower limit (% of desired_count) of # of running tasks during a deployment | `100` | no | +| deployment_maximum_percent | upper limit (% of desired_count) of # of running tasks during a deployment | `200` | no | diff --git a/ecs-cluster/files/cloud-config.yml.tpl b/ecs-cluster/files/cloud-config.yml.tpl index 1e07310e..14b93f34 100644 --- a/ecs-cluster/files/cloud-config.yml.tpl +++ b/ecs-cluster/files/cloud-config.yml.tpl @@ -5,5 +5,7 @@ bootcmd: - echo 'SERVER_REGION=${region}' >> /etc/environment - mkdir -p /etc/ecs + - echo 'ECS_CLUSTER=${name}' >> /etc/ecs/ecs.config - echo 'ECS_ENGINE_AUTH_TYPE=${docker_auth_type}' >> /etc/ecs/ecs.config - - echo 'ECS_ENGINE_AUTH_DATA=${docker_auth_data}' >> /etc/ecs/ecs.config + - > + echo 'ECS_ENGINE_AUTH_DATA=${docker_auth_data}' >> /etc/ecs/ecs.config diff --git a/ecs-cluster/main.tf b/ecs-cluster/main.tf index ca6267d0..4fae8565 100644 --- a/ecs-cluster/main.tf +++ b/ecs-cluster/main.tf @@ -14,12 +14,12 @@ * name = "cdn" * vpc_id = "vpc-id" * image_id = "ami-id" - * subnet_ids = "1,2" + * subnet_ids = ["1" ,"2"] * key_name = "ssh-key" * security_groups = "1,2" * iam_instance_profile = "id" * region = "us-west-2" - * availability_zones = "a,b" + * availability_zones = ["a", "b"] * instance_type = "t2.small" * } * @@ -42,7 +42,8 @@ variable "image_id" { } variable "subnet_ids" { - description = "Comma separated list of subnet IDs" + description = "List of subnet IDs" + type = "list" } variable "key_name" { @@ -62,7 +63,8 @@ variable "region" { } variable "availability_zones" { - description = "Comma separated list of AZs" + description = "List of AZs" + type = "list" } variable "instance_type" { @@ -114,6 +116,16 @@ variable "docker_auth_data" { default = "" } +variable "extra_cloud_config_type" { + description = "Extra cloud config type" + default = "text/cloud-config" +} + +variable "extra_cloud_config_content" { + description = "Extra cloud config content" + default = "" +} + resource "aws_security_group" "cluster" { name = "${var.name}-ecs-cluster" vpc_id = "${var.vpc_id}" @@ -151,7 +163,7 @@ resource "aws_ecs_cluster" "main" { } } -resource "template_file" "cloud_config" { +data "template_file" "ecs_cloud_config" { template = 
"${file("${path.module}/files/cloud-config.yml.tpl")}" vars { @@ -161,9 +173,20 @@ resource "template_file" "cloud_config" { docker_auth_type = "${var.docker_auth_type}" docker_auth_data = "${var.docker_auth_data}" } +} - lifecycle { - create_before_destroy = true +data "template_cloudinit_config" "cloud_config" { + gzip = false + base64_encode = false + + part { + content_type = "text/cloud-config" + content = "${data.template_file.ecs_cloud_config.rendered}" + } + + part { + content_type = "${var.extra_cloud_config_type}" + content = "${var.extra_cloud_config_content}" } } @@ -176,7 +199,7 @@ resource "aws_launch_configuration" "main" { iam_instance_profile = "${var.iam_instance_profile}" key_name = "${var.key_name}" security_groups = ["${aws_security_group.cluster.id}"] - user_data = "${template_file.cloud_config.rendered}" + user_data = "${data.template_cloudinit_config.cloud_config.rendered}" associate_public_ip_address = "${var.associate_public_ip_address}" # root @@ -200,8 +223,8 @@ resource "aws_launch_configuration" "main" { resource "aws_autoscaling_group" "main" { name = "${var.name}" - availability_zones = ["${split(",", var.availability_zones)}"] - vpc_zone_identifier = ["${split(",", var.subnet_ids)}"] + availability_zones = ["${var.availability_zones}"] + vpc_zone_identifier = ["${var.subnet_ids}"] launch_configuration = "${aws_launch_configuration.main.id}" min_size = "${var.min_size}" max_size = "${var.max_size}" diff --git a/iam-role/main.tf b/iam-role/main.tf index d06ff2d8..249a40e9 100644 --- a/iam-role/main.tf +++ b/iam-role/main.tf @@ -97,7 +97,11 @@ EOF resource "aws_iam_instance_profile" "default_ecs" { name = "ecs-instance-profile-${var.name}-${var.environment}" path = "/" - roles = ["${aws_iam_role.default_ecs_role.name}"] + role = "${aws_iam_role.default_ecs_role.name}" +} + +output "default_ecs_role_id" { + value = "${aws_iam_role.default_ecs_role.id}" } output "arn" { diff --git a/images/networking.png b/images/networking.png index e1555678..897d0f2f 100644 Binary files a/images/networking.png and b/images/networking.png differ diff --git a/main.tf b/main.tf index 38588911..45f51765 100644 --- a/main.tf +++ b/main.tf @@ -46,18 +46,18 @@ variable "cidr" { } variable "internal_subnets" { - description = "a comma-separated list of CIDRs for internal subnets in your VPC, must be set if the cidr variable is defined, needs to have as many elements as there are availability zones" - default = "10.30.0.0/19,10.30.64.0/19,10.30.128.0/19" + description = "a list of CIDRs for internal subnets in your VPC, must be set if the cidr variable is defined, needs to have as many elements as there are availability zones" + default = ["10.30.0.0/19" ,"10.30.64.0/19", "10.30.128.0/19"] } variable "external_subnets" { - description = "a comma-separated list of CIDRs for external subnets in your VPC, must be set if the cidr variable is defined, needs to have as many elements as there are availability zones" - default = "10.30.32.0/20,10.30.96.0/20,10.30.160.0/20" + description = "a list of CIDRs for external subnets in your VPC, must be set if the cidr variable is defined, needs to have as many elements as there are availability zones" + default = ["10.30.32.0/20", "10.30.96.0/20", "10.30.160.0/20"] } variable "availability_zones" { description = "a comma-separated list of availability zones, defaults to all AZ of the region, if set to something other than the defaults, both internal_subnets and external_subnets have to be defined as well" - default = 
"us-west-2a,us-west-2b,us-west-2c" + default = ["us-west-2a", "us-west-2b", "us-west-2c"] } variable "bastion_instance_type" { @@ -129,6 +129,24 @@ variable "ecs_ami" { default = "" } +variable "extra_cloud_config_type" { + description = "Extra cloud config type" + default = "text/cloud-config" +} + +variable "extra_cloud_config_content" { + description = "Extra cloud config content" + default = "" +} + +variable "logs_expiration_enabled" { + default = false +} + +variable "logs_expiration_days" { + default = 30 +} + module "defaults" { source = "./defaults" region = "${var.region}" @@ -159,7 +177,7 @@ module "bastion" { instance_type = "${var.bastion_instance_type}" security_groups = "${module.security_groups.external_ssh},${module.security_groups.internal_ssh}" vpc_id = "${module.vpc.id}" - subnet_id = "${element(split(",",module.vpc.external_subnets), 0)}" + subnet_id = "${element(module.vpc.external_subnets, 0)}" key_name = "${var.key_name}" environment = "${var.environment}" } @@ -204,13 +222,17 @@ module "ecs_cluster" { docker_auth_type = "${var.ecs_docker_auth_type}" docker_auth_data = "${var.ecs_docker_auth_data}" security_groups = "${coalesce(var.ecs_security_groups, format("%s,%s,%s", module.security_groups.internal_ssh, module.security_groups.internal_elb, module.security_groups.external_elb))}" + extra_cloud_config_type = "${var.extra_cloud_config_type}" + extra_cloud_config_content = "${var.extra_cloud_config_content}" } module "s3_logs" { - source = "./s3-logs" - name = "${var.name}" - environment = "${var.environment}" - account_id = "${module.defaults.s3_logs_account_id}" + source = "./s3-logs" + name = "${var.name}" + environment = "${var.environment}" + account_id = "${module.defaults.s3_logs_account_id}" + logs_expiration_enabled = "${var.logs_expiration_enabled}" + logs_expiration_days = "${var.logs_expiration_days}" } // The region in which the infra lives. @@ -253,6 +275,11 @@ output "iam_role" { value = "${module.iam_role.arn}" } +// Default ECS role ID. Useful if you want to add a new policy to that role. +output "iam_role_default_ecs_role_id" { + value = "${module.iam_role.default_ecs_role_id}" +} + // S3 bucket ID for ELB logs. output "log_bucket_id" { value = "${module.s3_logs.id}" @@ -292,3 +319,13 @@ output "vpc_id" { output "ecs_cluster_security_group_id" { value = "${module.ecs_cluster.security_group_id}" } + +// Comma separated list of internal route table IDs. +output "internal_route_tables" { + value = "${module.vpc.internal_rtb_id}" +} + +// The external route table ID. 
+output "external_route_tables" { + value = "${module.vpc.external_rtb_id}" +} diff --git a/packer/base/packer.yml b/packer/base/packer.yml index 0b2b78a4..fbbe6846 100644 --- a/packer/base/packer.yml +++ b/packer/base/packer.yml @@ -1,7 +1,8 @@ --- # https://www.packer.io/docs/builders/amazon-ebs.html ami: - source_ami: ami-fa82739a + source_ami: ami-e6d5d2f1 + region: us-east-1 instance_type: c4.2xlarge ssh_username: ubuntu ssh_timeout: 10m diff --git a/packer/ecs/packer.yml b/packer/ecs/packer.yml index 846c0897..7ff80a97 100644 --- a/packer/ecs/packer.yml +++ b/packer/ecs/packer.yml @@ -3,3 +3,4 @@ base: base scripts: - ecs.sh + - iam-roles.sh diff --git a/packer/ecs/root/etc/ecs/ecs.config b/packer/ecs/root/etc/ecs/ecs.config index ba92f04f..ad14f307 100644 --- a/packer/ecs/root/etc/ecs/ecs.config +++ b/packer/ecs/root/etc/ecs/ecs.config @@ -5,3 +5,4 @@ ECS_CHECKPOINT=true ECS_DATADIR=/data ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION=1h ECS_AVAILABLE_LOGGING_DRIVERS=["journald"] +ECS_ENABLE_TASK_IAM_ROLE=true diff --git a/packer/ecs/root/etc/systemd/system/ecs-agent.service b/packer/ecs/root/etc/systemd/system/ecs-agent.service index 135da611..b5a6efab 100644 --- a/packer/ecs/root/etc/systemd/system/ecs-agent.service +++ b/packer/ecs/root/etc/systemd/system/ecs-agent.service @@ -14,7 +14,7 @@ ExecStartPre=/bin/mkdir -p /var/lib/ecs/data ExecStartPre=/bin/mkdir -p /var/log/ecs ExecStartPre=-/usr/bin/docker kill ecs-agent ExecStartPre=-/usr/bin/docker rm ecs-agent -ExecStartPre=-/usr/bin/docker pull amazon/amazon-ecs-agent:v1.9.0 +ExecStartPre=-/usr/bin/docker pull amazon/amazon-ecs-agent:v1.14.1 ExecStart=/usr/bin/docker run \ --name=ecs-agent \ --restart=on-failure:10 \ @@ -24,9 +24,10 @@ ExecStart=/usr/bin/docker run \ --volume=/sys/fs/cgroup:/sys/fs/cgroup:ro \ --volume=/var/run/docker/execdriver/native:/var/lib/docker/execdriver/native:ro \ --publish=127.0.0.1:51678:51678 \ + --publish=127.0.0.1:51679:51679 \ --env-file=/etc/ecs/ecs.config \ --env=ECS_CLUSTER=${SERVER_GROUP} \ - amazon/amazon-ecs-agent:v1.9.0 + amazon/amazon-ecs-agent:v1.14.1 ExecStop=-/usr/bin/docker stop ecs-agent [Install] diff --git a/packer/ecs/root/etc/systemd/system/ecs-logs.service b/packer/ecs/root/etc/systemd/system/ecs-logs.service index 3481001f..6076ba18 100644 --- a/packer/ecs/root/etc/systemd/system/ecs-logs.service +++ b/packer/ecs/root/etc/systemd/system/ecs-logs.service @@ -11,12 +11,12 @@ RestartPreventExitStatus=5 SyslogIdentifier=ecs-logs ExecStartPre=-/usr/bin/docker kill ecs-logs ExecStartPre=-/usr/bin/docker rm ecs-logs -ExecStartPre=-/usr/bin/docker pull segment/ecs-logs:latest +ExecStartPre=-/usr/bin/docker pull segment/ecs-logs:0.1.1 ExecStart=/usr/bin/docker run \ --name=ecs-logs \ --restart=on-failure:10 \ --volume=/run/log/journal:/run/log/journal:ro \ - segment/ecs-logs:latest -src journald -dst cloudwatchlogs + segment/ecs-logs:0.1.1 -src journald -dst cloudwatchlogs ExecStop=-/usr/bin/docker stop ecs-logs [Install] diff --git a/packer/ecs/scripts/iam-roles.sh b/packer/ecs/scripts/iam-roles.sh new file mode 100755 index 00000000..91218328 --- /dev/null +++ b/packer/ecs/scripts/iam-roles.sh @@ -0,0 +1,19 @@ +#!/bin/bash +set -e + +# Allow us to route traffic that uses 127.0.0.1 +echo "net.ipv4.conf.all.route_localnet=1" | tee --append /etc/sysctl.conf + +# install iptables-persistent to persist iptables rules across reboots +export DEBIAN_FRONTEND=noninteractive +apt-get install -y iptables-persistent +# iptables-persistent is really named netfilter-persistent in 16.04 +invoke-rc.d 
netfilter-persistent save +systemctl stop netfilter-persistent.service + +# setup iptables rules to allow for Task IAM Roles +iptables -t nat -A PREROUTING -p tcp -d 169.254.170.2 --dport 80 -j DNAT --to-destination 127.0.0.1:51679 +iptables -t nat -A OUTPUT -d 169.254.170.2 -p tcp -m tcp --dport 80 -j REDIRECT --to-ports 51679 + +# Save iptables rules +netfilter-persistent save diff --git a/rds-cluster/main.tf b/rds-cluster/main.tf index b6cb253f..97157335 100644 --- a/rds-cluster/main.tf +++ b/rds-cluster/main.tf @@ -15,15 +15,18 @@ variable "zone_id" { } variable "security_groups" { - description = "A comma-separated list of security group IDs" + description = "A list of security group IDs" + type = "list" } variable "subnet_ids" { - description = "A comma-separated list of subnet IDs" + description = "A list of subnet IDs" + type = "list" } variable "availability_zones" { - description = "A comma-separated list of availability zones" + description = "A list of availability zones" + type = "list" } variable "database_name" { @@ -73,6 +76,11 @@ variable "port" { default = 3306 } +variable "skip_final_snapshot" { + description = "When set to false deletion will be delayed to take a snapshot from which the database can be recovered" + default = true +} + resource "aws_security_group" "main" { name = "${var.name}-rds-cluster" description = "Allows traffic to rds from other security groups" @@ -82,7 +90,7 @@ resource "aws_security_group" "main" { from_port = "${var.port}" to_port = "${var.port}" protocol = "TCP" - security_groups = ["${split(",", var.security_groups)}"] + security_groups = ["${var.security_groups}"] } egress { @@ -101,7 +109,7 @@ resource "aws_security_group" "main" { resource "aws_db_subnet_group" "main" { name = "${var.name}" description = "RDS cluster subnet group" - subnet_ids = ["${split(",", var.subnet_ids)}"] + subnet_ids = ["${var.subnet_ids}"] } resource "aws_rds_cluster_instance" "cluster_instances" { @@ -110,19 +118,24 @@ resource "aws_rds_cluster_instance" "cluster_instances" { cluster_identifier = "${aws_rds_cluster.main.id}" publicly_accessible = "${var.publicly_accessible}" instance_class = "${var.instance_type}" + + # need a deterministic identifier or terraform will force a new resource every apply + identifier = "${aws_rds_cluster.main.id}-${count.index}" } resource "aws_rds_cluster" "main" { - cluster_identifier = "${var.name}" - availability_zones = ["${split(",", var.availability_zones)}"] - database_name = "${var.database_name}" - master_username = "${var.master_username}" - master_password = "${var.master_password}" - backup_retention_period = "${var.backup_retention_period}" - preferred_backup_window = "${var.preferred_backup_window}" - vpc_security_group_ids = ["${aws_security_group.main.id}"] - db_subnet_group_name = "${aws_db_subnet_group.main.id}" - port = "${var.port}" + cluster_identifier = "${var.name}" + availability_zones = ["${var.availability_zones}"] + database_name = "${var.database_name}" + master_username = "${var.master_username}" + master_password = "${var.master_password}" + backup_retention_period = "${var.backup_retention_period}" + preferred_backup_window = "${var.preferred_backup_window}" + vpc_security_group_ids = ["${aws_security_group.main.id}"] + db_subnet_group_name = "${aws_db_subnet_group.main.id}" + port = "${var.port}" + skip_final_snapshot = "${var.skip_final_snapshot}" + final_snapshot_identifier = "${var.name}-finalsnapshot" } resource "aws_route53_record" "main" { diff --git a/rds/main.tf b/rds/main.tf new file mode 
100644 index 00000000..cda04196 --- /dev/null +++ b/rds/main.tf @@ -0,0 +1,190 @@ +variable "name" { + description = "RDS instance name" +} + +variable "engine" { + description = "Database engine: mysql, postgres, etc." + default = "postgres" +} + +variable "engine_version" { + description = "Database version" + default = "9.6.1" +} + +variable "port" { + description = "Port for database to listen on" + default = 5432 +} + +variable "database" { + description = "The database name for the RDS instance (if not specified, `var.name` will be used)" + default = "" +} + +variable "username" { + description = "The username for the RDS instance (if not specified, `var.name` will be used)" + default = "" +} + +variable "password" { + description = "Postgres user password" +} + +variable "multi_az" { + description = "If true, database will be placed in multiple AZs for HA" + default = false +} + +variable "backup_retention_period" { + description = "Backup retention, in days" + default = 5 +} + +variable "backup_window" { + description = "Time window for backups." + default = "00:00-01:00" +} + +variable "maintenance_window" { + description = "Time window for maintenance." + default = "Mon:01:00-Mon:02:00" +} + +variable "monitoring_interval" { + description = "Seconds between enhanced monitoring metric collection. 0 disables enhanced monitoring." + default = "0" +} + +variable "monitoring_role_arn" { + description = "The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to CloudWatch Logs. Required if monitoring_interval > 0." + default = "" +} + +variable "apply_immediately" { + description = "If false, apply changes during maintenance window" + default = true +} + +variable "allow_major_version_upgrade" { + description = "If true, major version upgrades are allowed" + default = false +} + +variable "instance_class" { + description = "Underlying instance type" + default = "db.t2.micro" +} + +variable "storage_type" { + description = "Storage type: standard, gp2, or io1" + default = "gp2" +} + +variable "allocated_storage" { + description = "Disk size, in GB" + default = 10 +} + +variable "publicly_accessible" { + description = "If true, the RDS instance will be open to the internet" + default = false +} + +variable "vpc_id" { + description = "The VPC ID to use" +} + +variable "ingress_allow_security_groups" { + description = "A list of security group IDs to allow traffic from" + type = "list" + default = [] +} + +variable "ingress_allow_cidr_blocks" { + description = "A list of CIDR blocks to allow traffic from" + type = "list" + default = [] +} + +variable "subnet_ids" { + description = "A list of subnet IDs" + type = "list" +} + +resource "aws_security_group" "main" { + name = "${var.name}-rds" + description = "Allows traffic to RDS from other security groups" + vpc_id = "${var.vpc_id}" + + ingress { + from_port = "${var.port}" + to_port = "${var.port}" + protocol = "TCP" + security_groups = ["${var.ingress_allow_security_groups}"] + } + + ingress { + from_port = "${var.port}" + to_port = "${var.port}" + protocol = "TCP" + cidr_blocks = ["${var.ingress_allow_cidr_blocks}"] + } + + egress { + from_port = 0 + to_port = 0 + protocol = -1 + cidr_blocks = ["0.0.0.0/0"] + } + + tags { + Name = "RDS (${var.name})" + } +} + +resource "aws_db_subnet_group" "main" { + name = "${var.name}" + description = "RDS subnet group" + subnet_ids = ["${var.subnet_ids}"] +} + +resource "aws_db_instance" "main" { + identifier = "${var.name}" + + # Database + engine = "${var.engine}" + 
engine_version = "${var.engine_version}" + allow_major_version_upgrade = "${var.allow_major_version_upgrade}" + username = "${coalesce(var.username, var.name)}" + password = "${var.password}" + multi_az = "${var.multi_az}" + name = "${coalesce(var.database, var.name)}" + + # Backups / maintenance + backup_retention_period = "${var.backup_retention_period}" + backup_window = "${var.backup_window}" + maintenance_window = "${var.maintenance_window}" + monitoring_interval = "${var.monitoring_interval}" + monitoring_role_arn = "${var.monitoring_role_arn}" + apply_immediately = "${var.apply_immediately}" + final_snapshot_identifier = "${var.name}-finalsnapshot" + + # Hardware + instance_class = "${var.instance_class}" + storage_type = "${var.storage_type}" + allocated_storage = "${var.allocated_storage}" + + # Network / security + db_subnet_group_name = "${aws_db_subnet_group.main.id}" + vpc_security_group_ids = ["${aws_security_group.main.id}"] + publicly_accessible = "${var.publicly_accessible}" +} + +output "addr" { + value = "${aws_db_instance.main.engine}://${aws_db_instance.main.username}:${aws_db_instance.main.password}@${aws_db_instance.main.endpoint}" +} + +output "url" { + value = "${aws_db_instance.main.engine}://${aws_db_instance.main.username}:${aws_db_instance.main.password}@${aws_db_instance.main.endpoint}/${aws_db_instance.main.name}" +} diff --git a/s3-logs/main.tf b/s3-logs/main.tf index caf0da0b..f3e582f4 100644 --- a/s3-logs/main.tf +++ b/s3-logs/main.tf @@ -7,7 +7,15 @@ variable "environment" { variable "account_id" { } -resource "template_file" "policy" { +variable "logs_expiration_enabled" { + default = false +} + +variable "logs_expiration_days" { + default = 30 +} + +data "template_file" "policy" { template = "${file("${path.module}/policy.json")}" vars = { @@ -19,12 +27,22 @@ resource "template_file" "policy" { resource "aws_s3_bucket" "logs" { bucket = "${var.name}-${var.environment}-logs" + lifecycle_rule { + id = "logs-expiration" + prefix = "" + enabled = "${var.logs_expiration_enabled}" + + expiration { + days = "${var.logs_expiration_days}" + } + } + tags { Name = "${var.name}-${var.environment}-logs" Environment = "${var.environment}" } - policy = "${template_file.policy.rendered}" + policy = "${data.template_file.policy.rendered}" } output "id" { diff --git a/scripts/test.sh b/scripts/test.sh index 301241f9..b11e95bc 100644 --- a/scripts/test.sh +++ b/scripts/test.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -modules=$(ls -1 */*.tf | xargs -I % dirname %) +modules=$(find -mindepth 2 -name *.tf -printf '%P\n' | xargs -I % dirname %) (terraform validate . 
&& echo "√ stack") || exit 1 diff --git a/security-groups/main.tf b/security-groups/main.tf index 9bee46b8..a3b6a7eb 100644 --- a/security-groups/main.tf +++ b/security-groups/main.tf @@ -50,7 +50,7 @@ resource "aws_security_group" "internal_elb" { resource "aws_security_group" "external_elb" { name = "${format("%s-%s-external-elb", var.name, var.environment)}" vpc_id = "${var.vpc_id}" - description = "Allows internal ELB traffic" + description = "Allows external ELB traffic" ingress { from_port = 80 diff --git a/service/main.tf b/service/main.tf index a792d5f4..8f237be1 100644 --- a/service/main.tf +++ b/service/main.tf @@ -111,16 +111,28 @@ variable "zone_id" { description = "The zone ID to create the record in" } +variable "deployment_minimum_healthy_percent" { + description = "lower limit (% of desired_count) of # of running tasks during a deployment" + default = 100 +} + +variable "deployment_maximum_percent" { + description = "upper limit (% of desired_count) of # of running tasks during a deployment" + default = 200 +} + /** * Resources. */ resource "aws_ecs_service" "main" { - name = "${module.task.name}" - cluster = "${var.cluster}" - task_definition = "${module.task.arn}" - desired_count = "${var.desired_count}" - iam_role = "${var.iam_role}" + name = "${module.task.name}" + cluster = "${var.cluster}" + task_definition = "${module.task.arn}" + desired_count = "${var.desired_count}" + iam_role = "${var.iam_role}" + deployment_minimum_healthy_percent = "${var.deployment_minimum_healthy_percent}" + deployment_maximum_percent = "${var.deployment_maximum_percent}" load_balancer { elb_name = "${module.elb.id}" diff --git a/task/main.tf b/task/main.tf index c2ed0a5f..8fe201d3 100644 --- a/task/main.tf +++ b/task/main.tf @@ -62,6 +62,16 @@ variable "memory" { default = 512 } +variable "log_driver" { + description = "The log driver to use use for the container" + default = "journald" +} + +variable "role" { + description = "The IAM Role to assign to the Container" + default = "" +} + /** * Resources. */ @@ -69,7 +79,8 @@ variable "memory" { # The ECS task definition. resource "aws_ecs_task_definition" "main" { - family = "${var.name}" + family = "${var.name}" + task_role_arn = "${var.role}" lifecycle { ignore_changes = ["image"] @@ -90,7 +101,7 @@ resource "aws_ecs_task_definition" "main" { "entryPoint": ${var.entry_point}, "mountPoints": [], "logConfiguration": { - "logDriver": "journald", + "logDriver": "${var.log_driver}", "options": { "tag": "${var.name}" } @@ -113,3 +124,8 @@ output "name" { output "arn" { value = "${aws_ecs_task_definition.main.arn}" } + +// The revision number of the task definition +output "revision" { + value = "${aws_ecs_task_definition.main.revision}" +} diff --git a/tools/readme.md b/tools/readme.md new file mode 100644 index 00000000..14994083 --- /dev/null +++ b/tools/readme.md @@ -0,0 +1,12 @@ +# Prerequisites: +- AWS Cli configured with your credentials + +# Usage: +- Go to tools directory `cd tools/` +- Generate Base AMI image `./pack-ami build -t base -p ../packer/` +- Generate ECS AMI image `./pack-ami build -t ecs -p ../packer/` +- Your new Base and ECS AMIs are available in your AWS account +- You can delete the Base AMI image +- Set the ECS AMI Permission to public +- Edit the `defaults/maint.tf` file and set the new ECS AMI ID corresponding to the zone where your AMI has been created +- Copy your ECS AMI to each zone available in `defaults/maint.tf` AND DO NOT FORGET TO MAKE THEM PUBLIC TOO. 
diff --git a/tools/roll-ami b/tools/roll-ami index c71cde3d..a8ca6bad 100755 --- a/tools/roll-ami +++ b/tools/roll-ami @@ -6,9 +6,12 @@ import queue import sys import threading import time +import copy asg = boto3.client('autoscaling') ec2 = boto3.client('ec2') +ecs = boto3.client('ecs') + def main(argv): options = parse_arguments(argv) @@ -108,6 +111,28 @@ def run(options, step_size=1, launch_config=None, force=False, ignore_instances= info('%s - running', instance['InstanceId']) tmp_instances.remove(instance['InstanceId']) + container_instance_id_map = build_container_instance_map(options.cluster) + + # Drain old instances + info('draining ECS container instances...') + for instance in old_instances: + info('%s - draining', instance) + ecs.update_container_instances_state( + cluster=options.cluster, + containerInstances=[container_instance_id_map[instance] for instance in old_instances], + status='DRAINING') + + draining_instances = copy.copy(old_instances) + while draining_instances: + time.sleep(5) + ci_info = ecs.describe_container_instances( + cluster=options.cluster, + containerInstances=[container_instance_id_map[instance] for instance in draining_instances]) + for container_instance in ci_info['containerInstances']: + if container_instance['runningTasksCount'] == 0: + draining_instances.remove(container_instance['ec2InstanceId']) + info('%s - drained', container_instance['ec2InstanceId']) + # Terminates the old instances that aren't necessary anymore (the ones # that were picked by the iterator). for instance in old_instances: @@ -209,6 +234,16 @@ def filter_new_instances(instances, group_instances): new_instances.sort() return new_instances +def build_container_instance_map(cluster): + container_instances = ecs.list_container_instances(cluster=cluster) + full_info = ecs.describe_container_instances(cluster=cluster, + containerInstances=container_instances['containerInstanceArns']) + + container_instance_map = {} + for ci in full_info['containerInstances']: + container_instance_map[ci['ec2InstanceId']] = ci['containerInstanceArn'] + return container_instance_map + def iter_instance_groups(instances, group_size): while instances: n = min(group_size, len(instances)) diff --git a/vpc/main.tf b/vpc/main.tf index 12557126..005c9077 100644 --- a/vpc/main.tf +++ b/vpc/main.tf @@ -3,11 +3,13 @@ variable "cidr" { } variable "external_subnets" { - description = "Comma separated list of subnets" + description = "List of external subnets" + type = "list" } variable "internal_subnets" { - description = "Comma separated list of subnets" + description = "List of internal subnets" + type = "list" } variable "environment" { @@ -15,7 +17,8 @@ variable "environment" { } variable "availability_zones" { - description = "Comma separated list of availability zones" + description = "List of availability zones" + type = "list" } variable "name" { @@ -23,6 +26,41 @@ variable "name" { default = "stack" } +variable "use_nat_instances" { + description = "If true, use EC2 NAT instances instead of the AWS NAT gateway service." + default = false +} + +variable "nat_instance_type" { + description = "Only if use_nat_instances is true, which EC2 instance type to use for the NAT instances." + default = "t2.nano" +} + +variable "use_eip_with_nat_instances" { + description = "Only if use_nat_instances is true, whether to assign Elastic IPs to the NAT instances. IF this is set to false, NAT instances use dynamically assigned IPs." 
+  default     = false
+}
+
+# This data source returns the newest Amazon NAT instance AMI.
+data "aws_ami" "nat_ami" {
+  most_recent = true
+
+  filter {
+    name   = "owner-alias"
+    values = ["amazon"]
+  }
+
+  filter {
+    name   = "name"
+    values = ["amzn-ami-vpc-nat*"]
+  }
+}
+
+variable "nat_instance_ssh_key_name" {
+  description = "Only if use_nat_instances is true, the optional SSH key-pair to assign to NAT instances."
+  default     = ""
+}
+
 /**
  * VPC
  */
@@ -52,15 +90,92 @@ resource "aws_internet_gateway" "main" {
 }
 
 resource "aws_nat_gateway" "main" {
-  count         = "${length(compact(split(",", var.internal_subnets)))}"
+  # Only create this if not using NAT instances.
+  count         = "${(1 - var.use_nat_instances) * length(var.internal_subnets)}"
   allocation_id = "${element(aws_eip.nat.*.id, count.index)}"
   subnet_id     = "${element(aws_subnet.external.*.id, count.index)}"
   depends_on    = ["aws_internet_gateway.main"]
 }
 
 resource "aws_eip" "nat" {
-  count = "${length(compact(split(",", var.internal_subnets)))}"
-  vpc   = true
+  # Create these only if:
+  # NAT instances are used and Elastic IPs are used with them,
+  # or if the NAT gateway service is used (NAT instances are not used).
+  count = "${signum((var.use_nat_instances * var.use_eip_with_nat_instances) + (var.use_nat_instances == 0 ? 1 : 0)) * length(var.internal_subnets)}"
+
+  vpc = true
+}
+
+resource "aws_security_group" "nat_instances" {
+  # Create this only if using NAT instances, vs. the NAT gateway service.
+  count       = "${0 + var.use_nat_instances}"
+  name        = "nat"
+  description = "Allow traffic from clients into NAT instances"
+
+  ingress {
+    from_port   = 0
+    to_port     = 65535
+    protocol    = "udp"
+    cidr_blocks = "${var.internal_subnets}"
+  }
+
+  ingress {
+    from_port   = 0
+    to_port     = 65535
+    protocol    = "tcp"
+    cidr_blocks = "${var.internal_subnets}"
+  }
+
+  egress {
+    from_port   = 0
+    to_port     = 0
+    protocol    = "-1"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+
+  vpc_id = "${aws_vpc.main.id}"
+}
+
+resource "aws_instance" "nat_instance" {
+  # Create these only if using NAT instances, vs. the NAT gateway service.
+  count             = "${(0 + var.use_nat_instances) * length(var.internal_subnets)}"
+  availability_zone = "${element(var.availability_zones, count.index)}"
+
+  tags {
+    Name        = "${var.name}-${format("internal-%03d NAT", count.index+1)}"
+    Environment = "${var.environment}"
+  }
+
+  volume_tags {
+    Name        = "${var.name}-${format("internal-%03d NAT", count.index+1)}"
+    Environment = "${var.environment}"
+  }
+
+  key_name          = "${var.nat_instance_ssh_key_name}"
+  ami               = "${data.aws_ami.nat_ami.id}"
+  instance_type     = "${var.nat_instance_type}"
+  source_dest_check = false
+
+  # associate_public_ip_address is not used,
+  # as public subnets have map_public_ip_on_launch set to true.
+  # Also, using associate_public_ip_address causes issues with
+  # stopped NAT instances which do not use an Elastic IP.
+  # - For more details: https://github.com/terraform-providers/terraform-provider-aws/issues/343
+  subnet_id = "${element(aws_subnet.external.*.id, count.index)}"
+
+  vpc_security_group_ids = ["${aws_security_group.nat_instances.id}"]
+
+  lifecycle {
+    # Ignore changes to the NAT AMI data source.
+    ignore_changes = ["ami"]
+  }
+}
+
+resource "aws_eip_association" "nat_instance_eip" {
+  # Create these only if using NAT instances, vs. the NAT gateway service.
+ count = "${(0 + (var.use_nat_instances * var.use_eip_with_nat_instances)) * length(var.internal_subnets)}" + instance_id = "${element(aws_instance.nat_instance.*.id, count.index)}" + allocation_id = "${element(aws_eip.nat.*.id, count.index)}" } /** @@ -69,24 +184,26 @@ resource "aws_eip" "nat" { resource "aws_subnet" "internal" { vpc_id = "${aws_vpc.main.id}" - cidr_block = "${element(split(",", var.internal_subnets), count.index)}" - availability_zone = "${element(split(",", var.availability_zones), count.index)}" - count = "${length(compact(split(",", var.internal_subnets)))}" + cidr_block = "${element(var.internal_subnets, count.index)}" + availability_zone = "${element(var.availability_zones, count.index)}" + count = "${length(var.internal_subnets)}" tags { - Name = "${var.name}-${format("internal-%03d", count.index+1)}" + Name = "${var.name}-${format("internal-%03d", count.index+1)}" + Environment = "${var.environment}" } } resource "aws_subnet" "external" { vpc_id = "${aws_vpc.main.id}" - cidr_block = "${element(split(",", var.external_subnets), count.index)}" - availability_zone = "${element(split(",", var.availability_zones), count.index)}" - count = "${length(compact(split(",", var.external_subnets)))}" + cidr_block = "${element(var.external_subnets, count.index)}" + availability_zone = "${element(var.availability_zones, count.index)}" + count = "${length(var.external_subnets)}" map_public_ip_on_launch = true tags { - Name = "${var.name}-${format("external-%03d", count.index+1)}" + Name = "${var.name}-${format("external-%03d", count.index+1)}" + Environment = "${var.environment}" } } @@ -97,42 +214,55 @@ resource "aws_subnet" "external" { resource "aws_route_table" "external" { vpc_id = "${aws_vpc.main.id}" - route { - cidr_block = "0.0.0.0/0" - gateway_id = "${aws_internet_gateway.main.id}" - } - tags { - Name = "${var.name}-external-001" + Name = "${var.name}-external-001" + Environment = "${var.environment}" } } +resource "aws_route" "external" { + route_table_id = "${aws_route_table.external.id}" + destination_cidr_block = "0.0.0.0/0" + gateway_id = "${aws_internet_gateway.main.id}" +} + resource "aws_route_table" "internal" { - count = "${length(compact(split(",", var.internal_subnets)))}" + count = "${length(var.internal_subnets)}" vpc_id = "${aws_vpc.main.id}" - route { - cidr_block = "0.0.0.0/0" - nat_gateway_id = "${element(aws_nat_gateway.main.*.id, count.index)}" - } - tags { - Name = "${var.name}-${format("internal-%03d", count.index+1)}" + Name = "${var.name}-${format("internal-%03d", count.index+1)}" + Environment = "${var.environment}" } } +resource "aws_route" "internal" { + # Create this only if using the NAT gateway service, vs. NAT instances. 
+ count = "${(1 - var.use_nat_instances) * length(compact(var.internal_subnets))}" + route_table_id = "${element(aws_route_table.internal.*.id, count.index)}" + destination_cidr_block = "0.0.0.0/0" + nat_gateway_id = "${element(aws_nat_gateway.main.*.id, count.index)}" +} + +resource "aws_route" "internal_nat_instance" { + count = "${(0 + var.use_nat_instances) * length(compact(var.internal_subnets))}" + route_table_id = "${element(aws_route_table.internal.*.id, count.index)}" + destination_cidr_block = "0.0.0.0/0" + instance_id = "${element(aws_instance.nat_instance.*.id, count.index)}" +} + /** * Route associations */ resource "aws_route_table_association" "internal" { - count = "${length(compact(split(",", var.internal_subnets)))}" + count = "${length(var.internal_subnets)}" subnet_id = "${element(aws_subnet.internal.*.id, count.index)}" route_table_id = "${element(aws_route_table.internal.*.id, count.index)}" } resource "aws_route_table_association" "external" { - count = "${length(compact(split(",", var.external_subnets)))}" + count = "${length(var.external_subnets)}" subnet_id = "${element(aws_subnet.external.*.id, count.index)}" route_table_id = "${aws_route_table.external.id}" } @@ -146,14 +276,19 @@ output "id" { value = "${aws_vpc.main.id}" } +// The VPC CIDR +output "cidr_block" { + value = "${aws_vpc.main.cidr_block}" +} + // A comma-separated list of subnet IDs. output "external_subnets" { - value = "${join(",", aws_subnet.external.*.id)}" + value = ["${aws_subnet.external.*.id}"] } -// A comma-separated list of subnet IDs. +// A list of subnet IDs. output "internal_subnets" { - value = "${join(",", aws_subnet.internal.*.id)}" + value = ["${aws_subnet.internal.*.id}"] } // The default VPC security group ID. @@ -163,5 +298,20 @@ output "security_group" { // The list of availability zones of the VPC. output "availability_zones" { - value = "${join(",", aws_subnet.external.*.availability_zone)}" + value = ["${aws_subnet.external.*.availability_zone}"] +} + +// The internal route table ID. +output "internal_rtb_id" { + value = "${join(",", aws_route_table.internal.*.id)}" +} + +// The external route table ID. +output "external_rtb_id" { + value = "${aws_route_table.external.id}" +} + +// The list of EIPs associated with the internal subnets. +output "internal_nat_ips" { + value = ["${aws_eip.nat.*.public_ip}"] } diff --git a/web-service/main.tf b/web-service/main.tf index ec48b36e..95b43db5 100644 --- a/web-service/main.tf +++ b/web-service/main.tf @@ -120,16 +120,28 @@ variable "cpu" { default = 512 } +variable "deployment_minimum_healthy_percent" { + description = "lower limit (% of desired_count) of # of running tasks during a deployment" + default = 100 +} + +variable "deployment_maximum_percent" { + description = "upper limit (% of desired_count) of # of running tasks during a deployment" + default = 200 +} + /** * Resources. 
*/ resource "aws_ecs_service" "main" { - name = "${module.task.name}" - cluster = "${var.cluster}" - task_definition = "${module.task.arn}" - desired_count = "${var.desired_count}" - iam_role = "${var.iam_role}" + name = "${module.task.name}" + cluster = "${var.cluster}" + task_definition = "${module.task.arn}" + desired_count = "${var.desired_count}" + iam_role = "${var.iam_role}" + deployment_minimum_healthy_percent = "${var.deployment_minimum_healthy_percent}" + deployment_maximum_percent = "${var.deployment_maximum_percent}" load_balancer { elb_name = "${module.elb.id}" diff --git a/worker/main.tf b/worker/main.tf index 33f56883..1a65285a 100644 --- a/worker/main.tf +++ b/worker/main.tf @@ -68,15 +68,27 @@ variable "cpu" { default = 512 } +variable "deployment_minimum_healthy_percent" { + description = "lower limit (% of desired_count) of # of running tasks during a deployment" + default = 100 +} + +variable "deployment_maximum_percent" { + description = "upper limit (% of desired_count) of # of running tasks during a deployment" + default = 200 +} + /** * Resources. */ resource "aws_ecs_service" "main" { - name = "${module.task.name}" - cluster = "${var.cluster}" - task_definition = "${module.task.arn}" - desired_count = "${var.desired_count}" + name = "${module.task.name}" + cluster = "${var.cluster}" + task_definition = "${module.task.arn}" + desired_count = "${var.desired_count}" + deployment_minimum_healthy_percent = "${var.deployment_minimum_healthy_percent}" + deployment_maximum_percent = "${var.deployment_maximum_percent}" lifecycle { create_before_destroy = true