Skip to content

Commit

Permalink
Convert to TF 0.12. Add tests. Add Codefresh test pipeline (#22)
Browse files Browse the repository at this point in the history
* Convert to TF 0.12

* Convert to TF 0.12

* Convert to TF 0.12

* Convert to TF 0.12

* Convert to TF 0.12

* Convert to TF 0.12

* Convert to TF 0.12

* Convert to TF 0.12

* Convert to TF 0.12

* Convert to TF 0.12

* Convert to TF 0.12

* Convert to TF 0.12

* Convert to TF 0.12

* Convert to TF 0.12

* Convert to TF 0.12

* Convert to TF 0.12

* Convert to TF 0.12

* Convert to TF 0.12

* Convert to TF 0.12

* Convert to TF 0.12

* Convert to TF 0.12

* Convert to TF 0.12

* Convert to TF 0.12

* Convert to TF 0.12

* Add test for worker nodes joining the cluster

* Add test for worker nodes joining the cluster

* Add test for worker nodes joining the cluster

* Update test.yaml

* Update test.yml

* Update test.yml

* Update tests

* Update ConfigMap

* Update ConfigMap

* Update README

* Update template

* Update tests
  • Loading branch information
aknysh authored Oct 1, 2019
1 parent 87efb2e commit 6465545
Show file tree
Hide file tree
Showing 27 changed files with 1,114 additions and 598 deletions.
3 changes: 0 additions & 3 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -5,9 +5,6 @@
*.tfstate
*.tfstate.*

# .tfvars files
*.tfvars

**/.idea
**/*.iml

Expand Down
16 changes: 0 additions & 16 deletions .travis.yml

This file was deleted.

222 changes: 122 additions & 100 deletions README.md

Large diffs are not rendered by default.

180 changes: 98 additions & 82 deletions README.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -18,9 +18,9 @@ github_repo: cloudposse/terraform-aws-eks-cluster

# Badges to display
badges:
- name: "Build Status"
image: "https://travis-ci.org/cloudposse/terraform-aws-eks-cluster.svg?branch=master"
url: "https://travis-ci.org/cloudposse/terraform-aws-eks-cluster"
- name: "Codefresh Build Status"
image: "https://g.codefresh.io/api/badges/pipeline/cloudposse/terraform-modules%2Fterraform-aws-eks-cluster?type=cf-1"
url: "https://g.codefresh.io/public/accounts/cloudposse/pipelines/5d8cd583941e46a098d3992d"
- name: "Latest Release"
image: "https://img.shields.io/github/release/cloudposse/terraform-aws-eks-cluster.svg"
url: "https://github.com/cloudposse/terraform-aws-eks-cluster/releases/latest"
Expand All @@ -41,7 +41,7 @@ related:
- name: "terraform-aws-ecs-alb-service-task"
description: "Terraform module which implements an ECS service which exposes a web service via ALB"
url: "https://github.com/cloudposse/terraform-aws-ecs-alb-service-task"
- name: "erraform-aws-ecs-web-app"
- name: "terraform-aws-ecs-web-app"
description: "Terraform module that implements a web app on ECS and supports autoscaling, CI/CD, monitoring, ALB integration, and much more"
url: "https://github.com/cloudposse/terraform-aws-ecs-web-app"
- name: "terraform-aws-ecs-codepipeline"
Expand Down Expand Up @@ -82,84 +82,96 @@ usage: |-
- [terraform-root-modules/eks-backing-services-peering](https://github.com/cloudposse/terraform-root-modules/tree/master/aws/eks-backing-services-peering) - example of VPC peering between the EKS VPC and backing services VPC
```hcl
provider "aws" {
region = "us-west-1"
}
variable "tags" {
type = "map"
default = {}
description = "Additional tags (e.g. map('BusinessUnit','XYZ')"
}
locals {
# The usage of the specific kubernetes.io/cluster/* resource tags below are required
# for EKS and Kubernetes to discover and manage networking resources
# https://www.terraform.io/docs/providers/aws/guides/eks-getting-started.html#base-vpc-networking
tags = "${merge(var.tags, map("kubernetes.io/cluster/eg-testing-cluster", "shared"))}"
}
module "vpc" {
source = "git::https://github.com/cloudposse/terraform-aws-vpc.git?ref=master"
namespace = "eg"
stage = "testing"
name = "cluster"
tags = "${local.tags}"
cidr_block = "10.0.0.0/16"
}
module "subnets" {
source = "git::https://github.com/cloudposse/terraform-aws-dynamic-subnets.git?ref=master"
availability_zones = ["us-west-1a", "us-west-1b", "us-west-1c", "us-west-1d"]
namespace = "eg"
stage = "testing"
name = "cluster"
tags = "${local.tags}"
region = "us-west-1"
vpc_id = "${module.vpc.vpc_id}"
igw_id = "${module.vpc.igw_id}"
cidr_block = "${module.vpc.vpc_cidr_block}"
nat_gateway_enabled = "true"
}
module "eks_cluster" {
source = "git::https://github.com/cloudposse/terraform-aws-eks-cluster.git?ref=master"
namespace = "eg"
stage = "testing"
name = "cluster"
tags = "${var.tags}"
vpc_id = "${module.vpc.vpc_id}"
subnet_ids = ["${module.subnets.public_subnet_ids}"]
# `workers_security_group_count` is needed to prevent `count can't be computed` errors
workers_security_group_ids = ["${module.eks_workers.security_group_id}"]
workers_security_group_count = 1
}
module "eks_workers" {
source = "git::https://github.com/cloudposse/terraform-aws-eks-workers.git?ref=master"
namespace = "eg"
stage = "testing"
name = "cluster"
tags = "${var.tags}"
instance_type = "t2.medium"
vpc_id = "${module.vpc.vpc_id}"
subnet_ids = ["${module.subnets.public_subnet_ids}"]
health_check_type = "EC2"
min_size = 1
max_size = 3
wait_for_capacity_timeout = "10m"
associate_public_ip_address = true
cluster_name = "eg-testing-cluster"
cluster_endpoint = "${module.eks_cluster.eks_cluster_endpoint}"
cluster_certificate_authority_data = "${module.eks_cluster.eks_cluster_certificate_authority_data}"
cluster_security_group_id = "${module.eks_cluster.security_group_id}"
# Auto-scaling policies and CloudWatch metric alarms
autoscaling_policies_enabled = "true"
cpu_utilization_high_threshold_percent = "80"
cpu_utilization_low_threshold_percent = "20"
}
provider "aws" {
region = var.region
}
module "label" {
source = "git::https://github.com/cloudposse/terraform-null-label.git?ref=master"
namespace = var.namespace
name = var.name
stage = var.stage
delimiter = var.delimiter
attributes = compact(concat(var.attributes, list("cluster")))
tags = var.tags
}
locals {
# The usage of the specific kubernetes.io/cluster/* resource tags below is required
# for EKS and Kubernetes to discover and manage networking resources
# https://www.terraform.io/docs/providers/aws/guides/eks-getting-started.html#base-vpc-networking
tags = merge(var.tags, map("kubernetes.io/cluster/${module.label.id}", "shared"))
}
module "vpc" {
source = "git::https://github.com/cloudposse/terraform-aws-vpc.git?ref=master"
namespace = var.namespace
stage = var.stage
name = var.name
attributes = var.attributes
cidr_block = "172.16.0.0/16"
tags = local.tags
}
module "subnets" {
source = "git::https://github.com/cloudposse/terraform-aws-dynamic-subnets.git?ref=master"
availability_zones = var.availability_zones
namespace = var.namespace
stage = var.stage
name = var.name
attributes = var.attributes
vpc_id = module.vpc.vpc_id
igw_id = module.vpc.igw_id
cidr_block = module.vpc.vpc_cidr_block
nat_gateway_enabled = false
nat_instance_enabled = false
tags = local.tags
}
module "eks_workers" {
source = "git::https://github.com/cloudposse/terraform-aws-eks-workers.git?ref=master"
namespace = var.namespace
stage = var.stage
name = var.name
attributes = var.attributes
tags = var.tags
instance_type = var.instance_type
vpc_id = module.vpc.vpc_id
subnet_ids = module.subnets.public_subnet_ids
health_check_type = var.health_check_type
min_size = var.min_size
max_size = var.max_size
wait_for_capacity_timeout = var.wait_for_capacity_timeout
cluster_name = module.label.id
cluster_endpoint = module.eks_cluster.eks_cluster_endpoint
cluster_certificate_authority_data = module.eks_cluster.eks_cluster_certificate_authority_data
cluster_security_group_id = module.eks_cluster.security_group_id
# Auto-scaling policies and CloudWatch metric alarms
autoscaling_policies_enabled = var.autoscaling_policies_enabled
cpu_utilization_high_threshold_percent = var.cpu_utilization_high_threshold_percent
cpu_utilization_low_threshold_percent = var.cpu_utilization_low_threshold_percent
}
module "eks_cluster" {
source = "git::https://github.com/cloudposse/terraform-aws-eks-cluster.git?ref=master"
namespace = var.namespace
stage = var.stage
name = var.name
attributes = var.attributes
tags = var.tags
vpc_id = module.vpc.vpc_id
subnet_ids = module.subnets.public_subnet_ids
kubernetes_version = var.kubernetes_version
# `workers_security_group_count` is needed to prevent `count can't be computed` errors
workers_security_group_ids = [module.eks_workers.security_group_id]
workers_security_group_count = 1
workers_role_arns = [module.eks_workers.workers_role_arn]
kubeconfig_path = var.kubeconfig_path
}
```
include:
Expand All @@ -180,3 +192,7 @@ contributors:
homepage: "https://github.com/goruha/"
avatar: "http://s.gravatar.com/avatar/bc70834d32ed4517568a1feb0b9be7e2?s=144"
github: "goruha"
- name: "Oscar"
homepage: "https://github.com/osulli/"
avatar: "https://avatars1.githubusercontent.com/u/46930728?v=4&s=144"
github: "osulli"
91 changes: 91 additions & 0 deletions auth.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,91 @@
# The EKS service does not provide a cluster-level API parameter or resource to automatically configure the underlying Kubernetes cluster
# to allow worker nodes to join the cluster via AWS IAM role authentication.

# NOTE: To automatically apply the Kubernetes configuration to the cluster (which allows the worker nodes to join the cluster),
# the requirements outlined here must be met:
# https://learn.hashicorp.com/terraform/aws/eks-intro#preparation
# https://learn.hashicorp.com/terraform/aws/eks-intro#configuring-kubectl-for-eks
# https://learn.hashicorp.com/terraform/aws/eks-intro#required-kubernetes-configuration-to-join-worker-nodes

# Additional links
# https://learn.hashicorp.com/terraform/aws/eks-intro
# https://itnext.io/how-does-client-authentication-work-on-amazon-eks-c4f2b90d943b
# https://docs.aws.amazon.com/eks/latest/userguide/create-kubeconfig.html
# https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html
# https://docs.aws.amazon.com/cli/latest/reference/eks/update-kubeconfig.html
# https://docs.aws.amazon.com/en_pv/eks/latest/userguide/create-kubeconfig.html
# https://itnext.io/kubernetes-authorization-via-open-policy-agent-a9455d9d5ceb
# http://marcinkaszynski.com/2018/07/12/eks-auth.html
# https://cloud.google.com/kubernetes-engine/docs/concepts/configmap
# http://yaml-multiline.info
# https://github.com/terraform-providers/terraform-provider-kubernetes/issues/216


locals {
# The EKS cluster's certificate_authority attribute is a list of lists of maps
# (one per cluster instance created by `count`). coalescelist() supplies a
# placeholder with an empty "data" key so the chained lookups below do not
# fail when the cluster resource has count = 0 (module disabled).
certificate_authority_data_list = coalescelist(aws_eks_cluster.default.*.certificate_authority, [[{ data : "" }]])
certificate_authority_data_list_internal = local.certificate_authority_data_list[0]
certificate_authority_data_map = local.certificate_authority_data_list_internal[0]
# Base64-encoded CA certificate used in the generated kubeconfig
certificate_authority_data = local.certificate_authority_data_map["data"]

# Template for the aws-auth ConfigMap and the path the rendered manifest is
# written to before being applied with kubectl (see null_resource below)
configmap_auth_template_file = "${path.module}/configmap-auth.yaml.tpl"
configmap_auth_file = "${path.module}/configmap-auth.yaml"

# join() collapses the 0-or-1 element splat list into a plain string
cluster_name = join("", aws_eks_cluster.default.*.id)

# Add worker nodes role ARNs (could be from many worker groups) to the ConfigMap.
# Each entry grants the role the groups EKS requires for nodes to join:
# system:bootstrappers and system:nodes.
map_worker_roles = [
for role_arn in var.workers_role_arns : {
rolearn : role_arn
username : "system:node:{{EC2PrivateDNSName}}"
groups : [
"system:bootstrappers",
"system:nodes"
]
}
]

# trimspace() strips the trailing newline yamlencode() emits so the fragments
# can be interpolated into the ConfigMap template without breaking YAML layout
map_worker_roles_yaml = trimspace(yamlencode(local.map_worker_roles))
map_additional_iam_roles_yaml = trimspace(yamlencode(var.map_additional_iam_roles))
map_additional_iam_users_yaml = trimspace(yamlencode(var.map_additional_iam_users))
map_additional_aws_accounts_yaml = trimspace(yamlencode(var.map_additional_aws_accounts))
}

# Render the aws-auth ConfigMap from the template, substituting the YAML
# fragments for worker roles and any additional IAM roles/users/accounts.
# NOTE(review): the `template_file` data source is superseded by the built-in
# templatefile() function in TF 0.12+; kept here as a data source so other
# resources can reference data.template_file.configmap_auth.*.rendered.
data "template_file" "configmap_auth" {
count = var.enabled && var.apply_config_map_aws_auth ? 1 : 0
template = file(local.configmap_auth_template_file)

vars = {
map_worker_roles_yaml = local.map_worker_roles_yaml
map_additional_iam_roles_yaml = local.map_additional_iam_roles_yaml
map_additional_iam_users_yaml = local.map_additional_iam_users_yaml
map_additional_aws_accounts_yaml = local.map_additional_aws_accounts_yaml
}
}

# Write the rendered aws-auth ConfigMap to disk so it can be applied with
# kubectl by the null_resource provisioner below.
resource "local_file" "configmap_auth" {
count = var.enabled && var.apply_config_map_aws_auth ? 1 : 0
# join() flattens the 0-or-1 element splat list produced by `count` into a string
content = join("", data.template_file.configmap_auth.*.rendered)
filename = local.configmap_auth_file
}

# Apply the rendered aws-auth ConfigMap to the cluster so worker nodes can
# join via IAM role authentication. Requires `aws` and `kubectl` on the
# machine running Terraform, with credentials for the cluster's AWS account.
resource "null_resource" "apply_configmap_auth" {
  count = var.enabled && var.apply_config_map_aws_auth ? 1 : 0

  # Re-run the provisioner whenever the cluster is recreated or any of the
  # rendered IAM mappings change
  triggers = {
    cluster_updated                 = join("", aws_eks_cluster.default.*.id)
    worker_roles_updated            = local.map_worker_roles_yaml
    additional_roles_updated        = local.map_additional_iam_roles_yaml
    additional_users_updated        = local.map_additional_iam_users_yaml
    additional_aws_accounts_updated = local.map_additional_aws_accounts_yaml
  }

  depends_on = [aws_eks_cluster.default, local_file.configmap_auth]

  # Wait for the rendered manifest to appear on disk, generate a kubeconfig
  # for the new cluster, then apply the ConfigMap.
  # Uses POSIX `[ ... ]` instead of bash-only `[[ ... ]]` because local-exec
  # runs the command with `/bin/sh -c` by default, which may be dash/ash.
  provisioner "local-exec" {
    command = <<EOT
while [ ! -e ${local.configmap_auth_file} ]; do sleep 1; done && \
aws eks update-kubeconfig --name=${local.cluster_name} --region=${var.region} --kubeconfig=${var.kubeconfig_path} && \
kubectl apply -f ${local.configmap_auth_file} --kubeconfig ${var.kubeconfig_path}
EOT
  }
}
Loading

0 comments on commit 6465545

Please sign in to comment.