# main.tf (forked from cloudposse/terraform-aws-eks-cluster)

provider "aws" {
region = var.region
}
module "label" {
source = "cloudposse/label/null"
version = "0.25.0"
attributes = ["cluster"]
context = module.this.context
}
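
# Look up the identity running Terraform. `aws_iam_session_context` resolves
# an assumed-role session ARN to the ARN of the underlying IAM role, which is
# the form EKS access entries expect.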
data "aws_caller_identity" "current" {}
data "aws_iam_session_context" "current" {
arn = data.aws_caller_identity.current.arn
}

locals {
  enabled = module.this.enabled

  private_ipv6_enabled = var.private_ipv6_enabled

  # The specific `kubernetes.io/cluster/*` resource tags below are required
  # for EKS and Kubernetes to discover and manage networking resources.
  # https://aws.amazon.com/premiumsupport/knowledge-center/eks-vpc-subnet-discovery/
  # https://github.com/kubernetes-sigs/aws-load-balancer-controller/blob/main/docs/deploy/subnet_discovery.md
  tags = { "kubernetes.io/cluster/${module.label.id}" = "shared" }

  # Required tags to make ALB ingress work
  # https://docs.aws.amazon.com/eks/latest/userguide/alb-ingress.html
  public_subnets_additional_tags = {
    "kubernetes.io/role/elb" : 1
  }
  private_subnets_additional_tags = {
    "kubernetes.io/role/internal-elb" : 1
  }
  # Enable the IAM principal creating the cluster to administer it,
  # without using the `bootstrap_cluster_creator_admin_permissions` option,
  # as a way to test the `access_entry_map` feature.
  # In general, this is not recommended. Instead, you should create the
  # `access_entry_map` statically, with the ARNs you want to have access
  # to the cluster. We build it dynamically here for testing purposes only.
  access_entry_map = {
    (data.aws_iam_session_context.current.issuer_arn) = {
      access_policy_associations = {
        ClusterAdmin = {}
      }
    }
  }
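
  # A minimal sketch of the recommended static alternative; the role ARN
  # below is a hypothetical placeholder, not part of this project:
  #
  # access_entry_map = {
  #   "arn:aws:iam::111111111111:role/eks-admins" = {
  #     access_policy_associations = {
  #       ClusterAdmin = {}
  #     }
  #   }
  # }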

  # https://docs.aws.amazon.com/eks/latest/userguide/managing-vpc-cni.html#vpc-cni-latest-available-version
  vpc_cni_addon = {
    addon_name               = "vpc-cni"
    addon_version            = null
    resolve_conflicts        = "OVERWRITE"
    service_account_role_arn = one(module.vpc_cni_eks_iam_role[*].service_account_role_arn)
  }

  addons = concat([
    local.vpc_cni_addon
  ], var.addons)
}
module "vpc" {
source = "cloudposse/vpc/aws"
version = "2.2.0"
ipv4_primary_cidr_block = "172.16.0.0/16"
tags = local.tags
context = module.this.context
}
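
# A single NAT gateway (`max_nats = 1`) keeps this example cheap; a production
# cluster would typically run one NAT gateway per AZ for high availability.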
module "subnets" {
source = "cloudposse/dynamic-subnets/aws"
version = "2.4.2"
availability_zones = var.availability_zones
vpc_id = module.vpc.vpc_id
igw_id = [module.vpc.igw_id]
ipv4_cidr_block = [module.vpc.vpc_cidr_block]
ipv6_cidr_block = [module.vpc.vpc_ipv6_cidr_block]
ipv6_enabled = true
max_nats = 1
nat_gateway_enabled = true
nat_instance_enabled = false
tags = local.tags
public_subnets_additional_tags = local.public_subnets_additional_tags
private_subnets_enabled = true
private_subnets_additional_tags = local.private_subnets_additional_tags
context = module.this.context
}
module "eks_cluster" {
source = "../../"
subnet_ids = concat(module.subnets.private_subnet_ids, module.subnets.public_subnet_ids)
kubernetes_version = var.kubernetes_version
oidc_provider_enabled = var.oidc_provider_enabled
enabled_cluster_log_types = var.enabled_cluster_log_types
cluster_log_retention_period = var.cluster_log_retention_period
cluster_encryption_config_enabled = var.cluster_encryption_config_enabled
cluster_encryption_config_kms_key_id = var.cluster_encryption_config_kms_key_id
cluster_encryption_config_kms_key_enable_key_rotation = var.cluster_encryption_config_kms_key_enable_key_rotation
cluster_encryption_config_kms_key_deletion_window_in_days = var.cluster_encryption_config_kms_key_deletion_window_in_days
cluster_encryption_config_kms_key_policy = var.cluster_encryption_config_kms_key_policy
cluster_encryption_config_resources = var.cluster_encryption_config_resources
addons = local.addons
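  # EKS addons such as `coredns` cannot reach the ACTIVE state until there
  # are worker nodes for their pods to run on, so addon creation waits for
  # the node group.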
  addons_depends_on = [module.eks_node_group]

  bootstrap_self_managed_addons_enabled = var.bootstrap_self_managed_addons_enabled

  upgrade_policy     = var.upgrade_policy
  zonal_shift_config = var.zonal_shift_config

  access_entry_map = local.access_entry_map
  access_config = {
    authentication_mode                         = "API"
    bootstrap_cluster_creator_admin_permissions = false
  }

  # This exercises `allowed_security_group_ids` and `allowed_cidr_blocks`.
  # In a real cluster, these would be other (existing) Security Groups and
  # CIDR blocks that should be allowed access to the cluster.
  allowed_security_group_ids = [module.vpc.vpc_default_security_group_id]
  allowed_cidr_blocks        = [module.vpc.vpc_cidr_block]

  kubernetes_network_ipv6_enabled = local.private_ipv6_enabled

  context = module.this.context

  cluster_depends_on = [module.subnets]
}
module "eks_node_group" {
source = "cloudposse/eks-node-group/aws"
version = "2.12.0"
subnet_ids = module.subnets.private_subnet_ids
cluster_name = module.eks_cluster.eks_cluster_id
instance_types = var.instance_types
desired_size = var.desired_size
min_size = var.min_size
max_size = var.max_size
kubernetes_labels = var.kubernetes_labels
context = module.this.context
}