-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathmain.tf
230 lines (189 loc) · 6.75 KB
/
main.tf
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
locals {
  # Labels attached to every resource group of this stack: the caller's tags
  # plus bookkeeping labels identifying the managing tool and module.
  common_labels = merge(var.tags, {
    managed_by = "terraform"
    module     = "materialize"
  })
}
# Network foundation: the VNet, the AKS and PostgreSQL subnets, and the
# private DNS zone consumed by the database module (see the outputs
# referenced by the aks/database modules below).
module "networking" {
  source = "./modules/networking"

  resource_group_name  = var.resource_group_name
  location             = var.location
  prefix               = var.prefix
  vnet_address_space   = var.network_config.vnet_address_space
  subnet_cidr          = var.network_config.subnet_cidr
  postgres_subnet_cidr = var.network_config.postgres_subnet_cidr
  tags                 = local.common_labels
}
# AKS cluster placed into the networking module's AKS subnet. The node pool
# is bounded by min_nodes/max_nodes (presumably cluster-autoscaler driven --
# confirm in ./modules/aks). Its workload identity principal is later granted
# access to the storage account.
module "aks" {
  source     = "./modules/aks"
  depends_on = [module.networking]

  resource_group_name = var.resource_group_name
  location            = var.location
  prefix              = var.prefix
  vnet_name           = module.networking.vnet_name
  subnet_name         = module.networking.aks_subnet_name
  subnet_id           = module.networking.aks_subnet_id
  service_cidr        = var.network_config.service_cidr
  vm_size             = var.aks_config.vm_size
  disk_size_gb        = var.aks_config.disk_size_gb
  min_nodes           = var.aks_config.min_nodes
  max_nodes           = var.aks_config.max_nodes
  tags                = local.common_labels
}
# PostgreSQL server used as the Materialize metadata (catalog) backend.
# It lives in the dedicated postgres subnet and is resolved through the
# private DNS zone created by the networking module.
module "database" {
  source     = "./modules/database"
  depends_on = [module.networking]

  database_name       = var.database_config.db_name
  database_user       = var.database_config.username
  resource_group_name = var.resource_group_name
  location            = var.location
  prefix              = var.prefix
  subnet_id           = module.networking.postgres_subnet_id
  private_dns_zone_id = module.networking.private_dns_zone_id
  sku_name            = var.database_config.sku_name
  postgres_version    = var.database_config.postgres_version
  password            = var.database_config.password
  tags                = local.common_labels
}
// TODO we should be generating one storage container per materialize_instance
# Blob storage backing the Materialize persist layer (see persist_backend_url
# below). Access is granted to the AKS workload identity and network access
# is restricted to the AKS subnet.
module "storage" {
  source     = "./modules/storage"
  depends_on = [module.aks, module.networking]

  resource_group_name   = var.resource_group_name
  location              = var.location
  prefix                = var.prefix
  identity_principal_id = module.aks.workload_identity_principal_id
  subnets               = [module.networking.aks_subnet_id]
  tags                  = local.common_labels
}
# cert-manager installation and (optionally) a self-signed ClusterIssuer.
# The issuer is only created when it is both requested AND at least one
# Materialize instance exists -- the same condition gates the TLS helm
# values in the locals block below.
module "certificates" {
  source = "./modules/certificates"

  install_cert_manager           = var.install_cert_manager
  cert_manager_install_timeout   = var.cert_manager_install_timeout
  cert_manager_chart_version     = var.cert_manager_chart_version
  use_self_signed_cluster_issuer = var.use_self_signed_cluster_issuer && length(var.materialize_instances) > 0
  cert_manager_namespace         = var.cert_manager_namespace
  name_prefix                    = var.prefix

  depends_on = [
    module.aks,
  ]
}
locals {
  # Base Helm values for the Materialize operator chart. Callers may override
  # any top-level key via var.helm_values (shallow merge below).
  default_helm_values = {
    operator = {
      # Only pin the orchestratord image tag when a version was supplied;
      # otherwise let the chart default apply.
      image = var.orchestratord_version == null ? {} : {
        tag = var.orchestratord_version
      },
      cloudProvider = {
        type   = "azure"
        region = var.location
      }
    }
    observability = {
      podMetrics = {
        enabled = true
      }
    }
    # TLS certificate specs are rendered only when the self-signed
    # ClusterIssuer is actually created (issuer requested AND at least one
    # instance exists) -- mirrors the condition passed to module.certificates.
    tls = (var.use_self_signed_cluster_issuer && length(var.materialize_instances) > 0) ? {
      defaultCertificateSpecs = {
        balancerdExternal = {
          dnsNames = [
            "balancerd",
          ]
          issuerRef = {
            name = module.certificates.cluster_issuer_name
            kind = "ClusterIssuer"
          }
        }
        consoleExternal = {
          dnsNames = [
            "console",
          ]
          issuerRef = {
            name = module.certificates.cluster_issuer_name
            kind = "ClusterIssuer"
          }
        }
        internal = {
          issuerRef = {
            name = module.certificates.cluster_issuer_name
            kind = "ClusterIssuer"
          }
        }
      }
    } : {}
  }

  # NOTE: merge() is shallow -- a user-supplied "operator" key in
  # var.helm_values replaces the entire default operator block, it is not
  # deep-merged into it.
  merged_helm_values = merge(local.default_helm_values, var.helm_values)

  # Normalized per-instance configuration handed to the operator module.
  instances = [
    for instance in var.materialize_instances : {
      name                 = instance.name
      namespace            = instance.namespace
      database_name        = instance.database_name
      environmentd_version = instance.environmentd_version

      # Connection string for the metadata (catalog) database. Credentials
      # are percent-encoded so reserved URI characters (e.g. '@', ':', '/',
      # '%') in the username or password cannot corrupt the URL.
      metadata_backend_url = format(
        "postgres://%s:%s@%s/%s?sslmode=require",
        urlencode(var.database_config.username),
        urlencode(var.database_config.password),
        module.database.database_host,
        coalesce(instance.database_name, instance.name)
      )

      // the endpoint by default ends in `/` we want to remove that
      # persist_backend_url = substr(module.storage.primary_blob_endpoint, 0, length(module.storage.primary_blob_endpoint) - 1)
      persist_backend_url = format(
        "%s%s?%s",
        module.storage.primary_blob_endpoint,
        module.storage.container_name,
        module.storage.primary_blob_sas_token
      )

      license_key = instance.license_key

      create_load_balancer   = instance.create_load_balancer
      internal_load_balancer = instance.internal_load_balancer

      cpu_request    = instance.cpu_request
      memory_request = instance.memory_request
      memory_limit   = instance.memory_limit

      create_database  = instance.create_database
      in_place_rollout = instance.in_place_rollout
      request_rollout  = instance.request_rollout
      force_rollout    = instance.force_rollout

      balancer_cpu_request    = instance.balancer_cpu_request
      balancer_memory_request = instance.balancer_memory_request
      balancer_memory_limit   = instance.balancer_memory_limit
    }
  ]
}
# Installs the Materialize Kubernetes operator via the shared Helm module
# (pinned to a released tag) and creates the Materialize instances built in
# local.instances. Gated entirely by var.install_materialize_operator.
module "operator" {
  source = "github.com/MaterializeInc/terraform-helm-materialize?ref=v0.1.12"

  count = var.install_materialize_operator ? 1 : 0

  depends_on = [
    module.aks,
    module.database,
    module.storage,
    module.certificates,
  ]

  namespace          = var.namespace
  environment        = var.prefix
  operator_version   = var.operator_version
  operator_namespace = var.operator_namespace

  # The metrics server already exists in the AKS cluster
  install_metrics_server = false

  helm_values = local.merged_helm_values
  instances   = local.instances

  // For development purposes, you can use a local Helm chart instead of fetching it from the Helm repository
  use_local_chart = var.use_local_chart
  helm_chart      = var.helm_chart

  providers = {
    kubernetes = kubernetes
    helm       = helm
  }
}
# Optional load balancer resources: one module instance per Materialize
# instance that requested a load balancer, keyed by instance name.
module "load_balancers" {
  source = "./modules/load_balancers"

  # Every element of local.instances defines create_load_balancer, so the
  # previous lookup(..., false) default was dead code (and lookup() on
  # objects is deprecated). `== true` also tolerates a null value.
  for_each = {
    for idx, instance in local.instances :
    instance.name => instance if instance.create_load_balancer == true
  }

  instance_name = each.value.name
  namespace     = module.operator[0].materialize_instances[each.value.name].namespace
  resource_id   = module.operator[0].materialize_instance_resource_ids[each.value.name]
  internal      = each.value.internal_load_balancer

  depends_on = [
    module.operator,
    module.aks,
  ]
}