Skip to content

add karpenter for auto-scaling #65

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Draft
wants to merge 1 commit into
base: main
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
76 changes: 73 additions & 3 deletions examples/simple/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,51 @@ provider "helm" {
}
}

# kubectl provider used to apply the raw Karpenter manifests (EC2NodeClass /
# NodePool) against the EKS cluster created by module.materialize_infrastructure.
provider "kubectl" {
  host                   = module.materialize_infrastructure.eks_cluster_endpoint
  cluster_ca_certificate = base64decode(module.materialize_infrastructure.cluster_certificate_authority_data)
  # Authenticate with a short-lived token from the AWS CLI instead of a static
  # credential, matching the standard EKS exec-auth flow.
  exec {
    api_version = "client.authentication.k8s.io/v1beta1"
    command     = "aws"
    args        = ["eks", "get-token", "--cluster-name", module.materialize_infrastructure.eks_cluster_name]
  }
  # Do not read ~/.kube/config; connection details come solely from the module outputs above.
  load_config_file = false
}


locals {
  # Per-component Helm value overrides that pin each Materialize component to a
  # node pool via nodeSelector. The "workload" label values must match the node
  # labels applied by the cluster's node groups / Karpenter NodePool.
  with_karpenter_installed = {
    # clusterd and environmentd are steered onto Karpenter-provisioned nodes
    # (workload = "materialize-instance-karpenter").
    "clusterd" = {
      nodeSelector = {
        workload = "materialize-instance-karpenter"
      }
    }
    # operator, console, and balancerd stay on the static node group
    # (workload = "materialize-instance").
    # NOTE(review): the clusterd/environmentd vs. operator/console/balancerd
    # split appears intentional (only the former need Karpenter capacity) —
    # confirm this matches the node labels actually provisioned.
    "operator" = {
      nodeSelector = {
        workload = "materialize-instance"
      }
    }
    "environmentd" = {
      nodeSelector = {
        workload = "materialize-instance-karpenter"
      }
    }
    "console" = {
      nodeSelector = {
        workload = "materialize-instance"
        # "karpenter.sh/registered" = "m7g.medium"
      }
    }
    "balancerd" = {
      nodeSelector = {
        workload = "materialize-instance"
        # "karpenter.sh/registered" = "m7g.medium"
      }
    }
  }
  # Only inject the selectors when Karpenter is installed; otherwise the Helm
  # values are left untouched (empty map merges as a no-op).
  node_selectors = var.install_karpenter ? local.with_karpenter_installed : {}
}

module "materialize_infrastructure" {
# To pull this from GitHub, use the following:
# source = "git::https://github.com/MaterializeInc/terraform-aws-materialize.git"
Expand All @@ -52,11 +97,15 @@ module "materialize_infrastructure" {
# EKS Configuration
cluster_version = "1.32"
node_group_instance_types = ["r7gd.2xlarge"]
node_group_desired_size = 1
node_group_min_size = 1
node_group_desired_size = 2
node_group_min_size = 2
node_group_max_size = 2
node_group_capacity_type = "ON_DEMAND"
enable_cluster_creator_admin_permissions = true
install_karpenter = var.install_karpenter
karpenter_instance_sizes = var.karpenter_instance_sizes

enable_disk_support = true

# Storage Configuration
bucket_force_destroy = true
Expand Down Expand Up @@ -87,7 +136,7 @@ module "materialize_infrastructure" {
install_materialize_operator = true
operator_version = var.operator_version
orchestratord_version = var.orchestratord_version
helm_values = var.helm_values
helm_values = merge(local.node_selectors, var.helm_values)

# Once the operator is installed, you can define your Materialize instances here.
materialize_instances = var.materialize_instances
Expand Down Expand Up @@ -172,6 +221,27 @@ variable "use_self_signed_cluster_issuer" {
default = true
}

# Feature flag threaded through to the infrastructure module; also gates the
# local.node_selectors overrides in this example.
variable "install_karpenter" {
  description = "Whether to install karpenter: https://karpenter.sh"
  type        = bool
  default     = false
}

# EC2 instance types Karpenter is allowed to provision. Passed through to the
# NodePool's "node.kubernetes.io/instance-type" requirement in the eks module.
variable "karpenter_instance_sizes" {
  description = "EC2 instance types Karpenter may provision for Materialize workloads"
  type        = list(string)
  # Defaults are the recommended Materialize cluster sizes; all are "d"
  # variants with local NVMe disk. Console and balancerd do not need disk, so
  # a diskless type (e.g. "m7g.medium") could optionally be added to run them
  # on their own cheaper nodes.
  default = [
    "r7gd.xlarge",
    "r7gd.2xlarge",
    "r7gd.4xlarge",
    "r7gd.8xlarge",
    "r7gd.16xlarge",
  ]
}
# Outputs
output "vpc_id" {
description = "VPC ID"
Expand Down
4 changes: 4 additions & 0 deletions main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -22,9 +22,11 @@ module "eks" {
# e.g. ${namespace}-${environment}-eks
namespace = var.namespace
environment = var.environment
region = data.aws_region.current.name

cluster_version = var.cluster_version
vpc_id = local.network_id
vpc_cidr = var.vpc_cidr
private_subnet_ids = local.network_private_subnet_ids
node_group_desired_size = var.node_group_desired_size
node_group_min_size = var.node_group_min_size
Expand All @@ -35,6 +37,8 @@ module "eks" {
node_group_capacity_type = var.node_group_capacity_type
enable_cluster_creator_admin_permissions = var.enable_cluster_creator_admin_permissions

install_karpenter = var.install_karpenter

install_openebs = local.disk_config.install_openebs
enable_disk_setup = local.disk_config.run_disk_setup_script
openebs_namespace = local.disk_config.openebs_namespace
Expand Down
109 changes: 109 additions & 0 deletions modules/eks/karpenter.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,109 @@
# Karpenter EC2NodeClass: the AWS-specific launch template (AMI, IAM role,
# subnets, security groups, disk, metadata options) for nodes Karpenter
# provisions. Only created when Karpenter itself is installed.
resource "kubectl_manifest" "karpenter_node_class" {
  count = var.install_karpenter ? 1 : 0
  # Take ownership of conflicting fields on server-side apply.
  force_conflicts = true


  yaml_body = yamlencode({
    apiVersion = "karpenter.k8s.aws/v1"
    kind       = "EC2NodeClass"
    metadata = {
      name = "${local.name_prefix}-node-class2"
    }
    spec = {
      # Always track the latest Amazon Linux 2023 AMI.
      amiSelectorTerms = [{
        alias = "al2023@latest"
      }]
      # Reuse the IAM role of the existing managed node group so Karpenter
      # nodes get identical permissions.
      # NOTE(review): assumes the managed node group is keyed
      # "${local.name_prefix}-mz" — confirm against the eks module definition.
      role = module.eks.eks_managed_node_groups["${local.name_prefix}-mz"].iam_role_name
      # Launch only into the cluster's private subnets.
      subnetSelectorTerms = [
        for sn_id in var.private_subnet_ids :
        {
          id = sn_id
        }
      ]
      securityGroupSelectorTerms = [
        {
          id = module.eks.node_security_group_id
        }
      ]
      kubelet = {
        # Cluster DNS service address: the .10 host of the service CIDR
        # (the conventional kube-dns/CoreDNS address).
        clusterDNS = [cidrhost(module.eks.cluster_service_cidr, 10)]
      }
      # Root volume for the node; workload data disks (NVMe) are handled by
      # the disk setup script below, not here.
      blockDeviceMappings = [
        {
          deviceName = "/dev/xvda"
          ebs = {
            deleteOnTermination = true
            volumeSize          = "20Gi"
            volumeType          = "gp3"
            encrypted           = true
          }
        }
      ]

      # Bootstrap user data: runs the local-disk setup script when disk
      # support is enabled, otherwise no extra bootstrap.
      userData = var.enable_disk_setup ? local.disk_setup_script : ""
      tags = merge(var.tags, {
        Name = "${local.name_prefix}-karpenter"
      })
      # httpTokens = "required" enforces IMDSv2; hop limit 3 lets containerized
      # workloads reach the instance metadata service.
      metadataOptions = {
        httpEndpoint            = "enabled"
        httpProtocolIPv6        = "enabled"
        httpPutResponseHopLimit = 3
        httpTokens              = "required"
      }
    }
  })

  # The Karpenter controller and its CRDs must exist before this manifest applies.
  depends_on = [helm_release.karpenter]
}

# Karpenter NodePool: scheduling constraints, node labels, and lifecycle policy
# for nodes provisioned via the EC2NodeClass above.
resource "kubectl_manifest" "karpenter_node_pool" {
  count = var.install_karpenter ? 1 : 0
  # Take ownership of conflicting fields on server-side apply.
  force_conflicts = true

  yaml_body = yamlencode({
    apiVersion = "karpenter.sh/v1"
    kind       = "NodePool"
    metadata = {
      name = "${local.name_prefix}-node-pool2"
    }
    spec = {
      template = {
        metadata = {
          # Labels stamped on every provisioned node. "workload" must match the
          # nodeSelector used by components meant to land on Karpenter nodes.
          labels = {
            "Environment"            = var.environment
            "Name"                   = "${local.name_prefix}-karpenter_node_pool"
            "materialize.cloud/disk" = var.enable_disk_setup ? "true" : "false"
            "workload"               = "materialize-instance-karpenter"
          }
        }
        spec = {
          # Nodes are never expired/rotated on a timer.
          expireAfter = "Never"
          # Must stay in sync with the EC2NodeClass name created by
          # kubectl_manifest.karpenter_node_class.
          nodeClassRef = {
            group = "karpenter.k8s.aws"
            kind  = "EC2NodeClass"
            name  = "${local.name_prefix}-node-class2"
          }
          requirements = [
            # Restrict provisioning to the operator-configured instance types.
            {
              key      = "node.kubernetes.io/instance-type"
              operator = "In"
              values   = var.karpenter_instance_sizes
            },
            # No spot capacity: on-demand or reserved only.
            {
              key      = "karpenter.sh/capacity-type"
              operator = "In"
              values   = ["on-demand", "reserved"]
            }
          ]
        }
      }
      # Deprovision nodes only once they are empty, 15s after draining.
      disruption = {
        consolidationPolicy = "WhenEmpty"
        consolidateAfter    = "15s"

      }
    }
  })

  depends_on = [kubectl_manifest.karpenter_node_class]
}
Loading
Loading