Kafka
locals {
  # Make sure all components use the same compression.type
  compression_type = "zstd"

  kafka_server = "kafka.kafka.svc.cluster.local:9092" # The port number is mandatory
}
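As a hedged illustration of why these locals exist, other components can reference them so that producers and the broker-side topic configuration agree on the compression codec and bootstrap address. The release name, chart path, and value keys below are hypothetical placeholders, not part of the actual deployment:

# Sketch only: the release name, chart path, and value keys are hypothetical.
resource "helm_release" "dyff_example" {
  name      = "dyff"
  namespace = "dyff"
  chart     = "./charts/dyff" # hypothetical chart location

  values = [yamlencode({
    kafka = {
      bootstrapServers = local.kafka_server      # already includes the mandatory port
      compressionType  = local.compression_type  # matches the topic-level compression.type
    }
  })]
}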
Generate a cluster ID
resource "random_string" "cluster_id" {
length = 32
special = false
}
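The generated value is kept in Terraform state, so it stays stable across applies. If it needs to be recorded or reused outside this module, it can be exposed as an output; the output name here is just an example:

output "kafka_cluster_id" {
  description = "KRaft cluster ID passed to the Kafka chart"
  value       = random_string.cluster_id.result
}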
Create the kafka namespace
resource "kubernetes_namespace" "kafka" {
metadata {
name = "kafka"
labels = {
"pod-security.kubernetes.io/enforce" = "restricted"
}
}
}
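Optionally, the companion Pod Security Admission modes can be labeled as well, so that violations also produce client warnings and audit log entries in addition to being rejected by the enforce mode. This is a variant of the resource above with the extra labels, not an additional resource:

resource "kubernetes_namespace" "kafka" {
  metadata {
    name = "kafka"

    labels = {
      "pod-security.kubernetes.io/enforce" = "restricted"
      "pod-security.kubernetes.io/warn"    = "restricted"
      "pod-security.kubernetes.io/audit"   = "restricted"
    }
  }
}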
Deploy the Kafka chart
Deploy the kafka Helm chart from Bitnami:
# https://artifacthub.io/packages/helm/bitnami/kafka
resource "helm_release" "kafka" {
  name       = "kafka"
  namespace  = kubernetes_namespace.kafka.metadata[0].name
  repository = "oci://registry-1.docker.io/bitnamicharts"
  chart      = "kafka"
  version    = "27.1.2"

  values = [yamlencode({
    controller = {
      replicaCount    = 3
      resourcesPreset = "large"
    }

    extraConfig = {
      # Don't create new topics on the fly
      autoCreateTopicsEnable = false
    }

    kraft = {
      # Use the cluster ID generated previously
      clusterId = random_string.cluster_id.result
    }

    listeners = {
      client = {
        protocol = "PLAINTEXT"
      }
    }

    persistentVolumeClaimRetentionPolicy = {
      enabled     = true
      whenScaled  = "Retain"
      whenDeleted = "Retain"
    }

    provisioning = {
      enabled           = true
      numPartitions     = 60
      replicationFactor = 3
      resourcesPreset   = "small"

      topics = [
        {
          name = "dyff.workflows.events"
          config = {
            "compression.type" = local.compression_type
            # Retain records forever
            "retention.bytes" = "-1"
            "retention.ms"    = "-1"
          }
        },
        {
          # Messages in state are expected to be retained forever
          name = "dyff.workflows.state"
          config = {
            "compression.type" = local.compression_type
            # Retain only the latest value for each key
            "cleanup.policy" = "compact"
            # Retain records forever
            "retention.bytes" = "-1"
            "retention.ms"    = "-1"
          }
        },
      ]
    }
  })]
}
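As a final sketch, the connection details and the provisioned topic names could be published to in-cluster clients, for example via a ConfigMap. The resource name and data keys below are illustrative assumptions, not part of the documented deployment:

# Sketch only: the ConfigMap name and keys are illustrative.
resource "kubernetes_config_map" "kafka_connection" {
  metadata {
    name      = "kafka-connection"
    namespace = kubernetes_namespace.kafka.metadata[0].name
  }

  data = {
    "bootstrap.servers" = local.kafka_server
    "events.topic"      = "dyff.workflows.events"
    "state.topic"       = "dyff.workflows.state"
  }
}

Because the client listener is configured as PLAINTEXT and the bootstrap address is a cluster-local DNS name, this endpoint is intended for in-cluster use only.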