Build a Kubernetes Cluster and LogScale Deployment
This walks through the process of building a three-node Kubernetes cluster and deploying LogScale on it. The following was used for each node:
- Ubuntu 22.04.1
- t3.2xlarge instance
- 256 GB of gp2 storage
Please note this was written for lab usage. You will need a valid LogScale license for the deployment.
sudo apt update && sudo apt upgrade
sudo reboot
echo "export KUBECONFIG=~/.kube/config" >> ~/.bashrc
echo "export PATH=$PATH:/var/lib/kurl/helm" >> ~/.bashrc
source ~/.bashrc
# Run this on the COORDINATOR node.
# It can take a while to complete.
curl https://kurl.sh/latest | sudo bash
# Run the final output from the above command on the WORKER nodes.
# Example: curl -fsSL https://kurl.sh/version/v2023.11.20-0/latest/join.sh | sudo bash -s kuber...
# Run this on the COORDINATOR node.
# Type "y" to overwrite the file.
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# Check the nodes.
# Run on the COORDINATOR node.
kubectl get nodes
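# Optionally wait until all three nodes report Ready before continuing.
kubectl wait --for=condition=Ready nodes --all --timeout=300s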
# Run all of this on the COORDINATOR node.
# Check the Strimzi releases page for the latest version.
wget https://github.com/strimzi/strimzi-kafka-operator/releases/download/0.38.0/strimzi-0.38.0.tar.gz
tar xzvf strimzi-0.38.0.tar.gz
cd strimzi-0.38.0
# Create the namespaces.
kubectl create ns kafka
kubectl create ns logging
# Modify the namespace.
sed -i 's/namespace: .*/namespace: kafka/' install/cluster-operator/*RoleBinding*.yaml
# Edit the deployment YAML to set the namespace for the cluster.
# Modify the file based on the section below this.
vi install/cluster-operator/060-Deployment-strimzi-cluster-operator.yaml
# Around line 43.
# Before:
env:
  - name: STRIMZI_NAMESPACE
    valueFrom:
      fieldRef:
        fieldPath: metadata.namespace
# After:
env:
  - name: STRIMZI_NAMESPACE
    value: logging
# Run the following:
kubectl create -f install/cluster-operator/020-RoleBinding-strimzi-cluster-operator.yaml -n logging
kubectl create -f install/cluster-operator/023-RoleBinding-strimzi-cluster-operator.yaml -n logging
kubectl create -f install/cluster-operator/031-RoleBinding-strimzi-cluster-operator-entity-operator-delegation.yaml -n logging
kubectl create -f install/cluster-operator/ -n kafka
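# Optionally confirm the Strimzi cluster operator pod starts in the kafka namespace before continuing.
kubectl -n kafka get pods -w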
# Create the kafka CRD spec.
# Add the section below this to the file.
vi kafka.yaml
apiVersion: kafka.strimzi.io/v1beta2
kind: Kafka
metadata:
  name: kafka-00
spec:
  kafka:
    replicas: 3
    listeners:
      - name: plain
        port: 9092
        type: internal
        tls: false
      - name: tls
        port: 9093
        type: internal
        tls: true
        authentication:
          type: tls
      - name: external
        port: 9094
        type: nodeport
        tls: false
    storage:
      type: jbod
      volumes:
        - id: 0
          type: persistent-claim
          size: 32Gi
          deleteClaim: true
    config:
      offsets.topic.replication.factor: 1
      transaction.state.log.replication.factor: 1
      transaction.state.log.min.isr: 1
      default.replication.factor: 1
      min.insync.replicas: 1
  zookeeper:
    replicas: 3
    storage:
      type: persistent-claim
      size: 1Gi
      deleteClaim: true
  entityOperator:
    topicOperator: {}
    userOperator: {}
# Apply the config.
kubectl apply -n logging -f kafka.yaml
# Make sure it comes up.
kubectl -n logging get pods -w
# Wait until it says the deployment is complete.
kubectl -n logging describe Kafka
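# Optionally wait for the Kafka resource to report Ready (same pattern as the Strimzi quickstart).
kubectl -n logging wait kafka/kafka-00 --for=condition=Ready --timeout=300s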
# Deploy the Zookeeper entrance file.
# Add the section below this.
vi zoo-entrance.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kafka-00-zoo-entrance
  namespace: logging
  labels:
    app: kafka-00-zoo-entrance
    instance: kafka-00
spec:
  replicas: 1
  selector:
    matchLabels:
      app: kafka-00-zoo-entrance
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: kafka-00-zoo-entrance
        instance: kafka-00
    spec:
      containers:
        - name: zoo-entrance
          image: "ghcr.io/scholzj/zoo-entrance:latest"
          command:
            - /opt/stunnel/stunnel_run.sh
          ports:
            - containerPort: 2181
              name: zoo
              protocol: TCP
          env:
            - name: LOG_LEVEL
              value: notice
            - name: STRIMZI_ZOOKEEPER_CONNECT
              value: "kafka-00-zookeeper-client:2181"
          imagePullPolicy: Always
          livenessProbe:
            exec:
              command:
                - /opt/stunnel/stunnel_healthcheck.sh
                - "2181"
            failureThreshold: 3
            initialDelaySeconds: 15
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
          readinessProbe:
            exec:
              command:
                - /opt/stunnel/stunnel_healthcheck.sh
                - "2181"
            failureThreshold: 3
            initialDelaySeconds: 15
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
          volumeMounts:
            - mountPath: /etc/cluster-operator-certs/
              name: cluster-operator-certs
            - mountPath: /etc/cluster-ca-certs/
              name: cluster-ca-certs
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      volumes:
        - name: cluster-operator-certs
          secret:
            defaultMode: 288
            secretName: kafka-00-cluster-operator-certs
        - name: cluster-ca-certs
          secret:
            defaultMode: 288
            secretName: kafka-00-cluster-ca-cert
---
apiVersion: v1
kind: Service
metadata:
  namespace: logging
  labels:
    app: kafka-00-zoo-entrance
  name: kafka-00-zoo-entrance
spec:
  ports:
    - name: zoo
      port: 2181
      protocol: TCP
      targetPort: 2181
  selector:
    app: kafka-00-zoo-entrance
  type: ClusterIP
# Apply the config.
kubectl apply -n logging -f zoo-entrance.yaml
# Make sure the pods are running.
kubectl -n logging get pods -w | grep zoo-entrance
# Ensure everything looks sane.
kubectl -n logging describe Deployment
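# Optional sanity check: list broker IDs in ZooKeeper through the zoo-entrance service.
# The image tag below is an assumption; any Strimzi Kafka image matching your operator version should work.
kubectl -n logging run zk-check -ti --rm --restart=Never \
  --image=quay.io/strimzi/kafka:0.38.0-kafka-3.6.0 \
  -- bin/zookeeper-shell.sh kafka-00-zoo-entrance:2181 ls /brokers/ids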
# Check the humio-operator releases page for the latest version.
export HUMIO_OPERATOR_VERSION=0.20.1
# Apply the config.
kubectl apply --server-side -f https://raw.githubusercontent.com/humio/humio-operator/humio-operator-${HUMIO_OPERATOR_VERSION}/config/crd/bases/core.humio.com_humioclusters.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/humio/humio-operator/humio-operator-${HUMIO_OPERATOR_VERSION}/config/crd/bases/core.humio.com_humioexternalclusters.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/humio/humio-operator/humio-operator-${HUMIO_OPERATOR_VERSION}/config/crd/bases/core.humio.com_humioingesttokens.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/humio/humio-operator/humio-operator-${HUMIO_OPERATOR_VERSION}/config/crd/bases/core.humio.com_humioparsers.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/humio/humio-operator/humio-operator-${HUMIO_OPERATOR_VERSION}/config/crd/bases/core.humio.com_humiorepositories.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/humio/humio-operator/humio-operator-${HUMIO_OPERATOR_VERSION}/config/crd/bases/core.humio.com_humioviews.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/humio/humio-operator/humio-operator-${HUMIO_OPERATOR_VERSION}/config/crd/bases/core.humio.com_humioalerts.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/humio/humio-operator/humio-operator-${HUMIO_OPERATOR_VERSION}/config/crd/bases/core.humio.com_humioactions.yaml
# Configure Helm.
# The chart version is pinned to the HUMIO_OPERATOR_VERSION exported above.
helm repo add humio-operator https://humio.github.io/humio-operator
helm install humio-operator humio-operator/humio-operator --namespace logging --create-namespace --version=${HUMIO_OPERATOR_VERSION}
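# Optionally confirm the operator is running before creating the cluster.
helm -n logging list
kubectl -n logging get pods | grep humio-operator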
# Replace $LICENSE_KEY with your actual LogScale license key.
kubectl -n logging create secret generic logscale-00-license --from-literal=data=$LICENSE_KEY
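# Confirm the license secret exists.
kubectl -n logging get secret logscale-00-license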
# Create the cluster.
# Add the section below this to the file.
# IMPORTANT:
# - Replace the version with the latest LogScale version.
# - Replace the password.
vi logscale-00.yaml
apiVersion: core.humio.com/v1alpha1
kind: HumioCluster
metadata:
  name: logscale-00
spec:
  image: "humio/humio-core:1.118.0"
  license:
    secretKeyRef:
      name: logscale-00-license
      key: data
  targetReplicationFactor: 2
  nodeCount: 3
  storagePartitionsCount: 24
  digestPartitionsCount: 24
  autoRebalancePartitions: true
  tls:
    enabled: false
  resources:
    limits:
      cpu: "4"
      memory: 4Gi
    requests:
      cpu: "1"
      memory: 2Gi
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: humio_node_type
                operator: In
                values:
                  - core
          - matchExpressions:
              - key: kubernetes.io/arch
                operator: In
                values:
                  - amd64
          - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                  - linux
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchExpressions:
              - key: app
                operator: In
                values:
                  - humio-core
          topologyKey: kubernetes.io/hostname
  dataVolumePersistentVolumeClaimSpecTemplate:
    storageClassName:
    accessModes: [ReadWriteOnce]
    resources:
      requests:
        storage: 40Gi
  extraKafkaConfigs: "security.protocol=PLAINTEXT"
  environmentVariables:
    - name: "KAFKA_SERVERS"
      value: "kafka-00-kafka-bootstrap:9092"
    - name: "SINGLE_USER_USERNAME"
      value: "admin"
    - name: "SINGLE_USER_PASSWORD"
      value: "ch4ng3th1sp4ss"
    - name: "AUTHENTICATION_METHOD"
      value: "single-user"
# Apply the config.
kubectl -n logging apply -f logscale-00.yaml
# Make sure the pods come up.
kubectl -n logging get pods -w
# Find the list of LogScale pods.
kubectl -n logging get pods | grep logscale
# From the list, check the output from an individual pod.
# kubectl -n logging logs logscale-00-core-tbqxdh
# kubectl -n logging logs logscale-00-core-tbqxdh -c humio
# Make sure everything looks sane.
kubectl -n logging describe HumioCluster
# Create the service.
# Add the section below this.
vi humio-service.yaml
---
apiVersion: v1
kind: Service
metadata:
  name: humio-service
spec:
  type: NodePort
  selector:
    app.kubernetes.io/instance: logscale-00
  ports:
    - name: humio-ingress
      protocol: TCP
      port: 8080
      targetPort: 8080
      nodePort: 28080
# Apply the config.
kubectl -n logging apply -f humio-service.yaml
# Check for sanity.
kubectl -n logging describe Service
kubectl -n logging get service
# Ensure the connection is successful.
# At this point you should be able to connect via HTTP using the IP address or DNS name of any node.
# http://$public_ip_or_dns_name_of_host:28080
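# Quick command-line check (assumes /api/v1/status, LogScale's unauthenticated status endpoint, is reachable).
curl -s http://$public_ip_or_dns_name_of_host:28080/api/v1/status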
# Enable TLS by setting spec.tls.enabled to true in the file.
vi logscale-00.yaml
# Install cert manager.
kubectl create namespace cert-manager
helm repo add jetstack https://charts.jetstack.io
helm repo update
helm install cert-manager jetstack/cert-manager --namespace cert-manager --set installCRDs=true
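# Wait for cert-manager to become ready before re-applying the cluster config.
kubectl -n cert-manager get pods
kubectl -n cert-manager wait --for=condition=Available deployment --all --timeout=300s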
# Apply the config.
kubectl -n logging apply -f logscale-00.yaml
# Check the changes.
kubectl -n logging describe HumioCluster
# Add ingress.
helm upgrade --install ingress-nginx ingress-nginx --repo https://kubernetes.github.io/ingress-nginx --namespace ingress-nginx --create-namespace
# Create certs.
openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout logscale.logscale-00.com.key -out logscale.logscale-00.com.crt -subj "/CN=logscale.logscale-00.com/O=logscale.logscale-00.com"
# Load the secret into Kubernetes.
kubectl -n logging create secret tls cert-logscale-00 --key logscale.logscale-00.com.key --cert logscale.logscale-00.com.crt
# Create the ingress object.
# Add the following section to the file.
vi logscale-ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: logscale-00-ingress
  annotations:
    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
    nginx.ingress.kubernetes.io/proxy-ssl-name: "logscale-00.logging"
    nginx.ingress.kubernetes.io/proxy-ssl-server-name: "logscale-00.logging"
    nginx.ingress.kubernetes.io/proxy-ssl-secret: "logging/logscale-00"
spec:
  ingressClassName: nginx
  tls:
    - hosts:
        - logscale.logscale-00.com
      secretName: cert-logscale-00
  rules:
    - host: logscale.logscale-00.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: logscale-00
                port:
                  number: 8080
# Apply the spec.
kubectl -n logging apply -f logscale-ingress.yaml
# Check for sanity.
kubectl -n logging describe Ingress
# Create a node port.
# Add the following section to the file.
vi ingress-nodeport.yaml
---
apiVersion: v1
kind: Service
metadata:
  name: ingress-nginx-nodeport
spec:
  type: NodePort
  selector:
    app.kubernetes.io/name: ingress-nginx
  ports:
    - name: humio-ingress
      protocol: TCP
      port: 443
      targetPort: 443
      nodePort: 28443
# Apply the spec.
kubectl -n ingress-nginx apply -f ingress-nodeport.yaml
# Check the service.
kubectl -n ingress-nginx get services
# Ensure the connection is successful.
# At this point you should be able to connect via HTTPS on the DNS name.
# https://$dns_name_of_host:28443
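# Example check with curl: -k skips verification of the self-signed certificate, and --resolve maps the
# test hostname to a node IP if DNS is not set up. Replace $node_ip with the address of any node.
curl -k --resolve logscale.logscale-00.com:28443:$node_ip https://logscale.logscale-00.com:28443/api/v1/status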
# Deleting resources that were applied.
# Simply swap "apply" for "delete" in the commands above. For example:
kubectl -n logging delete -f kafka.yaml
kubectl -n logging delete -f logscale-00.yaml
kubectl -n ingress-nginx delete -f ingress-nodeport.yaml
# Get details about a particular resource type.
kubectl -n logging describe Kafka
kubectl -n logging describe Deployment
kubectl -n logging describe Service
kubectl -n logging describe HumioCluster
kubectl -n logging describe Ingress
# Show which services are listening on which ports.
kubectl -n logging get service
# List all running pods in a namespace.
kubectl -n logging get pods
# Get logs from a particular pod.
kubectl -n logging logs logscale-00-core-tbqxdh
# Get application logs from the humio container in a pod.
kubectl -n logging logs logscale-00-core-tbqxdh -c humio