Add additional Helm documentation for Fleet Server, and Agent #6154

Merged: 8 commits, Dec 29, 2022
16 changes: 8 additions & 8 deletions deploy/eck-elasticsearch/examples/hot-warm-cold.yaml
@@ -7,7 +7,7 @@ nodeSets:
# Comment out when setting the vm.max_map_count via initContainer, as these are mutually exclusive.
# For production workloads, it is strongly recommended to increase the kernel setting vm.max_map_count to 262144
# and leave node.store.allow_mmap unset.
# ref: https://www.elastic.co/guide/en/cloud-on-k8s/master/k8s-virtual-memory.html
# ref: https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-virtual-memory.html
#
node.store.allow_mmap: false
podTemplate:
@@ -35,7 +35,7 @@ nodeSets:
# values:
# - highio
# Volume Claim settings.
# ref: https://www.elastic.co/guide/en/cloud-on-k8s/2.2/k8s-volume-claim-templates.html
# ref: https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-volume-claim-templates.html
#
volumeClaimTemplates:
- metadata:
@@ -56,7 +56,7 @@ nodeSets:
# Comment out when setting the vm.max_map_count via initContainer, as these are mutually exclusive.
# For production workloads, it is strongly recommended to increase the kernel setting vm.max_map_count to 262144
# and leave node.store.allow_mmap unset.
# ref: https://www.elastic.co/guide/en/cloud-on-k8s/master/k8s-virtual-memory.html
# ref: https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-virtual-memory.html
#
node.store.allow_mmap: false
podTemplate:
@@ -84,7 +84,7 @@ nodeSets:
# values:
# - highio
# Volume Claim settings.
# ref: https://www.elastic.co/guide/en/cloud-on-k8s/2.2/k8s-volume-claim-templates.html
# ref: https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-volume-claim-templates.html
#
volumeClaimTemplates:
- metadata:
@@ -105,7 +105,7 @@ nodeSets:
# Comment out when setting the vm.max_map_count via initContainer, as these are mutually exclusive.
# For production workloads, it is strongly recommended to increase the kernel setting vm.max_map_count to 262144
# and leave node.store.allow_mmap unset.
# ref: https://www.elastic.co/guide/en/cloud-on-k8s/master/k8s-virtual-memory.html
# ref: https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-virtual-memory.html
#
node.store.allow_mmap: false
podTemplate:
@@ -133,7 +133,7 @@ nodeSets:
# values:
# - highstorage
# Volume Claim settings.
# ref: https://www.elastic.co/guide/en/cloud-on-k8s/2.2/k8s-volume-claim-templates.html
# ref: https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-volume-claim-templates.html
#
volumeClaimTemplates:
- metadata:
@@ -154,7 +154,7 @@ nodeSets:
# Comment out when setting the vm.max_map_count via initContainer, as these are mutually exclusive.
# For production workloads, it is strongly recommended to increase the kernel setting vm.max_map_count to 262144
# and leave node.store.allow_mmap unset.
# ref: https://www.elastic.co/guide/en/cloud-on-k8s/master/k8s-virtual-memory.html
# ref: https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-virtual-memory.html
#
node.store.allow_mmap: false
podTemplate:
@@ -182,7 +182,7 @@ nodeSets:
# values:
# - highstorage
# Volume Claim settings.
# ref: https://www.elastic.co/guide/en/cloud-on-k8s/2.2/k8s-volume-claim-templates.html
# ref: https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-volume-claim-templates.html
#
volumeClaimTemplates:
- metadata:
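The comments in this example recommend raising the kernel setting vm.max_map_count to 262144 for production and leaving node.store.allow_mmap unset. A minimal sketch of that alternative, following the initContainer pattern described on the linked k8s-virtual-memory page; the nodeSet name and count are illustrative:

[source,yaml]
----
nodeSets:
- name: default
  count: 3
  # node.store.allow_mmap is left unset here: the initContainer below raises
  # vm.max_map_count on the node, and the two approaches are mutually exclusive.
  podTemplate:
    spec:
      initContainers:
      - name: sysctl
        securityContext:
          privileged: true
          runAsUser: 0
        command: ['sh', '-c', 'sysctl -w vm.max_map_count=262144']
----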
2 changes: 1 addition & 1 deletion deploy/eck-elasticsearch/values.yaml
@@ -76,7 +76,7 @@ nodeSets:
# Comment out when setting the vm.max_map_count via initContainer, as these are mutually exclusive.
# For production workloads, it is strongly recommended to increase the kernel setting vm.max_map_count to 262144
# and leave node.store.allow_mmap unset.
# ref: https://www.elastic.co/guide/en/cloud-on-k8s/master/k8s-virtual-memory.html
# ref: https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-virtual-memory.html
#
node.store.allow_mmap: false
podTemplate:
@@ -6,48 +6,25 @@ eck-elasticsearch:
#
fullnameOverride: elasticsearch

# Version of Elasticsearch.
#
version: 8.2.3

nodeSets:
- name: default
count: 3
# Comment out when setting the vm.max_map_count via initContainer, as these are mutually exclusive.
# For production workloads, it is strongly recommended to increase the kernel setting vm.max_map_count to 262144
# and leave node.store.allow_mmap unset.
# ref: https://www.elastic.co/guide/en/cloud-on-k8s/master/k8s-virtual-memory.html
#
config:
node.store.allow_mmap: false

eck-kibana:
enabled: true

# Name of the Kibana instance.
#
fullnameOverride: kibana

# Version of Kibana.
#
version: 8.2.3

spec:
# Count of Kibana instances to create.
#
count: 1

# Reference to ECK-managed Elasticsearch instance, ideally from {{ "elasticsearch.fullname" }}
#
elasticsearchRef:
name: elasticsearch

config:
# Note that these are specific to the namespace into which this example is installed, and are
# using `default` as configured here. If installed outside of the `default` namespace,
# using `elastic-stack` as configured here. If installed outside of the `elastic-stack` namespace,
# these 2 lines need modification.
xpack.fleet.agents.elasticsearch.hosts: ["https://elasticsearch-es-http.default.svc:9200"]
xpack.fleet.agents.fleet_server.hosts: ["https://fleet-server-agent-http.default.svc:8220"]
xpack.fleet.agents.elasticsearch.hosts: ["https://elasticsearch-es-http.elastic-stack.svc:9200"]
xpack.fleet.agents.fleet_server.hosts: ["https://fleet-server-agent-http.elastic-stack.svc:8220"]
xpack.fleet.packages:
- name: system
version: latest
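The two `xpack.fleet.agents.*.hosts` values in the hunk above embed the install namespace in the Kubernetes service DNS names (`<service>.<namespace>.svc`). As a hedged illustration, installing this example into a hypothetical `observability` namespace instead of `elastic-stack` would only change the namespace segment; the nesting under `eck-kibana.spec.config` is assumed from the hunk above:

[source,yaml]
----
eck-kibana:
  spec:
    config:
      # Only the namespace segment of the service DNS names changes.
      xpack.fleet.agents.elasticsearch.hosts: ["https://elasticsearch-es-http.observability.svc:9200"]
      xpack.fleet.agents.fleet_server.hosts: ["https://fleet-server-agent-http.observability.svc:8220"]
----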
2 changes: 1 addition & 1 deletion deploy/eck-stack/examples/custom-elasticsearch-kibana.yaml
@@ -15,7 +15,7 @@ eck-elasticsearch:
# Comment out when setting the vm.max_map_count via initContainer, as these are mutually exclusive.
# For production workloads, it is strongly recommended to increase the kernel setting vm.max_map_count to 262144
# and leave node.store.allow_mmap unset.
# ref: https://www.elastic.co/guide/en/cloud-on-k8s/master/k8s-virtual-memory.html
# ref: https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-virtual-memory.html
#
node.store.allow_mmap: false
volumeClaimTemplates:
199 changes: 199 additions & 0 deletions deploy/eck-stack/examples/elasticsearch/hot-warm-cold.yaml
@@ -0,0 +1,199 @@
---
eck-elasticsearch:
nodeSets:
- name: masters
count: 1
config:
node.roles: ["master"]
# Comment out when setting the vm.max_map_count via initContainer, as these are mutually exclusive.
# For production workloads, it is strongly recommended to increase the kernel setting vm.max_map_count to 262144
# and leave node.store.allow_mmap unset.
# ref: https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-virtual-memory.html
#
node.store.allow_mmap: false
podTemplate:
spec:
containers:
- name: elasticsearch
resources:
limits:
memory: 8Gi
cpu: 2
# Affinity/Anti-affinity settings for controlling the 'spreading' of Elasticsearch
# pods across existing hosts.
# ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
# ref: https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-advanced-node-scheduling.html#k8s-affinity-options
#
# affinity:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: beta.kubernetes.io/instance-type
# operator: In
# # This should be adjusted to the instance type according to your setup
# #
# values:
# - highio
# Volume Claim settings.
# ref: https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-volume-claim-templates.html
#
volumeClaimTemplates:
- metadata:
name: elasticsearch-data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Ti
# Adjust to your storage class name
#
# storageClassName: local-storage
- name: hot
count: 1
config:
node.roles: ["data_hot", "data_content", "ingest"]
# Comment out when setting the vm.max_map_count via initContainer, as these are mutually exclusive.
# For production workloads, it is strongly recommended to increase the kernel setting vm.max_map_count to 262144
# and leave node.store.allow_mmap unset.
# ref: https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-virtual-memory.html
#
node.store.allow_mmap: false
podTemplate:
spec:
containers:
- name: elasticsearch
resources:
limits:
memory: 16Gi
cpu: 4
# Affinity/Anti-affinity settings for controlling the 'spreading' of Elasticsearch
# pods across existing hosts.
# ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
# ref: https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-advanced-node-scheduling.html#k8s-affinity-options
#
# affinity:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: beta.kubernetes.io/instance-type
# operator: In
# # This should be adjusted to the instance type according to your setup
# #
# values:
# - highio
# Volume Claim settings.
# ref: https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-volume-claim-templates.html
#
volumeClaimTemplates:
- metadata:
name: elasticsearch-data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Ti
# Adjust to your storage class name
#
# storageClassName: local-storage
- name: warm
count: 1
config:
node.roles: ["data_warm"]
# Comment out when setting the vm.max_map_count via initContainer, as these are mutually exclusive.
# For production workloads, it is strongly recommended to increase the kernel setting vm.max_map_count to 262144
# and leave node.store.allow_mmap unset.
# ref: https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-virtual-memory.html
#
node.store.allow_mmap: false
podTemplate:
spec:
containers:
- name: elasticsearch
resources:
limits:
memory: 16Gi
cpu: 2
# Affinity/Anti-affinity settings for controlling the 'spreading' of Elasticsearch
# pods across existing hosts.
# ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
# ref: https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-advanced-node-scheduling.html#k8s-affinity-options
#
# affinity:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: beta.kubernetes.io/instance-type
# operator: In
# # This should be adjusted to the instance type according to your setup
# #
# values:
# - highstorage
# Volume Claim settings.
# ref: https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-volume-claim-templates.html
#
volumeClaimTemplates:
- metadata:
name: elasticsearch-data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Ti
# Adjust to your storage class name
#
# storageClassName: local-storage
- name: cold
count: 1
config:
node.roles: ["data_cold"]
# Comment out when setting the vm.max_map_count via initContainer, as these are mutually exclusive.
# For production workloads, it is strongly recommended to increase the kernel setting vm.max_map_count to 262144
# and leave node.store.allow_mmap unset.
# ref: https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-virtual-memory.html
#
node.store.allow_mmap: false
podTemplate:
spec:
containers:
- name: elasticsearch
resources:
limits:
memory: 8Gi
cpu: 2
# Affinity/Anti-affinity settings for controlling the 'spreading' of Elasticsearch
# pods across existing hosts.
# ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
# ref: https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-advanced-node-scheduling.html#k8s-affinity-options
#
# affinity:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: beta.kubernetes.io/instance-type
# operator: In
# # This should be adjusted to the instance type according to your setup
# #
# values:
# - highstorage
# Volume Claim settings.
# ref: https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-volume-claim-templates.html
#
volumeClaimTemplates:
- metadata:
name: elasticsearch-data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 20Ti
# Adjust to your storage class name
#
# storageClassName: local-storage
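The commented affinity blocks in this example key off `beta.kubernetes.io/instance-type`, a node label that recent Kubernetes versions deprecate in favour of `node.kubernetes.io/instance-type`. A hedged sketch of what the hot tier's affinity block could look like once uncommented and updated; the `highio` value is a placeholder for an instance type that actually exists in your cluster:

[source,yaml]
----
eck-elasticsearch:
  nodeSets:
  - name: hot
    podTemplate:
      spec:
        affinity:
          nodeAffinity:
            requiredDuringSchedulingIgnoredDuringExecution:
              nodeSelectorTerms:
              - matchExpressions:
                - key: node.kubernetes.io/instance-type   # non-deprecated label
                  operator: In
                  values:
                  - highio                                # placeholder instance type
----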
24 changes: 24 additions & 0 deletions deploy/eck-stack/examples/kibana/http-configuration.yaml
@@ -0,0 +1,24 @@
---
eck-kibana:
spec:
# Count of Kibana replicas to create.
#
count: 1

# Reference to ECK-managed Elasticsearch resource, ideally from {{ "elasticsearch.fullname" }}
#
elasticsearchRef:
name: es-quickstart-eck-elasticsearch
# namespace: default
http:
service:
spec:
# Type of service to deploy for Kibana.
# This deploys a load balancer in a cloud service provider, where supported.
#
type: LoadBalancer
# tls:
# selfSignedCertificate:
# subjectAltNames:
# - ip: 1.2.3.4
# - dns: kibana.example.com
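The `elasticsearchRef` above expects the Elasticsearch resource to be named `es-quickstart-eck-elasticsearch`, which matches what the eck-elasticsearch chart generates for a release named `es-quickstart` (the release name used in the install command later in this diff). If the release name is not fixed in advance, one alternative, mirroring the earlier eck-kibana hunk in this diff, is to pin both sides with `fullnameOverride`; the names below are illustrative:

[source,yaml]
----
eck-elasticsearch:
  # Fixes the Elasticsearch resource name regardless of the Helm release name.
  fullnameOverride: elasticsearch

eck-kibana:
  spec:
    elasticsearchRef:
      # Matches the fullnameOverride above.
      name: elasticsearch
----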
@@ -42,7 +42,19 @@ To use one or more of these example configurations, use the `--values` Helm option
[source,sh]
----
# Install an eck-managed Elasticsearch and Kibana using the Elasticsearch node roles example with hot, warm, and cold data tiers, and the Kibana example customizing the http service.
helm install es-quickstart elastic/eck-stack -n elastic-stack --create-namespace --values https://github.com/elastic/cloud-on-k8s/tree/main/deploy/eck-stack/examples/elasticsearch/hot-warm-cold.yaml --values https://github.com/elastic/cloud-on-k8s/tree/main/deploy/eck-stack/examples/kibana/http-configuration.yaml
helm install es-quickstart elastic/eck-stack -n elastic-stack --create-namespace --values https://raw.githubusercontent.com/elastic/cloud-on-k8s/{eck_release_branch}/deploy/eck-stack/examples/elasticsearch/hot-warm-cold.yaml --values https://raw.githubusercontent.com/elastic/cloud-on-k8s/{eck_release_branch}/deploy/eck-stack/examples/kibana/http-configuration.yaml
----

[float]
[id="{p}-install-fleet-agent-elasticsearch-kibana-helm"]
== Installing Fleet Server with Elastic Agents, Elasticsearch, and Kibana using the eck-stack Helm Chart

The following section builds on the previous one and shows how to install Fleet Server and Fleet-managed Elastic Agents alongside Elasticsearch and Kibana.

[source,sh]
----
# Install an eck-managed Elasticsearch, Kibana, Fleet Server, and managed Elastic Agents using custom values.
helm install eck-stack-with-fleet elastic/eck-stack --values https://raw.githubusercontent.com/elastic/cloud-on-k8s/{eck_release_branch}/deploy/eck-stack/examples/agent/fleet-agents.yaml -n elastic-stack
----

[float]