
Commit 532825a
Updated images and manifests to 6.5.4
1 parent 0d74f0c

20 files changed: +346 -251 lines

README.md (+117 -116): large diff not rendered

deploy (+16 -27)

@@ -18,43 +18,32 @@ kctl() {
 }
 # alias kctl='kubectl --namespace logging'
 
-# Deploy Elasticsearch service
-kctl apply -f es-discovery-svc.yaml
-kctl apply -f es-svc.yaml
+# Deploy ElasticSearch configmap
 kctl apply -f es-configmap.yaml
 
-# Deploy Elasticsearch master node and wait until it's up
-kctl apply -f es-master.yaml
-until kctl rollout status deployment es-master > /dev/null 2>&1; do sleep 1; printf "."; done
-
-# Deploy Elasticsearch client node and wait until it's up
-kctl apply -f es-client.yaml
-until kctl rollout status deployment es-client > /dev/null 2>&1; do sleep 1; printf "."; done
-
-# Deploy Elasticsearch data node and wait until it's up
-kctl apply -f es-data-statefulset.yaml
-until kctl rollout status statefulset es-data > /dev/null 2>&1; do sleep 1; printf "."; done
-
-# As an alternative, replace es-discovery-svc.yaml, es-svc.yaml and the master, client and data manifests
-# with the ones below to have a three-node cluster with all roles in all nodes.
-#kctl apply -f es-full-svc.yaml
-#kctl apply -f es-full.yaml
-
+# As an alternative, replace master, client and data manifests applied above
+# with the ones below to have a three-node cluster with all roles in all nodes.
+kctl apply -f es-full-svc.yaml
+kctl apply -f es-full-statefulset.yaml
+until kctl rollout status statefulset es-full > /dev/null 2>&1; do sleep 1; printf "."; done
 
 # Deploy Curator
-kctl apply -f es-curator-config.yaml
-kctl apply -f es-curator_v1beta1.yaml
+kctl apply -f es-curator-configmap.yaml
+kctl apply -f es-curator-cronjob.yaml
+
+# Deploy Cerebro
+kctl apply -f cerebro.yaml
+kctl apply -f cerebro-external-ingress.yaml
 
 # Deploy Kibana
 kctl apply -f kibana-configmap.yaml
-kctl apply -f kibana-external-ingress.yaml
-kctl apply -f kibana-service-account.yaml
 kctl apply -f kibana-svc.yaml
-kctl apply -f kibana.yaml
+kctl apply -f kibana-deployment.yaml
+kctl apply -f kibana-external-ingress.yaml
 
 # Deploy Fluentd
-kctl apply -f fluentd-es-configmap.yaml
-kctl apply -f fluentd-es-ds.yaml
+kctl apply -f fluentd-configmap.yaml
+kctl apply -f fluentd-daemonset.yaml
 
 echo "done!"
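With the combined-role manifests now the default path in this script, a quick post-run check might look like the sketch below (the logging namespace and the component=elasticsearch label are taken from the manifests in this commit; adjust them if you deploy elsewhere):

    ./deploy
    # Wait for the all-roles StatefulSet applied as es-full-statefulset.yaml:
    kubectl --namespace logging rollout status statefulset es-full
    # List the Elasticsearch pods by the component label used in the Service selectors:
    kubectl --namespace logging get pods -l component=elasticsearch -o wide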

es-curator-configmap.yaml (+1 -1)

@@ -26,7 +26,7 @@ data:
       direction: older
       timestring: '%Y.%m.%d'
       unit: days
-      unit_count: 15
+      unit_count: 10
       field:
       stats_result:
       epoch:
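Dropping unit_count from 15 to 10 means the age filter now matches indices older than 10 days. A hedged way to preview the effect is a Curator dry run against the same action file (the local file names here are assumptions; the ConfigMap keys and mount paths are not shown in this hunk):

    # Print what Curator would delete without touching any index:
    curator --config config.yml --dry-run action_file.yml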

es-curator-cronjob.yaml (+1)

@@ -6,6 +6,7 @@ spec:
   schedule: 0 8 * * *
   jobTemplate:
     spec:
+      ttlSecondsAfterFinished: 180
       template:
         spec:
           containers:
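ttlSecondsAfterFinished asks Kubernetes to garbage-collect each finished Curator Job about three minutes after it completes; on clusters of this era the TTL-after-finished controller was still behind a feature gate, so the field is ignored where that gate is off. A hedged manual test, assuming the CronJob's metadata.name is curator (the name is not visible in this hunk):

    kubectl --namespace logging create job --from=cronjob/curator curator-manual-run
    # The Job and its pod should disappear roughly 180s after finishing:
    kubectl --namespace logging get jobs --watch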
File renamed without changes.

all-roles-nodes/es-full.yaml renamed to es-full.yaml (+4 -9)

@@ -33,10 +33,10 @@ spec:
         image: carlosedp/docker-elasticsearch-kubernetes:6.5.4
         resources:
           limits:
-            cpu: 1
+            cpu: 2
           requests:
             cpu: 1
-            memory: "768Mi"
+            memory: 1Gi
         ports:
         - containerPort: 9300
           name: transport
@@ -54,7 +54,7 @@ spec:
         - name: CLUSTER_NAME
           value: myesdb
         - name: ES_JAVA_OPTS
-          value: -Xms512m -Xmx512m
+          value: -Xms1G -Xmx1G
         - name: NETWORK_HOST
           value: _site_,_lo_
         - name: NUMBER_OF_MASTERS
@@ -68,13 +68,8 @@ spec:
         livenessProbe:
           tcpSocket:
             port: transport
-          initialDelaySeconds: 180
+          initialDelaySeconds: 120
           periodSeconds: 10
-        readinessProbe:
-          httpGet:
-            path: /_cluster/health
-            port: http
-          initialDelaySeconds: 180
           timeoutSeconds: 60
         volumeMounts:
         - name: config
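The heap is doubled to 1 GB while the memory request is 1Gi and no memory limit appears in this hunk, so the JVM's overhead beyond the heap comes out of unreserved node memory. A hedged check of what the pod actually got, assuming the StatefulSet is named es-full so its first pod is es-full-0:

    kubectl --namespace logging exec es-full-0 -- env | grep ES_JAVA_OPTS
    kubectl --namespace logging get pod es-full-0 -o jsonpath='{.spec.containers[0].resources}'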

fluentd-configmap.yaml (+11)

@@ -446,6 +446,9 @@ data:
       request_timeout 45s
       reload_connections false
       logstash_format true
+      template_name template-logstash
+      template_file /etc/fluent/config.d/template-logstash.json
+      template_overwrite true
       id_key _hash # specify same key name which is specified in hash_id_key
       remove_keys _hash # Elasticsearch doesn't like keys that start with _
       <buffer>
@@ -463,3 +466,11 @@ data:
       </buffer>
     </match>
 
+  template-logstash.json: |-
+    {
+      "index_patterns": ["logstash*"],
+      "settings": {
+        "number_of_shards": 3,
+        "number_of_replicas": 0
+      }
+    }
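With template_name/template_file/template_overwrite set, the fluentd Elasticsearch output uploads template-logstash.json as an index template before writing, so new logstash-* indices are created with 3 shards and 0 replicas. A hedged verification, assuming an Elasticsearch pod named es-full-0 serving HTTP on 9200:

    kubectl --namespace logging port-forward es-full-0 9200:9200 &
    curl -s 'http://localhost:9200/_template/template-logstash?pretty'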

fluentd-daemonset.yaml (-9)

@@ -52,8 +52,6 @@ metadata:
   namespace: logging
   labels:
     k8s-app: fluentd-es
-    kubernetes.io/cluster-service: "true"
-    addonmanager.kubernetes.io/mode: Reconcile
 spec:
   selector:
     matchLabels:
@@ -62,14 +60,7 @@ spec:
     metadata:
       labels:
         k8s-app: fluentd-es
-        kubernetes.io/cluster-service: "true"
-      # This annotation ensures that fluentd does not get evicted if the node
-      # supports critical pod annotation based priority scheme.
-      # Note that this does not guarantee admission on the nodes (#40573).
-      annotations:
-        scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
-      priorityClassName: system-node-critical
       serviceAccountName: fluentd-es
       containers:
       - name: fluentd-es
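Without the critical-pod annotation and priorityClassName, the fluentd pods are scheduled and evicted like any other DaemonSet pod. The k8s-app=fluentd-es label remains the handle for checking per-node coverage and logs:

    kubectl --namespace logging get pods -l k8s-app=fluentd-es -o wide
    kubectl --namespace logging logs -l k8s-app=fluentd-es --tail=20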

images/docker-elasticsearch/config/log4j2.properties (+1 -1)

@@ -3,7 +3,7 @@ status = error
 appender.console.type = Console
 appender.console.name = console
 appender.console.layout.type = PatternLayout
-appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n
+appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n
 
 rootLogger.level = info
 rootLogger.appenderRef.console.ref = console
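The new pattern adds the Elasticsearch node name to every console line, which helps when logs from several nodes are aggregated. A hedged peek at the result (the pod name es-full-0 is an assumption based on the es-full StatefulSet; the sample line only illustrates the layout):

    kubectl --namespace logging logs es-full-0 --tail=5
    # Illustrative shape of a line under the new pattern:
    # [2019-01-20T10:15:30,123][INFO ][o.e.n.Node               ] [es-full-0] started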
File renamed without changes.
File renamed without changes.

separate-roles/deploy (+58, new file)

@@ -0,0 +1,58 @@
+#!/usr/bin/env bash
+
+if [ -z "${KUBECONFIG}" ]; then
+    export KUBECONFIG=~/.kube/config
+fi
+
+# CAUTION - setting NAMESPACE will deploy most components to the given namespace
+# however some are hardcoded to 'monitoring'. Only use if you have reviewed all manifests.
+
+if [ -z "${NAMESPACE}" ]; then
+    NAMESPACE=logging
+fi
+
+kubectl create namespace "$NAMESPACE"
+
+kctl() {
+    kubectl --namespace "$NAMESPACE" "$@"
+}
+# alias kctl='kubectl --namespace logging'
+
+# Deploy ElasticSearch configmap
+kctl apply -f es-configmap.yaml
+
+# Deploy Elasticsearch master node and wait until it's up
+kctl apply -f es-master-svc.yaml
+kctl apply -f es-master-statefulset.yaml
+until kctl rollout status statefulset es-master > /dev/null 2>&1; do sleep 1; printf "."; done
+
+# Deploy Elasticsearch client node and wait until it's up
+kctl apply -f es-ingest-svc.yaml
+kctl apply -f es-ingest-statefulset.yaml
+until kctl rollout status deployment es-client > /dev/null 2>&1; do sleep 1; printf "."; done
+
+# Deploy Elasticsearch data node and wait until it's up
+kctl apply -f es-data-svc.yaml
+kctl apply -f es-data-statefulset.yaml
+until kctl rollout status statefulset es-data > /dev/null 2>&1; do sleep 1; printf "."; done
+
+# Deploy Curator
+kctl apply -f es-curator-configmap.yaml
+kctl apply -f es-curator-cronjob.yaml
+
+# Deploy Cerebro
+kctl apply -f cerebro.yaml
+kctl apply -f cerebro-external-ingress.yaml
+
+# Deploy Kibana
+kctl apply -f kibana-configmap.yaml
+kctl apply -f kibana-svc.yaml
+kctl apply -f kibana-deployment.yaml
+kctl apply -f kibana-external-ingress.yaml
+
+# Deploy Fluentd
+kctl apply -f fluentd-configmap.yaml
+kctl apply -f fluentd-daemonset.yaml
+
+echo "done!"
+
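A hedged invocation of the new role-separated script; NAMESPACE defaults to logging and KUBECONFIG falls back to ~/.kube/config when unset:

    cd separate-roles
    NAMESPACE=logging ./deploy
    # The master, ingest and data tiers are defined as StatefulSets in this commit,
    # even though the wait loop above still polls "deployment es-client":
    kubectl --namespace logging get statefulsets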

es-data-statefulset.yaml renamed to separate-roles/es-data-statefulset.yaml (+7 -11)

@@ -56,10 +56,12 @@ spec:
           value: "false"
         - name: NODE_INGEST
           value: "false"
+        - name: NODE_DATA
+          value: "true"
         - name: HTTP_ENABLE
           value: "true"
         - name: ES_JAVA_OPTS
-          value: -Xms1g -Xmx1g
+          value: -Xms512m -Xmx512m
         - name: PROCESSORS
           valueFrom:
             resourceFieldRef:
@@ -69,19 +71,13 @@ spec:
         livenessProbe:
           tcpSocket:
             port: transport
-          initialDelaySeconds: 180
-          periodSeconds: 10
-        readinessProbe:
-          httpGet:
-            path: /_cluster/health
-            port: http
-          initialDelaySeconds: 180
-          timeoutSeconds: 60
+          initialDelaySeconds: 360
+          periodSeconds: 30
         volumeMounts:
         - name: config
          mountPath: /elasticsearch/config/elasticsearch.yml
          subPath: elasticsearch.yml
-        - name: storage-data
+        - name: elasticsearch-data
          mountPath: /data
      affinity:
        podAntiAffinity:
@@ -105,7 +101,7 @@ spec:
          name: es-configmap
  volumeClaimTemplates:
  - metadata:
-      name: storage-data
+      name: elasticsearch-data
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
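Renaming the volumeClaimTemplate from storage-data to elasticsearch-data means newly created pods get claims following the <template>-<statefulset>-<ordinal> convention, e.g. elasticsearch-data-es-data-0; existing storage-data-* claims are not migrated automatically (the StatefulSet name es-data is taken from the deploy script):

    kubectl --namespace logging get pvc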
(file name not shown: Elasticsearch data Service manifest)

@@ -1,7 +1,7 @@
 apiVersion: v1
 kind: Service
 metadata:
-  name: elasticsearch-data
+  name: elasticsearch-data-discovery
   labels:
     component: elasticsearch
     role: data
@@ -10,6 +10,6 @@ spec:
     component: elasticsearch
     role: data
   ports:
-  - port: 9200
-    name: http
+  - port: 9300
+    name: transport
   clusterIP: None
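The Service is now headless on the transport port only (clusterIP: None, port 9300), which makes it a node-discovery endpoint rather than a client-facing HTTP service. A hedged in-cluster DNS check that should list the data pods' addresses:

    kubectl --namespace logging run dns-test --rm -it --restart=Never --image=busybox -- \
      nslookup elasticsearch-data-discovery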
(file name not shown: Elasticsearch ingest node manifest)

@@ -1,22 +1,16 @@
-kind: PersistentVolumeClaim
-apiVersion: v1
-metadata:
-  name: elasticsearch-ingest
-spec:
-  accessModes:
-    - ReadWriteMany
-  resources:
-    requests:
-      storage: 5Gi
----
-apiVersion: apps/v1beta1
-kind: Deployment
+apiVersion: apps/v1
+kind: StatefulSet
 metadata:
   name: es-ingest
   labels:
     component: elasticsearch
     role: ingest
 spec:
+  selector:
+    matchLabels:
+      component: elasticsearch
+      role: ingest
+  serviceName: elasticsearch-ingest
   replicas: 1
   template:
     metadata:
@@ -49,21 +43,23 @@ spec:
           value: myesdb
         - name: NODE_MASTER
           value: "false"
+        - name: NODE_INGEST
+          value: "true"
         - name: NODE_DATA
           value: "false"
         - name: HTTP_ENABLE
           value: "true"
         - name: ES_JAVA_OPTS
-          value: -Xms1g -Xmx1g
+          value: -Xms512m -Xmx512m
         - name: NETWORK_HOST
           value: _site_,_lo_
         - name: PROCESSORS
           valueFrom:
             resourceFieldRef:
               resource: limits.cpu
-        #- name: "ES_PLUGINS_INSTALL"
-        #  value: "x-pack"
         resources:
+          requests:
+            cpu: 1
           limits:
             cpu: 2
         ports:
@@ -72,27 +68,26 @@ spec:
         - containerPort: 9300
           name: transport
         livenessProbe:
-          tcpSocket:
-            port: transport
-          initialDelaySeconds: 180
-          periodSeconds: 10
-        readinessProbe:
-          httpGet:
+          httpGet:
            path: /_cluster/health
-            port: http
-          initialDelaySeconds: 180
+            port: 9200
+          initialDelaySeconds: 120
          timeoutSeconds: 60
        volumeMounts:
-        - name: storage
+        - name: elasticsearch-ingest
          mountPath: /data
        - name: config
          mountPath: /elasticsearch/config/elasticsearch.yml
          subPath: elasticsearch.yml
      volumes:
-      - name: storage
-        persistentVolumeClaim:
-          claimName: elasticsearch-ingest
      - name : config
        configMap:
          name: es-configmap
-
+  volumeClaimTemplates:
+  - metadata:
+      name: elasticsearch-ingest
+    spec:
+      accessModes: [ ReadWriteOnce ]
+      resources:
+        requests:
+          storage: 5Gi
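Moving the ingest tier from a Deployment with a shared ReadWriteMany claim to a StatefulSet with volumeClaimTemplates gives every replica its own ReadWriteOnce volume. A hedged scale-out sketch using the es-ingest name from this manifest:

    kubectl --namespace logging scale statefulset es-ingest --replicas=2
    # Each new replica gets its own claim named elasticsearch-ingest-es-ingest-<ordinal>:
    kubectl --namespace logging get pvc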
