• kubernetes/cluster/addons/fluentd-elasticsearch


    I. Preface

    These are the YAML manifests used to set up EFK (Elasticsearch, Fluentd, Kibana) on Kubernetes 1.23. All of them come from the official Kubernetes repository and are reproduced here without modification. They can be applied one by one with kubectl apply -f, or all together as sketched below.

    https://github.com/kubernetes/kubernetes/tree/release-1.23/cluster/addons/fluentd-elasticsearch
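
    Each manifest in section II below can be saved to a file with the name shown in its heading and applied individually. To apply everything in one step, a minimal kustomization.yaml sketch could look like the following (the file names are an assumption matching the headings used in this post; this file is not part of the upstream addon):

    apiVersion: kustomize.config.k8s.io/v1beta1
    kind: Kustomization
    resources:
      - create-logging-namespace.yaml
      - es-service.yaml
      - es-statefulset.yaml
      - fluentd-es-configmap.yaml
      - fluentd-es-ds.yaml
      - kibana-deployment.yaml
      - kibana-service.yaml

    With this file in the same directory as the manifests, kubectl apply -k . applies all seven objects; the Namespace is listed first so the namespaced resources have somewhere to land.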

    II. The original EFK YAML files

    1. create-logging-namespace.yaml

    kind: Namespace
    apiVersion: v1
    metadata:
      name: logging
      labels:
        k8s-app: logging
        kubernetes.io/cluster-service: "true"
        addonmanager.kubernetes.io/mode: Reconcile

    2. es-service.yaml

    apiVersion: v1
    kind: Service
    metadata:
      name: elasticsearch-logging
      namespace: logging
      labels:
        k8s-app: elasticsearch-logging
        kubernetes.io/cluster-service: "true"
        addonmanager.kubernetes.io/mode: Reconcile
        kubernetes.io/name: "Elasticsearch"
    spec:
      clusterIP: None
      ports:
        - name: db
          port: 9200
          protocol: TCP
          targetPort: 9200
        - name: transport
          port: 9300
          protocol: TCP
          targetPort: 9300
      publishNotReadyAddresses: true
      selector:
        k8s-app: elasticsearch-logging
      sessionAffinity: None
      type: ClusterIP
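
    Because clusterIP is None, this is a headless Service: DNS returns the addresses of the Elasticsearch pods themselves rather than a virtual IP, and publishNotReadyAddresses: true makes the pods resolvable even before they pass their readiness probes, so the nodes can find each other while the cluster is forming. A throwaway pod such as the following sketch (not part of the addon; the image tag is an assumption) can confirm that the name resolves once the StatefulSet below is running:

    apiVersion: v1
    kind: Pod
    metadata:
      name: dns-check
      namespace: logging
    spec:
      restartPolicy: Never
      containers:
        - name: dns-check
          image: busybox:1.36
          # Should print one A record per Elasticsearch pod.
          command: ["nslookup", "elasticsearch-logging.logging.svc.cluster.local"]

    Check the result with kubectl logs dns-check -n logging and delete the pod afterwards.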

    3. es-statefulset.yaml

    # RBAC authn and authz
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: elasticsearch-logging
      namespace: logging
      labels:
        k8s-app: elasticsearch-logging
        addonmanager.kubernetes.io/mode: Reconcile
    ---
    kind: ClusterRole
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: elasticsearch-logging
      labels:
        k8s-app: elasticsearch-logging
        addonmanager.kubernetes.io/mode: Reconcile
    rules:
      - apiGroups:
          - ""
        resources:
          - "services"
          - "namespaces"
          - "endpoints"
        verbs:
          - "get"
    ---
    kind: ClusterRoleBinding
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: elasticsearch-logging
      labels:
        k8s-app: elasticsearch-logging
        addonmanager.kubernetes.io/mode: Reconcile
    subjects:
      - kind: ServiceAccount
        name: elasticsearch-logging
        namespace: logging
        apiGroup: ""
    roleRef:
      kind: ClusterRole
      name: elasticsearch-logging
      apiGroup: ""
    ---
    # Elasticsearch deployment itself
    apiVersion: apps/v1
    kind: StatefulSet
    metadata:
      name: elasticsearch-logging
      namespace: logging
      labels:
        k8s-app: elasticsearch-logging
        version: v7.10.2
        addonmanager.kubernetes.io/mode: Reconcile
    spec:
      serviceName: elasticsearch-logging
      replicas: 2
      selector:
        matchLabels:
          k8s-app: elasticsearch-logging
          version: v7.10.2
      template:
        metadata:
          labels:
            k8s-app: elasticsearch-logging
            version: v7.10.2
        spec:
          serviceAccountName: elasticsearch-logging
          containers:
            - image: quay.io/fluentd_elasticsearch/elasticsearch:v7.10.2
              name: elasticsearch-logging
              imagePullPolicy: Always
              resources:
                # need more cpu upon initialization, therefore burstable class
                limits:
                  cpu: 1000m
                  memory: 3Gi
                requests:
                  cpu: 100m
                  memory: 3Gi
              ports:
                - containerPort: 9200
                  name: db
                  protocol: TCP
                - containerPort: 9300
                  name: transport
                  protocol: TCP
              livenessProbe:
                tcpSocket:
                  port: transport
                initialDelaySeconds: 5
                timeoutSeconds: 10
              readinessProbe:
                tcpSocket:
                  port: transport
                initialDelaySeconds: 5
                timeoutSeconds: 10
              volumeMounts:
                - name: elasticsearch-logging
                  mountPath: /data
              env:
                - name: "NAMESPACE"
                  valueFrom:
                    fieldRef:
                      fieldPath: metadata.namespace
                - name: "MINIMUM_MASTER_NODES"
                  value: "1"
          volumes:
            - name: elasticsearch-logging
              emptyDir: {}
          # Elasticsearch requires vm.max_map_count to be at least 262144.
          # If your OS already sets up this number to a higher value, feel free
          # to remove this init container.
          initContainers:
            - image: alpine:3.6
              command: ["/sbin/sysctl", "-w", "vm.max_map_count=262144"]
              name: elasticsearch-logging-init
              securityContext:
                privileged: true
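
    Note that the data volume is an emptyDir, so indices are lost whenever a pod is deleted or rescheduled; that is fine for a demo but not for anything long-lived. If persistence is wanted, the StatefulSet can be created with volumeClaimTemplates instead of the emptyDir volume. volumeClaimTemplates are immutable, so this has to be decided before the StatefulSet is first created. A sketch of the relevant spec excerpt (a modification, not part of the official manifest; the StorageClass name is an assumption):

    spec:
      volumeClaimTemplates:
        - metadata:
            name: elasticsearch-logging      # keeps the name referenced by volumeMounts
          spec:
            accessModes: ["ReadWriteOnce"]
            storageClassName: standard       # assumed; use a StorageClass that exists in your cluster
            resources:
              requests:
                storage: 20Gi

    The emptyDir entry under volumes: is removed at the same time.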

    4. fluentd-es-configmap.yaml

    kind: ConfigMap
    apiVersion: v1
    metadata:
      name: fluentd-es-config-v0.2.1
      namespace: logging
      labels:
        addonmanager.kubernetes.io/mode: Reconcile
    data:
      system.conf: |-
        <system>
          root_dir /tmp/fluentd-buffers/
        </system>

      containers.input.conf: |-
        # This configuration file for Fluentd / td-agent is used
        # to watch changes to Docker log files. The kubelet creates symlinks that
        # capture the pod name, namespace, container name & Docker container ID
        # to the docker logs for pods in the /var/log/containers directory on the host.
        # If running this fluentd configuration in a Docker container, the /var/log
        # directory should be mounted in the container.
        #
        # These logs are then submitted to Elasticsearch which assumes the
        # installation of the fluent-plugin-elasticsearch & the
        # fluent-plugin-kubernetes_metadata_filter plugins.
        # See https://github.com/uken/fluent-plugin-elasticsearch &
        # https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter for
        # more information about the plugins.
        #
        # Example
        # =======
        # A line in the Docker log file might look like this JSON:
        #
        # {"log":"2014/09/25 21:15:03 Got request with path wombat\n",
        #  "stream":"stderr",
        #  "time":"2014-09-25T21:15:03.499185026Z"}
        #
        # The time_format specification below makes sure we properly
        # parse the time format produced by Docker. This will be
        # submitted to Elasticsearch and should appear like:
        # $ curl 'http://elasticsearch-logging:9200/_search?pretty'
        # ...
        # {
        #   "_index" : "logstash-2014.09.25",
        #   "_type" : "fluentd",
        #   "_id" : "VBrbor2QTuGpsQyTCdfzqA",
        #   "_score" : 1.0,
        #   "_source":{"log":"2014/09/25 22:45:50 Got request with path wombat\n",
        #              "stream":"stderr","tag":"docker.container.all",
        #              "@timestamp":"2014-09-25T22:45:50+00:00"}
        # },
        # ...
        #
        # The Kubernetes fluentd plugin is used to write the Kubernetes metadata to the log
        # record & add labels to the log record if properly configured. This enables users
        # to filter & search logs on any metadata.
        # For example a Docker container's logs might be in the directory:
        #
        #  /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b
        #
        # and in the file:
        #
        #  997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
        #
        # where 997599971ee6... is the Docker ID of the running container.
        # The Kubernetes kubelet makes a symbolic link to this file on the host machine
        # in the /var/log/containers directory which includes the pod name and the Kubernetes
        # container name:
        #
        #    synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
        #    ->
        #    /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
        #
        # The /var/log directory on the host is mapped to the /var/log directory in the container
        # running this instance of Fluentd and we end up collecting the file:
        #
        #   /var/log/containers/synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
        #
        # This results in the tag:
        #
        #  var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
        #
        # The Kubernetes fluentd plugin is used to extract the namespace, pod name & container name
        # which are added to the log message as a kubernetes field object & the Docker container ID
        # is also added under the docker field object.
        # The final tag is:
        #
        #   kubernetes.var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
        #
        # And the final log record look like:
        #
        # {
        #   "log":"2014/09/25 21:15:03 Got request with path wombat\n",
        #   "stream":"stderr",
        #   "time":"2014-09-25T21:15:03.499185026Z",
        #   "kubernetes": {
        #     "namespace": "default",
        #     "pod_name": "synthetic-logger-0.25lps-pod",
        #     "container_name": "synth-lgr"
        #   },
        #   "docker": {
        #     "container_id": "997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b"
        #   }
        # }
        #
        # This makes it easier for users to search for logs by pod name or by
        # the name of the Kubernetes container regardless of how many times the
        # Kubernetes pod has been restarted (resulting in a several Docker container IDs).

        # Json Log Example:
        # {"log":"[info:2016-02-16T16:04:05.930-08:00] Some log text here\n","stream":"stdout","time":"2016-02-17T00:04:05.931087621Z"}
        # CRI Log Example:
        # 2016-02-17T00:04:05.931087621Z stdout F [info:2016-02-16T16:04:05.930-08:00] Some log text here
        <source>
          @id fluentd-containers.log
          @type tail
          path /var/log/containers/*.log
          pos_file /var/log/es-containers.log.pos
          tag raw.kubernetes.*
          read_from_head true
          <parse>
            @type multi_format
            <pattern>
              format json
              time_key time
              time_format %Y-%m-%dT%H:%M:%S.%NZ
            </pattern>
            <pattern>
              format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
              time_format %Y-%m-%dT%H:%M:%S.%N%:z
            </pattern>
          </parse>
        </source>

        # Detect exceptions in the log output and forward them as one log entry.
        <match raw.kubernetes.**>
          @id raw.kubernetes
          @type detect_exceptions
          remove_tag_prefix raw
          message log
          stream stream
          multiline_flush_interval 5
          max_bytes 500000
          max_lines 1000
        </match>

        # Concatenate multi-line logs
        <filter **>
          @id filter_concat
          @type concat
          key message
          multiline_end_regexp /\n$/
          separator ""
        </filter>

        # Enriches records with Kubernetes metadata
        <filter kubernetes.**>
          @id filter_kubernetes_metadata
          @type kubernetes_metadata
        </filter>

        # Fixes json fields in Elasticsearch
        <filter kubernetes.**>
          @id filter_parser
          @type parser
          key_name log
          reserve_data true
          remove_key_name_field true
          <parse>
            @type multi_format
            <pattern>
              format json
            </pattern>
            <pattern>
              format none
            </pattern>
          </parse>
        </filter>

      system.input.conf: |-
        # Example:
        # 2015-12-21 23:17:22,066 [salt.state       ][INFO    ] Completed state [net.ipv4.ip_forward] at time 23:17:22.066081
        <source>
          @id minion
          @type tail
          format /^(?<time>[^ ]* [^ ,]*)[^\[]*\[[^\]]*\]\[(?<severity>[^ \]]*) *\] (?<message>.*)$/
          time_format %Y-%m-%d %H:%M:%S
          path /var/log/salt/minion
          pos_file /var/log/salt.pos
          tag salt
        </source>

        # Example:
        # Dec 21 23:17:22 gke-foo-1-1-4b5cbd14-node-4eoj startupscript: Finished running startup script /var/run/google.startup.script
        <source>
          @id startupscript.log
          @type tail
          format syslog
          path /var/log/startupscript.log
          pos_file /var/log/es-startupscript.log.pos
          tag startupscript
        </source>

        # Examples:
        # time="2016-02-04T06:51:03.053580605Z" level=info msg="GET /containers/json"
        # time="2016-02-04T07:53:57.505612354Z" level=error msg="HTTP Error" err="No such image: -f" statusCode=404
        # TODO(random-liu): Remove this after cri container runtime rolls out.
        <source>
          @id docker.log
          @type tail
          format /^time="(?<time>[^"]*)" level=(?<severity>[^ ]*) msg="(?<message>[^"]*)"( err="(?<error>[^"]*)")?( statusCode=($<status_code>\d+))?/
          path /var/log/docker.log
          pos_file /var/log/es-docker.log.pos
          tag docker
        </source>

        # Example:
        # 2016/02/04 06:52:38 filePurge: successfully removed file /var/etcd/data/member/wal/00000000000006d0-00000000010a23d1.wal
        <source>
          @id etcd.log
          @type tail
          # Not parsing this, because it doesn't have anything particularly useful to
          # parse out of it (like severities).
          format none
          path /var/log/etcd.log
          pos_file /var/log/es-etcd.log.pos
          tag etcd
        </source>

        # Multi-line parsing is required for all the kube logs because very large log
        # statements, such as those that include entire object bodies, get split into
        # multiple lines by glog.

        # Example:
        # I0204 07:32:30.020537    3368 server.go:1048] POST /stats/container/: (13.972191ms) 200 [[Go-http-client/1.1] 10.244.1.3:40537]
        <source>
          @id kubelet.log
          @type tail
          format multiline
          multiline_flush_interval 5s
          format_firstline /^\w\d{4}/
          format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
          time_format %m%d %H:%M:%S.%N
          path /var/log/kubelet.log
          pos_file /var/log/es-kubelet.log.pos
          tag kubelet
        </source>

        # Example:
        # I1118 21:26:53.975789       6 proxier.go:1096] Port "nodePort for kube-system/default-http-backend:http" (:31429/tcp) was open before and is still needed
        <source>
          @id kube-proxy.log
          @type tail
          format multiline
          multiline_flush_interval 5s
          format_firstline /^\w\d{4}/
          format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
          time_format %m%d %H:%M:%S.%N
          path /var/log/kube-proxy.log
          pos_file /var/log/es-kube-proxy.log.pos
          tag kube-proxy
        </source>

        # Example:
        # I0204 07:00:19.604280       5 handlers.go:131] GET /api/v1/nodes: (1.624207ms) 200 [[kube-controller-manager/v1.1.3 (linux/amd64) kubernetes/6a81b50] 127.0.0.1:38266]
        <source>
          @id kube-apiserver.log
          @type tail
          format multiline
          multiline_flush_interval 5s
          format_firstline /^\w\d{4}/
          format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
          time_format %m%d %H:%M:%S.%N
          path /var/log/kube-apiserver.log
          pos_file /var/log/es-kube-apiserver.log.pos
          tag kube-apiserver
        </source>

        # Example:
        # I0204 06:55:31.872680       5 servicecontroller.go:277] LB already exists and doesn't need update for service kube-system/kube-ui
        <source>
          @id kube-controller-manager.log
          @type tail
          format multiline
          multiline_flush_interval 5s
          format_firstline /^\w\d{4}/
          format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
          time_format %m%d %H:%M:%S.%N
          path /var/log/kube-controller-manager.log
          pos_file /var/log/es-kube-controller-manager.log.pos
          tag kube-controller-manager
        </source>

        # Example:
        # W0204 06:49:18.239674       7 reflector.go:245] pkg/scheduler/factory/factory.go:193: watch of *api.Service ended with: 401: The event in requested index is outdated and cleared (the requested history has been cleared [2578313/2577886]) [2579312]
        <source>
          @id kube-scheduler.log
          @type tail
          format multiline
          multiline_flush_interval 5s
          format_firstline /^\w\d{4}/
          format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
          time_format %m%d %H:%M:%S.%N
          path /var/log/kube-scheduler.log
          pos_file /var/log/es-kube-scheduler.log.pos
          tag kube-scheduler
        </source>

        # Example:
        # I0603 15:31:05.793605       6 cluster_manager.go:230] Reading config from path /etc/gce.conf
        <source>
          @id glbc.log
          @type tail
          format multiline
          multiline_flush_interval 5s
          format_firstline /^\w\d{4}/
          format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
          time_format %m%d %H:%M:%S.%N
          path /var/log/glbc.log
          pos_file /var/log/es-glbc.log.pos
          tag glbc
        </source>

        # Example:
        # I0603 15:31:05.793605       6 cluster_manager.go:230] Reading config from path /etc/gce.conf
        <source>
          @id cluster-autoscaler.log
          @type tail
          format multiline
          multiline_flush_interval 5s
          format_firstline /^\w\d{4}/
          format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
          time_format %m%d %H:%M:%S.%N
          path /var/log/cluster-autoscaler.log
          pos_file /var/log/es-cluster-autoscaler.log.pos
          tag cluster-autoscaler
        </source>

        # Logs from systemd-journal for interesting services.
        # TODO(random-liu): Remove this after cri container runtime rolls out.
        <source>
          @id journald-docker
          @type systemd
          matches [{ "_SYSTEMD_UNIT": "docker.service" }]
          <storage>
            @type local
            persistent true
            path /var/log/journald-docker.pos
          </storage>
          read_from_head true
          tag docker
        </source>

        <source>
          @id journald-container-runtime
          @type systemd
          matches [{ "_SYSTEMD_UNIT": "{{ fluentd_container_runtime_service }}.service" }]
          <storage>
            @type local
            persistent true
            path /var/log/journald-container-runtime.pos
          </storage>
          read_from_head true
          tag container-runtime
        </source>

        <source>
          @id journald-kubelet
          @type systemd
          matches [{ "_SYSTEMD_UNIT": "kubelet.service" }]
          <storage>
            @type local
            persistent true
            path /var/log/journald-kubelet.pos
          </storage>
          read_from_head true
          tag kubelet
        </source>

        <source>
          @id journald-node-problem-detector
          @type systemd
          matches [{ "_SYSTEMD_UNIT": "node-problem-detector.service" }]
          <storage>
            @type local
            persistent true
            path /var/log/journald-node-problem-detector.pos
          </storage>
          read_from_head true
          tag node-problem-detector
        </source>

        <source>
          @id kernel
          @type systemd
          matches [{ "_TRANSPORT": "kernel" }]
          <storage>
            @type local
            persistent true
            path /var/log/kernel.pos
          </storage>
          <entry>
            fields_strip_underscores true
            fields_lowercase true
          </entry>
          read_from_head true
          tag kernel
        </source>

      forward.input.conf: |-
        # Takes the messages sent over TCP
        <source>
          @id forward
          @type forward
        </source>

      monitoring.conf: |-
        # Prometheus Exporter Plugin
        # input plugin that exports metrics
        <source>
          @id prometheus
          @type prometheus
        </source>

        <source>
          @id monitor_agent
          @type monitor_agent
        </source>

        # input plugin that collects metrics from MonitorAgent
        <source>
          @id prometheus_monitor
          @type prometheus_monitor
          <labels>
            host ${hostname}
          </labels>
        </source>

        # input plugin that collects metrics for output plugin
        <source>
          @id prometheus_output_monitor
          @type prometheus_output_monitor
          <labels>
            host ${hostname}
          </labels>
        </source>

        # input plugin that collects metrics for in_tail plugin
        <source>
          @id prometheus_tail_monitor
          @type prometheus_tail_monitor
          <labels>
            host ${hostname}
          </labels>
        </source>

      output.conf: |-
        <match **>
          @id elasticsearch
          @type elasticsearch
          @log_level info
          type_name _doc
          include_tag_key true
          host elasticsearch-logging
          port 9200
          logstash_format true
          <buffer>
            @type file
            path /var/log/fluentd-buffers/kubernetes.system.buffer
            flush_mode interval
            retry_type exponential_backoff
            flush_thread_count 2
            flush_interval 5s
            retry_forever
            retry_max_interval 30
            chunk_limit_size 2M
            total_limit_size 500M
            overflow_action block
          </buffer>
        </match>

    5. fluentd-es-ds.yaml

    apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: fluentd-es
      namespace: logging
      labels:
        k8s-app: fluentd-es
        addonmanager.kubernetes.io/mode: Reconcile
    ---
    kind: ClusterRole
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: fluentd-es
      labels:
        k8s-app: fluentd-es
        addonmanager.kubernetes.io/mode: Reconcile
    rules:
      - apiGroups:
          - ""
        resources:
          - "namespaces"
          - "pods"
        verbs:
          - "get"
          - "watch"
          - "list"
    ---
    kind: ClusterRoleBinding
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: fluentd-es
      labels:
        k8s-app: fluentd-es
        addonmanager.kubernetes.io/mode: Reconcile
    subjects:
      - kind: ServiceAccount
        name: fluentd-es
        namespace: logging
        apiGroup: ""
    roleRef:
      kind: ClusterRole
      name: fluentd-es
      apiGroup: ""
    ---
    apiVersion: apps/v1
    kind: DaemonSet
    metadata:
      name: fluentd-es-v3.1.1
      namespace: logging
      labels:
        k8s-app: fluentd-es
        version: v3.1.1
        addonmanager.kubernetes.io/mode: Reconcile
    spec:
      selector:
        matchLabels:
          k8s-app: fluentd-es
          version: v3.1.1
      template:
        metadata:
          labels:
            k8s-app: fluentd-es
            version: v3.1.1
        spec:
          securityContext:
            seccompProfile:
              type: RuntimeDefault
          priorityClassName: system-node-critical
          serviceAccountName: fluentd-es
          containers:
            - name: fluentd-es
              image: quay.io/fluentd_elasticsearch/fluentd:v3.1.0
              env:
                - name: FLUENTD_ARGS
                  value: --no-supervisor -q
              resources:
                limits:
                  memory: 500Mi
                requests:
                  cpu: 100m
                  memory: 200Mi
              volumeMounts:
                - name: varlog
                  mountPath: /var/log
                - name: varlibdockercontainers
                  mountPath: /var/lib/docker/containers
                  readOnly: true
                - name: config-volume
                  mountPath: /etc/fluent/config.d
              ports:
                - containerPort: 24231
                  name: prometheus
                  protocol: TCP
              livenessProbe:
                tcpSocket:
                  port: prometheus
                initialDelaySeconds: 5
                timeoutSeconds: 10
              readinessProbe:
                tcpSocket:
                  port: prometheus
                initialDelaySeconds: 5
                timeoutSeconds: 10
          terminationGracePeriodSeconds: 30
          volumes:
            - name: varlog
              hostPath:
                path: /var/log
            - name: varlibdockercontainers
              hostPath:
                path: /var/lib/docker/containers
            - name: config-volume
              configMap:
                name: fluentd-es-config-v0.2.1
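
    The DaemonSet only collects logs from nodes where its pods can be scheduled. Control-plane nodes are usually tainted, so if their logs should be collected as well, tolerations can be added under spec.template.spec. A sketch (not part of the official manifest; the taint keys below are the ones kubeadm applies and may differ on other distributions):

      tolerations:
        - key: node-role.kubernetes.io/control-plane
          operator: Exists
          effect: NoSchedule
        - key: node-role.kubernetes.io/master
          operator: Exists
          effect: NoSchedule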

    6. kibana-deployment.yaml

    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: kibana-logging
      namespace: logging
      labels:
        k8s-app: kibana-logging
        addonmanager.kubernetes.io/mode: Reconcile
    spec:
      replicas: 1
      selector:
        matchLabels:
          k8s-app: kibana-logging
      template:
        metadata:
          labels:
            k8s-app: kibana-logging
        spec:
          securityContext:
            seccompProfile:
              type: RuntimeDefault
          containers:
            - name: kibana-logging
              image: docker.elastic.co/kibana/kibana-oss:7.10.2
              resources:
                # need more cpu upon initialization, therefore burstable class
                limits:
                  cpu: 1000m
                requests:
                  cpu: 100m
              env:
                - name: ELASTICSEARCH_HOSTS
                  value: http://elasticsearch-logging:9200
                - name: SERVER_NAME
                  value: kibana-logging
                - name: SERVER_BASEPATH
                  value: /api/v1/namespaces/logging/services/kibana-logging/proxy
                - name: SERVER_REWRITEBASEPATH
                  value: "false"
              ports:
                - containerPort: 5601
                  name: ui
                  protocol: TCP
              livenessProbe:
                httpGet:
                  path: /api/status
                  port: ui
                initialDelaySeconds: 5
                timeoutSeconds: 10
              readinessProbe:
                httpGet:
                  path: /api/status
                  port: ui
                initialDelaySeconds: 5
                timeoutSeconds: 10

    7. kibana-service.yaml

    apiVersion: v1
    kind: Service
    metadata:
      name: kibana-logging
      namespace: logging
      labels:
        k8s-app: kibana-logging
        kubernetes.io/cluster-service: "true"
        addonmanager.kubernetes.io/mode: Reconcile
        kubernetes.io/name: "Kibana"
    spec:
      ports:
        - port: 5601
          protocol: TCP
          targetPort: ui
      selector:
        k8s-app: kibana-logging
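
    The Service is ClusterIP-only, so Kibana is reached through the API-server proxy at the path configured as SERVER_BASEPATH in the Deployment (with kubectl proxy running, that is http://127.0.0.1:8001/api/v1/namespaces/logging/services/kibana-logging/proxy). For direct browser access without kubectl, an additional NodePort Service is one option; a sketch, not part of the addon (the Service name and nodePort value are assumptions):

    apiVersion: v1
    kind: Service
    metadata:
      name: kibana-logging-nodeport
      namespace: logging
      labels:
        k8s-app: kibana-logging
    spec:
      type: NodePort
      ports:
        - port: 5601
          targetPort: ui
          nodePort: 30601      # any free port in the 30000-32767 range
      selector:
        k8s-app: kibana-logging

    When exposing Kibana directly like this, SERVER_BASEPATH in the Deployment no longer matches the URL the browser uses and may need to be removed or adjusted.
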
  • Original article: https://blog.csdn.net/xoofly/article/details/132663415