Install via docker-compose
Component versions:
ES: 8.5.0
Kibana: 8.5.0
Logstash: 8.5.0
# Create the compose file at the chosen path
vim docker-compose-es-kibana-logstash.yaml
version: "3"
services:
  elasticsearch:
    restart: always
    image: docker.elastic.co/elasticsearch/elasticsearch:8.5.0
    container_name: elasticsearch
    hostname: elasticsearch
    network_mode: host
    privileged: true
    ulimits:
      memlock:
        soft: -1
        hard: -1
    environment:
      - "ES_JAVA_OPTS=-Xms8192m -Xmx8192m"
      - "http.host=0.0.0.0"
      - "node.name=es_node01"
      - "cluster.name=es_cluster"
      - "discovery.type=single-node"
    ports:
      - "9200:9200"
      - "9300:9300"
    volumes:
      - "/opt/config/es/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml"
      - "/opt/config/es/plugins:/usr/share/elasticsearch/plugins:rw"
      - "/opt/data/es/data:/usr/share/elasticsearch/data:rw"
  kibana:
    restart: always
    container_name: kibana
    image: docker.elastic.co/kibana/kibana:8.5.0
    network_mode: host
    environment:
      - "XPACK_MONITORING_COLLECTION_ENABLED=true"
    ports:
      - "5601:5601"
    volumes:
      - "/opt/config/kibana/config:/usr/share/kibana/config"
  logstash:
    image: docker.elastic.co/logstash/logstash:8.5.0
    container_name: logstash
    network_mode: host
    ports:
      - "9600:9600"
    restart: always
    volumes:
      - "/opt/config/logstash/config:/usr/share/logstash/config"
      - "/opt/config/logstash/pipeline:/usr/share/logstash/pipeline"
      - "/opt/data/logstash:/usr/share/logstash/data"
      - "/opt/logs/logstash:/usr/share/logstash/logs"
Set 777 permissions on the mapped ES data directory
chmod -R 777 /opt/data/es/data
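The chmod above assumes the mapped host directories already exist; if not, create them first (the paths are taken from the volumes section of the compose file):
# Create the host directories mapped into the elasticsearch container
mkdir -p /opt/config/es/config /opt/config/es/plugins /opt/data/es/data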
ES configuration file
# Create the ES config file at the path mapped in the compose file
vim /opt/config/es/config/elasticsearch.yml
# Node name within the cluster
node.name: "es_node01"
# Cluster name
cluster.name: "es_cluster"
# Network binding (listen on all interfaces)
network.host: 0.0.0.0
# Start in single-node mode
discovery.type: single-node
# Note: ES 8.x configures node roles via node.roles; the old settings below cause a startup error
# Whether this node is eligible to be elected master (pre-8.x setting)
#node.master: true
# Whether this node stores data (pre-8.x setting)
#node.data: true
node.roles: [master,data,remote_cluster_client]
# Enable cross-origin (CORS) requests
http.cors.enabled: true
# Allow all origins
http.cors.allow-origin: "*"
# HTTP port
http.port: 9200
# Transport port for communication between nodes
transport.port: 9300
# Lock memory to prevent swapping; the official docs recommend true
bootstrap.memory_lock: true
# Security settings: disable TLS/certificate checks and enable X-Pack monitoring collection
xpack.security.http.ssl:
  enabled: false
xpack.security.transport.ssl:
  enabled: false
xpack.security.enabled: false
xpack.monitoring.collection.enabled: true
# Maximum size of an HTTP request body
http.max_content_length: 200mb
Set 777 permissions on the mapped Logstash data directory
chmod -R 777 /opt/data/logstash
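Likewise, the remaining mapped host directories for Logstash and Kibana must exist; the paths again come from the compose file above.
# Logstash config, pipeline, data and log directories
mkdir -p /opt/config/logstash/config /opt/config/logstash/pipeline /opt/data/logstash /opt/logs/logstash
# Kibana config directory
mkdir -p /opt/config/kibana/config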
Logstash configuration file
vim /opt/config/logstash/config/logstash.yml
# Enable X-Pack monitoring
xpack.monitoring.enabled: true
xpack.monitoring.elasticsearch.hosts: ["http://localhost:9200"]
xpack.monitoring.collection.interval: 10s
Logging configuration file
vim /opt/config/logstash/config/log4j2.properties
status = error
name = LogstashPropertiesConfig
appender.console.type = Console
appender.console.name = plain_console
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c]%notEmpty{[%X{pipeline.id}]}%notEmpty{[%X{plugin.id}]} %m%n
appender.json_console.type = Console
appender.json_console.name = json_console
appender.json_console.layout.type = JSONLayout
appender.json_console.layout.compact = true
appender.json_console.layout.eventEol = true
rootLogger.level = ${sys:ls.log.level}
rootLogger.appenderRef.console.ref = ${sys:ls.log.format}_console
Pipelines configuration file
vim /opt/config/logstash/config/pipelines.yml
# This file is where you define your pipelines. You can define multiple.
# For more information on multiple pipelines, see the documentation:
# https://www.elastic.co/guide/en/logstash/current/multiple-pipelines.html
- pipeline.id: main
  path.config: "/usr/share/logstash/pipeline"
Configure the Logstash processing pipeline according to the actual project requirements; this example uses Kafka as the input and ES as the output.
vim /opt/config/logstash/pipeline/logstash-kafka-es.conf
input {
  kafka {
    bootstrap_servers => "localhost:9092"
    topics => ["topic1"]
    codec => "json"
  }
}
output {
  if [fields][type] == "topic1" {
    elasticsearch {
      hosts => ["http://localhost:9200"]
      index => "topic1-%{+YYYY.MM.dd}"
    }
  }
}
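A minimal way to exercise this pipeline, assuming a Kafka broker on localhost:9092 and the standard Kafka console tools on the PATH, is to publish a JSON message that carries the [fields][type] value the output condition checks for (the message body below is only an illustration):
# Publish a test event to topic1
echo '{"fields":{"type":"topic1"},"message":"hello from kafka"}' | \
  kafka-console-producer.sh --bootstrap-server localhost:9092 --topic topic1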
vim /opt/config/kibana/config/kibana.yml
# Default Kibana configuration for docker target
i18n.locale: zh-CN
server.host: "0.0.0.0"
server.shutdownTimeout: "5s"
elasticsearch.hosts: [ "http://localhost:9200" ]
monitoring.ui.container.elasticsearch.enabled: true
# The Kibana account and password created earlier
elasticsearch.username: "kibana"
elasticsearch.password: "123456"
# Kibana 8.x uses monitoring.* settings (the old xpack.monitoring.* names were removed)
monitoring.ui.enabled: true
monitoring.ui.elasticsearch.hosts: ["http://localhost:9200"]
monitoring.kibana.collection.enabled: true
monitoring.kibana.collection.interval: 10000
Run the start and stop commands from the directory containing the docker-compose YAML file
# Start
docker-compose -f docker-compose-es-kibana-logstash.yaml up -d
# Stop
docker-compose -f docker-compose-es-kibana-logstash.yaml down
ES: http://ip:9200
Kibana: http://ip:5601
Monitor cluster status in Kibana: Kibana --> Management --> Stack Monitoring
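Once the containers are up, a quick sanity check can be run from the host; security is disabled in this setup, so no credentials are needed (replace localhost with the server IP when checking remotely):
# Elasticsearch cluster health
curl http://localhost:9200/_cluster/health?pretty
# Kibana status API
curl http://localhost:5601/api/status
# Logstash monitoring API on port 9600
curl http://localhost:9600/_node/stats?pretty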