# Pull the image
docker pull elasticsearch:7.12.1
# Create a dedicated Docker network for the ELK containers
docker network create elk
# Start Elasticsearch (a throwaway run, used only to copy out the default config)
docker run -d --name es --net elk -P -e "discovery.type=single-node" elasticsearch:7.12.1
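# Create the host directories first so the docker cp below and the later bind mounts
# have a target; the chmod is one way to let the container user (uid 1000) write to the data dir
mkdir -p /app/es/data
chmod -R 777 /app/es/data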
# Copy the config file to the host
docker cp es:/usr/share/elasticsearch/config/elasticsearch.yml /app/es/elasticsearch.yml
# Remove the container, then start it again with the config and data mounted
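docker rm -f es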
docker run -d --name es \
--net elk \
-p 9200:9200 -p 9300:9300 \
-e "discovery.type=single-node" \
--privileged=true \
-v /app/es/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml \
-v /app/es/data/:/usr/share/elasticsearch/data \
elasticsearch:7.12.1
If startup fails because the JVM heap is too small, locate the container's jvm.options file from the host and reduce the heap size:
find / -name jvm.options
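In the jvm.options found above, shrink the heap, e.g. (values are illustrative; keep -Xms and -Xmx equal):
-Xms512m
-Xmx512m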
Verify:
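# A quick check that Elasticsearch is up (substitute your host's IP;
# 106.54.220.184 is the address used later in this guide)
curl http://106.54.220.184:9200
A JSON response with the cluster name and version number means Elasticsearch is running.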
# Pull the image
docker pull kibana:7.12.1
# Start the Kibana container on the same network
docker run -d --name kibana --net elk -P -e "ELASTICSEARCH_HOSTS=http://es:9200" -e "I18N_LOCALE=zh-CN" kibana:7.12.1
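# As with Elasticsearch, create the host directory first (path per this guide's /app layout)
mkdir -p /app/kibana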
# Copy the config file to the host
docker cp kibana:/usr/share/kibana/config/kibana.yml /app/kibana/
vi /app/kibana/kibana.yml   # set the address of the server where Elasticsearch runs
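A minimal sketch of the relevant kibana.yml settings, assuming both containers share the elk network (so the container name es resolves); use the Elasticsearch server's real IP instead if Kibana runs on another host:
server.host: "0.0.0.0"
elasticsearch.hosts: ["http://es:9200"]
i18n.locale: "zh-CN"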
# Remove the container, then start it again with the config mounted
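docker rm -f kibana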
docker run -d --name kibana \
-p 5601:5601 \
-v /app/kibana/kibana.yml:/usr/share/kibana/config/kibana.yml \
--net elk \
kibana:7.12.1
Verify:
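# Kibana takes a minute to come up; then open http://<host>:5601 in a browser,
# or probe the status API (IP as above)
curl http://106.54.220.184:5601/api/status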
docker pull logstash:7.12.1
docker run -d -P --name logstash --net elk logstash:7.12.1
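# Create the host directory first (same /app layout as above)
mkdir -p /app/logstash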
# Copy the config, data, and pipeline directories to the host
docker cp logstash:/usr/share/logstash/config /app/logstash/
docker cp logstash:/usr/share/logstash/data /app/logstash/
docker cp logstash:/usr/share/logstash/pipeline /app/logstash/
# Make the directory writable for the container
chmod -R 777 /app/logstash/
vi /app/logstash/config/logstash.yml   # set the Elasticsearch address
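A minimal sketch of logstash.yml, assuming the containers share the elk network (otherwise put the Elasticsearch server's IP here):
http.host: "0.0.0.0"
xpack.monitoring.elasticsearch.hosts: ["http://es:9200"]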
vi /app/logstash/pipeline/logstash.conf
logstash.conf:
input {
  tcp {
    mode => "server"
    host => "0.0.0.0"       # accept connections from any host
    port => 5044
    codec => json_lines     # expect newline-delimited JSON
  }
}
output {
  elasticsearch {
    hosts => ["http://106.54.220.184:9200"]   # Elasticsearch address and port
    index => "elk"                            # target index name
    codec => "json"
  }
  stdout {
    codec => rubydebug
  }
}
After editing the config files, remove the container and start it again:
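docker rm -f logstash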
docker run -d --name logstash --net elk \
--privileged=true \
-p 5044:5044 -p 9600:9600 \
-v /app/logstash/data/:/usr/share/logstash/data \
-v /app/logstash/config/:/usr/share/logstash/config \
-v /app/logstash/pipeline/:/usr/share/logstash/pipeline \
logstash:7.12.1
Verify:
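An end-to-end check: the pipeline's tcp input expects JSON lines on port 5044, so push one test event in with netcat and then look for it in the elk index (IP as configured above):
echo '{"message":"hello elk"}' | nc 106.54.220.184 5044
curl 'http://106.54.220.184:9200/elk/_search?pretty'
The Logstash monitoring API on port 9600 (curl http://106.54.220.184:9600) should also return node info.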
Add the logstash dependency:
<!-- logstash integration -->
<dependency>
    <groupId>net.logstash.logback</groupId>
    <artifactId>logstash-logback-encoder</artifactId>
    <version>5.3</version>
</dependency>
logback.xml logging configuration:
<configuration>
    <contextName>ProviderLog</contextName>
    <property name="LOG_HOME" value="home" />
    <property name="appName" value="wdnmdService" />
    <springProperty scope="context" name="springAppName" source="spring.application.name"/>
    <springProperty scope="context" name="serverPort" source="server.port"/>
    <appender name="stash" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
        <destination>106.54.220.184:5044</destination>
        <encoder class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder">
            <providers>
                <logLevel/>
                <callerData/>
                <timestamp>
                    <timeZone>UTC</timeZone>
                </timestamp>
                <pattern>
                    <pattern>
                        {
                        "app": "${springAppName}_${serverPort}",
                        "level": "%level",
                        "thread": "%thread",
                        "class": "%logger{40}",
                        "message": "%message"
                        }
                    </pattern>
                </pattern>
            </providers>
        </encoder>
    </appender>
    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
            <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
        </encoder>
    </appender>
    <appender name="FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <FileNamePattern>${LOG_HOME}/system.log.%d{yyyy-MM-dd}.log</FileNamePattern>
            <MaxHistory>30</MaxHistory>
        </rollingPolicy>
        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
            <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
        </encoder>
        <triggeringPolicy class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
            <MaxFileSize>10MB</MaxFileSize>
        </triggeringPolicy>
    </appender>
    <root level="INFO">
        <appender-ref ref="stash" />
        <appender-ref ref="STDOUT" />
    </root>
</configuration>
Because the pipeline config already specifies the index name (index => "elk"), the application's logs land in the elk index.
Create the index pattern: Kibana → Stack Management → Index Patterns → Create index pattern → elk
View the logs: Kibana → Analytics → Discover