为了在服务中使用 JSON 格式的日志,搭建 fluentd 和 logstash 测试环境,整理相关配置的详细资料。
文档:https://docs.fluentd.org/v/0.12/articles/docker-logging-efk-compose
docker compose:
# docker-compose.yml — EFK stack: Elasticsearch + Kibana + Fluentd, plus an
# httpd "web" container whose stdout is shipped to Fluentd via the fluentd
# logging driver. Indentation restored (the pasted copy was flattened) and
# port mappings quoted so YAML cannot misparse them.
version: "3"
services:
  elasticsearch:
    image: elasticsearch:8.4.3
    container_name: elasticsearch
    restart: always
    environment:
      - cluster.name=elasticsearch
      - discovery.type=single-node
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      # test setup only — never disable security on a real cluster
      - xpack.security.enabled=false
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 262144
        hard: 262144
    volumes:
      - elasticsearch:/usr/share/elasticsearch/data
    ports:
      - "9200:9200"
      - "9600:9600"  # required for Performance Analyzer
    networks:
      - efk-net
  kibana:
    image: kibana:8.4.3
    container_name: kibana
    restart: always
    ports:
      - "5601:5601"
    expose:
      - "5601"
    environment:
      ELASTICSEARCH_URL: http://elasticsearch:9200
      ELASTICSEARCH_HOSTS: http://elasticsearch:9200
    networks:
      - efk-net
  fluentd:
    # image built from ./fluentd/Dockerfile (installs the elasticsearch plugin)
    build: ./fluentd
    volumes:
      - ./fluentd/conf:/fluentd/etc
    links:
      - "elasticsearch"
    restart: always
    container_name: fluentd
    ports:
      - "24224:24224"
      - "24224:24224/udp"
    networks:
      - efk-net
  web:
    image: httpd
    container_name: web
    ports:
      - "80:80"
    links:
      - fluentd
    networks:
      - efk-net
    # container stdout/stderr is forwarded to the fluentd service above
    logging:
      driver: "fluentd"
      options:
        fluentd-address: localhost:24224
        tag: httpd.access
volumes:
  elasticsearch:
networks:
  efk-net:
./fluentd/conf/fluent.conf:
# fluentd/conf/fluent.conf
# Receives forward-protocol events on 24224 and copies every record to both
# Elasticsearch (logstash-style daily indices) and stdout for debugging.
# NOTE(review): the <source>/<match>/<store> directive tags were stripped from
# the pasted copy; reconstructed here per the official Fluentd EFK example —
# confirm against the linked docs.
<source>
  @type forward
  port 24224
  bind 0.0.0.0
</source>

<match *.**>
  @type copy
  <store>
    @type elasticsearch
    host elasticsearch
    port 9200
    logstash_format true
    logstash_prefix fluentd
    logstash_dateformat %Y%m%d
    include_tag_key true
    type_name access_log
    tag_key @log_name
    flush_interval 1s
  </store>
  <store>
    @type stdout
  </store>
</match>
文档:https://www.elastic.co/guide/en/logstash/current/introduction.html
docker compose:
# docker-compose.yml — Elasticsearch + Kibana + Logstash, plus an httpd "web"
# container whose logs are shipped to Logstash via the syslog logging driver.
# Indentation restored (the pasted copy was flattened) and port mappings
# quoted so YAML cannot misparse them.
version: "3"
services:
  elasticsearch:
    image: elasticsearch:8.4.3
    container_name: elasticsearch
    restart: always
    environment:
      - cluster.name=elasticsearch
      - discovery.type=single-node
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      # test setup only — never disable security on a real cluster
      - xpack.security.enabled=false
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 262144
        hard: 262144
    volumes:
      - elasticsearch:/usr/share/elasticsearch/data
    ports:
      - "9200:9200"
      - "9600:9600"  # required for Performance Analyzer
    networks:
      - efk-net
  kibana:
    image: kibana:8.4.3
    container_name: kibana
    restart: always
    ports:
      - "5601:5601"
    expose:
      - "5601"
    environment:
      ELASTICSEARCH_URL: http://elasticsearch:9200
      ELASTICSEARCH_HOSTS: http://elasticsearch:9200
    networks:
      - efk-net
  logstash:
    image: logstash:8.5.0
    volumes:
      # pipeline definitions (logstash.conf) mounted from the host
      - ./logstash/:/usr/share/logstash/pipeline/
    links:
      - "elasticsearch"
    restart: always
    container_name: logstash
    ports:
      - "5000:5000"
    networks:
      - efk-net
  web:
    image: httpd
    container_name: web
    ports:
      - "80:80"
    links:
      - logstash
    networks:
      - efk-net
    # syslog driver needs a host-reachable address, hence the LAN IP rather
    # than the service name
    logging:
      driver: "syslog"
      options:
        syslog-address: "tcp://192.168.0.112:5000"
volumes:
  elasticsearch:
networks:
  efk-net:
./logstash/logstash.conf:
# ./logstash/logstash.conf
# TCP input on 5000, one JSON document per line, indexed into "applog".
# NOTE(review): the compose file ships container logs with the "syslog"
# driver, but json_lines expects raw JSON per line — confirm the intended
# wire format (a syslog codec/filter may be needed for the httpd logs).
input {
  tcp {
    port => 5000
    codec => json_lines
  }
}
output {
  elasticsearch {
    hosts => ["elasticsearch:9200"]
    index => "applog"
  }
}
logstash:应用侧 logback 接入配置
文档: https://github.com/logfellow/logstash-logback-encoder
<!-- logback.xml — ship application logs as JSON over TCP to Logstash.
     Closing tags had their "</" stripped in the pasted copy (e.g.
     "4560destination>"); restored here so the XML is well-formed. -->
<configuration>
  <appender name="stash" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
    <destination>127.0.0.1:4560</destination>
    <!-- LogstashEncoder emits one JSON object per log event -->
    <encoder class="net.logstash.logback.encoder.LogstashEncoder" />
  </appender>
  <root level="DEBUG">
    <appender-ref ref="stash" />
  </root>
</configuration>
fluentd:应用侧 logback 接入配置
文档:https://github.com/sndyuk/logback-more-appenders
示例:logback-appenders-fluentd.xml
<!-- logback-appenders-fluentd.xml — DataFluentAppender sends events to a
     Fluentd forward input; the AsyncAppender wrapper keeps logging from
     blocking the application threads. Closing tags had their "</" stripped
     in the pasted copy; restored here. -->
<appender name="FLUENT_SYNC"
          class="ch.qos.logback.more.appenders.DataFluentAppender">
  <tag>debug</tag>
  <label>logback</label>
  <remoteHost>localhost</remoteHost>
  <port>24224</port>
  <!-- fields excluded from the emitted record -->
  <ignoredField>throwable</ignoredField>
  <ignoredField>thread</ignoredField>
  <bufferCapacity>16777216</bufferCapacity>
  <timeout>10000</timeout>
  <flattenMapMarker>false</flattenMapMarker>
  <!-- NOTE(review): markerPrefix and pattern appear to have been empty
       elements in the original paste — confirm intended values -->
  <markerPrefix></markerPrefix>
  <encoder>
    <pattern></pattern>
  </encoder>
  <messageFieldKeyName>msg</messageFieldKeyName>
</appender>

<appender name="FLUENT" class="ch.qos.logback.classic.AsyncAppender">
  <queueSize>999</queueSize>
  <!-- drop events instead of blocking when the queue is full -->
  <neverBlock>true</neverBlock>
  <maxFlushTime>1000</maxFlushTime>
  <appender-ref ref="FLUENT_SYNC" />
</appender>
logstash 通过配置使用纳秒:
容器日志重定向到 fluentd 或 logstash 的相关配置。
<!-- Logstash TCP appender with the SkyWalking TraceIdJsonProvider so each
     JSON log line carries the trace id. Closing tags had their "</"
     stripped in the pasted copy; restored here. -->
<appender name="stash_sync" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
  <destination>IP:port</destination>
  <encoder class="net.logstash.logback.encoder.LogstashEncoder">
    <provider class="org.apache.skywalking.apm.toolkit.log.logback.v1.x.logstash.TraceIdJsonProvider"/>
  </encoder>
</appender>
文档: https://www.elastic.co/guide/en/elasticsearch/reference/current/index-templates.html
可以在 Kibana 的 Dev Tools 中按步骤执行,也可以直接调用 ES 的 REST API。
PUT _index_template/template_1
{
  "index_patterns": ["te*", "bar*"],
  "template": {
    "settings": {
      "number_of_shards": 1
    },
    "mappings": {
      "_source": {
        "enabled": true
      },
      "properties": {
        "host_name": {
          "type": "keyword"
        },
        "created_at": {
          "type": "date",
          "format": "EEE MMM dd HH:mm:ss Z yyyy"
        }
      }
    },
    "aliases": {
      "mydata": { }
    }
  },
  "priority": 500,
  "composed_of": ["component_template1", "runtime_component_template"],
  "version": 3,
  "_meta": {
    "description": "my custom"
  }
}
文档: https://www.elastic.co/guide/en/elasticsearch/reference/current/explicit-mapping.html
PUT /my-index-000001/_mapping
{
  "properties": {
    "employee-id": {
      "type": "keyword",
      "index": false
    }
  }
}