Java JSON Log Output


    To use JSON-formatted logs in our services, this post sets up fluentd and logstash test environments and collects the relevant configuration details.

    1. fluentd test environment

    Docs: https://docs.fluentd.org/v/0.12/articles/docker-logging-efk-compose

    docker compose:

    version: "3"
    
    services:
      elasticsearch:
        image: elasticsearch:8.4.3
        container_name: elasticsearch
        restart: always
        environment:
          - cluster.name=elasticsearch
          - discovery.type=single-node
          - bootstrap.memory_lock=true
          - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
          - xpack.security.enabled=false
        ulimits:
          memlock:
            soft: -1
            hard: -1
          nofile:
            soft: 262144
            hard: 262144
        volumes:
          - elasticsearch:/usr/share/elasticsearch/data
        ports:
          - 9200:9200
          - 9600:9600 # required for Performance Analyzer
        networks:
          - efk-net
      kibana:
        image: kibana:8.4.3
        container_name: kibana
        restart: always
        ports:
          - 5601:5601
        expose:
          - "5601"
        environment:
          ELASTICSEARCH_URL: http://elasticsearch:9200
          ELASTICSEARCH_HOSTS: http://elasticsearch:9200
        networks:
          - efk-net
      fluentd:
        build: ./fluentd
        volumes:
          - ./fluentd/conf:/fluentd/etc
        links:
          - "elasticsearch"
        restart: always
        container_name: fluentd
        ports:
          - "24224:24224"
          - "24224:24224/udp"
        networks:
          - efk-net
      web:
        image: httpd
        container_name: web
        ports:
          - "80:80"
        links:
          - fluentd
        networks:
          - efk-net
        logging:
          driver: "fluentd"
          options:
            fluentd-address: localhost:24224
            tag: httpd.access
    volumes:
      elasticsearch:
    
    networks:
      efk-net:
    
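    The compose file builds fluentd from ./fluentd, so that directory needs a Dockerfile that installs the Elasticsearch output plugin used in fluent.conf. A minimal sketch, assuming a recent official image (the image tag is a placeholder; pick a plugin version compatible with Elasticsearch 8.x):

    # fluentd/Dockerfile
    FROM fluent/fluentd:v1.16-debian-1
    USER root
    # installs the "@type elasticsearch" output plugin referenced by fluent.conf
    RUN gem install fluent-plugin-elasticsearch --no-document
    USER fluent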

    ./fluentd/conf/fluent.conf:

    # fluentd/conf/fluent.conf

    <source>
      @type forward
      port 24224
      bind 0.0.0.0
    </source>

    <match *.**>
      @type copy

      <store>
        @type elasticsearch
        host elasticsearch
        port 9200
        logstash_format true
        logstash_prefix fluentd
        logstash_dateformat %Y%m%d
        include_tag_key true
        type_name access_log
        tag_key @log_name
        flush_interval 1s
      </store>

      <store>
        @type stdout
      </store>
    </match>
    
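    With both files in place, the stack can be exercised end to end. Each request to the httpd container produces an access-log record that the fluentd log driver forwards, and it should show up in Kibana under a fluentd-* index (per logstash_prefix above):

    docker compose up -d
    # every request generates an httpd access log routed through fluentd to Elasticsearch
    curl http://localhost:80/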

    2. logstash test environment

    Docs: https://www.elastic.co/guide/en/logstash/current/introduction.html

    docker compose:

    version: "3"
    
    services:
      elasticsearch:
        image: elasticsearch:8.4.3
        container_name: elasticsearch
        restart: always
        environment:
          - cluster.name=elasticsearch
          - discovery.type=single-node
          - bootstrap.memory_lock=true
          - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
          - xpack.security.enabled=false
        ulimits:
          memlock:
            soft: -1
            hard: -1
          nofile:
            soft: 262144
            hard: 262144
        volumes:
          - elasticsearch:/usr/share/elasticsearch/data
        ports:
          - 9200:9200
          - 9600:9600 # required for Performance Analyzer
        networks:
          - efk-net
      kibana:
        image: kibana:8.4.3
        container_name: kibana
        restart: always
        ports:
          - 5601:5601
        expose:
          - "5601"
        environment:
          ELASTICSEARCH_URL: http://elasticsearch:9200
          ELASTICSEARCH_HOSTS: http://elasticsearch:9200
        networks:
          - efk-net
      logstash:
        image: logstash:8.5.0
        volumes:
          - ./logstash/:/usr/share/logstash/pipeline/
        links:
          - "elasticsearch"
        restart: always
        container_name: logstash
        ports:
          - "5000:5000"
        networks:
          - efk-net
      web:
        image: httpd
        container_name: web
        ports:
          - "80:80"
        links:
          - logstash
        networks:
          - efk-net
        logging:
          driver: "syslog"
          options:
            syslog-address: "tcp://192.168.0.112:5000"
    volumes:
      elasticsearch:
    
    networks:
      efk-net:
    

    ./logstash/logstash.conf:

    input {
        tcp {
            port => 5000
            codec => json_lines
        }
    }
    
    output {
      elasticsearch {
         hosts => ["elasticsearch:9200"]
         index => "applog"
      }
    }
    

    3. Integrating logback in Java code

    logstash

    Docs: https://github.com/logfellow/logstash-logback-encoder

    
    <configuration>
        <appender name="stash" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
            <destination>127.0.0.1:4560</destination>

            <!-- encoder is required -->
            <encoder class="net.logstash.logback.encoder.LogstashEncoder" />
        </appender>

        <root level="DEBUG">
            <appender-ref ref="stash" />
        </root>
    </configuration>
    
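    With the appender configured, plain SLF4J calls already come out as JSON; MDC entries and structured arguments become top-level JSON fields. A minimal sketch (the class and field names are made up; StructuredArguments ships with logstash-logback-encoder):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;
    import org.slf4j.MDC;
    import static net.logstash.logback.argument.StructuredArguments.kv;

    public class JsonLogDemo {
        private static final Logger log = LoggerFactory.getLogger(JsonLogDemo.class);

        public static void main(String[] args) {
            // MDC entries are emitted as JSON fields by LogstashEncoder
            MDC.put("userId", "42");
            try {
                // kv() appends orderId to the message text and adds it as a JSON field
                log.info("order created, {}", kv("orderId", "A-1001"));
            } finally {
                MDC.clear();
            }
        }
    }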

    fluentd

    Docs: https://github.com/sndyuk/logback-more-appenders

    Example: logback-appenders-fluentd.xml

    <appender name="FLUENT_SYNC"
                class="ch.qos.logback.more.appenders.DataFluentAppender">
    
        
        <tag>debugtag>
        
        <label>logbacklabel>
    
        
        <remoteHost>localhostremoteHost>
        <port>24224port>
    
        
        
        
    
        <ignoredField>throwableignoredField>
        <ignoredField>threadignoredField>
    
        
        <bufferCapacity>16777216bufferCapacity> 
        <timeout>10000timeout> 
    
        
        <flattenMapMarker>falseflattenMapMarker>
        
        <markerPrefix>markerPrefix>
    
        
        <encoder>
          <pattern>pattern>
        encoder>
    
        
        <messageFieldKeyName>msgmessageFieldKeyName>
    
      appender>
    
      <appender name="FLUENT" class="ch.qos.logback.classic.AsyncAppender">
        
        <queueSize>999queueSize>
        
        <neverBlock>trueneverBlock>
        
        <maxFlushTime>1000maxFlushTime>
        <appender-ref ref="FLUENT_SYNC" />
      appender>
    
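    The DataFluentAppender above comes from logback-more-appenders and depends on fluent-logger. The Maven coordinates look like this (the version numbers are assumptions; check Maven Central for current releases):

    <dependency>
      <groupId>com.sndyuk</groupId>
      <artifactId>logback-more-appenders</artifactId>
      <version>1.8.8</version>
    </dependency>
    <dependency>
      <groupId>org.fluentd</groupId>
      <artifactId>fluent-logger</artifactId>
      <version>0.3.4</version>
    </dependency>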

    4. Timestamps

    • fluentd timestamps only have second precision; without milliseconds, log ordering ends up scrambled.
    • logstash records milliseconds, which is a big improvement over fluentd, but when a burst of logs lands within a single millisecond (not a reasonable logging rate to begin with), ordering can still break.

    4.1 Workaround

    logstash can be configured to use nanosecond precision:
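    One way to get this on the Elasticsearch side is to map the timestamp field as date_nanos before the index is first written. A minimal sketch, assuming the applog index from the logstash.conf above (the template name is made up):

    PUT _index_template/applog_template
    {
      "index_patterns": ["applog*"],
      "template": {
        "mappings": {
          "properties": {
            "@timestamp": { "type": "date_nanos" }
          }
        }
      }
    }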

    5. Container log configuration

    Configuration for redirecting container logs to fluentd or logstash.

    5.1 docker log-driver configuration
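    For a single container, the driver is set on docker run; a sketch using the fluentd driver with the same address and tag as the compose example in section 1:

    docker run -d \
      --log-driver=fluentd \
      --log-opt fluentd-address=localhost:24224 \
      --log-opt tag=httpd.access \
      httpd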

    5.2 docker compose logging
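    In compose this is the logging key on a service, exactly as the web service in section 1 uses it:

    services:
      web:
        image: httpd
        logging:
          driver: "fluentd"
          options:
            fluentd-address: localhost:24224
            tag: httpd.access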

    5.3 Using environment variables in logstash
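    Logstash pipeline configs support ${VAR:default} substitution, which keeps ports and hosts out of the file. A sketch of the earlier input block rewritten this way (TCP_PORT is a made-up variable name):

    input {
      tcp {
        # resolves to $TCP_PORT when set, otherwise falls back to 5000
        port => "${TCP_PORT:5000}"
        codec => json_lines
      }
    }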

    6. Integrating the SkyWalking TID

    Docs: https://skywalking.apache.org/docs/skywalking-java/next/en/setup/service-agent/java-agent/application-toolkit-logback-1.x/

    <appender name="stash_sync" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
      <destination>IP:portdestination>
    
      
      <encoder class="net.logstash.logback.encoder.LogstashEncoder" >
        <provider class="org.apache.skywalking.apm.toolkit.log.logback.v1.x.logstash.TraceIdJsonProvider"/>
      encoder>
    appender>
    
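    TraceIdJsonProvider ships in SkyWalking's logback toolkit; the dependency looks like this (the version is an assumption; align it with your agent version):

    <dependency>
      <groupId>org.apache.skywalking</groupId>
      <artifactId>apm-toolkit-logback-1.x</artifactId>
      <version>8.12.0</version>
    </dependency>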

    7. Notes

    7.1 Setting custom field types via an index template

    Docs: https://www.elastic.co/guide/en/elasticsearch/reference/current/index-templates.html

    Kibana provides a step-by-step UI for this; you can also call Elasticsearch directly.

    PUT _index_template/template_1
    {
      "index_patterns": ["te*", "bar*"],
      "template": {
        "settings": {
          "number_of_shards": 1
        },
        "mappings": {
          "_source": {
            "enabled": true
          },
          "properties": {
            "host_name": {
              "type": "keyword"
            },
            "created_at": {
              "type": "date",
              "format": "EEE MMM dd HH:mm:ss Z yyyy"
            }
          }
        },
        "aliases": {
          "mydata": { }
        }
      },
      "priority": 500,
      "composed_of": ["component_template1", "runtime_component_template"], 
      "version": 3,
      "_meta": {
        "description": "my custom"
      }
    }
    

    7.2 If custom MDC fields should not default to the text type, add an explicit mapping to the index beforehand:

    Docs: https://www.elastic.co/guide/en/elasticsearch/reference/current/explicit-mapping.html

    PUT /my-index-000001/_mapping
    {
      "properties": {
        "employee-id": {
          "type": "keyword",
          "index": false
        }
      }
    }
    
  • Original article: https://blog.csdn.net/isea533/article/details/127735388