• 系统学习Linux-ELK日志收集系统


    ELK日志收集系统集群实验

    实验环境

    角色主机名IP接口
    httpd192.168.31.50ens33
    node1192.168.31.51ens33
    node2192.168.31.53ens33

    环境配置

    设置各个主机的ip地址为拓扑中的静态ip,并修改主机名

    1. #httpd
    2. [root@localhost ~]# hostnamectl set-hostname httpd
    3. [root@localhost ~]# bash
    4. [root@httpd ~]#
    5. #node1
    6. [root@localhost ~]# hostnamectl set-hostname node1
    7. [root@localhost ~]# bash
    8. [root@node1 ~]# vim /etc/hosts
    9. 192.168.31.51 node1
    10. 192.168.31.53 node2
    11. #node2
    12. [root@localhost ~]# hostnamectl set-hostname node2
    13. [root@localhost ~]# bash
    14. [root@node2 ~]# vim /etc/hosts
    15. 192.168.31.51 node1
    16. 192.168.31.53 node2

    安装elasticsearch

    1. #node1
    2. [root@node1 ~]# ls
    3. elk软件包 公共 模板 视频 图片 文档 下载 音乐 桌面
    4. [root@node1 ~]# mv elk软件包 elk
    5. [root@node1 ~]# ls
    6. elk 公共 模板 视频 图片 文档 下载 音乐 桌面
    7. [root@node1 ~]# cd elk
    8. [root@node1 elk]# ls
    9. elasticsearch-5.5.0.rpm kibana-5.5.1-x86_64.rpm node-v8.2.1.tar.gz
    10. elasticsearch-head.tar.gz logstash-5.5.1.rpm phantomjs-2.1.1-linux-x86_64.tar.bz2
    11. [root@node1 elk]# rpm -ivh elasticsearch-5.5.0.rpm
    12. 警告:elasticsearch-5.5.0.rpm: 头V4 RSA/SHA512 Signature, 密钥 ID d88e42b4: NOKEY
    13. 准备中... ################################# [100%]
    14. Creating elasticsearch group... OK
    15. Creating elasticsearch user... OK
    16. 正在升级/安装...
    17. 1:elasticsearch-0:5.5.0-1 ################################# [100%]
    18. ### NOT starting on installation, please execute the following statements to configure elasticsearch service to start automatically using systemd
    19. sudo systemctl daemon-reload
    20. sudo systemctl enable elasticsearch.service
    21. ### You can start elasticsearch service by executing
    22. sudo systemctl start elasticsearch.service
    23. #node2

    配置

    node1

    vim /etc/elasticsearch/elasticsearch.yml

    1. 17 cluster.name: my-elk-cluster //集群名称
    2. 23 node.name: node1 //节点名字
    3. 33 path.data: /var/lib/elasticsearch //数据存放路径
    4. 37 path.logs: /var/log/elasticsearch/ //日志存放路径
    5. 43 bootstrap.memory_lock: false //在启动的时候不锁定内存
    6. 55 network.host: 0.0.0.0 //提供服务绑定的IP地址,0.0.0.0代表所有地址
    7. 59 http.port: 9200 //侦听端口为9200
    8. 68 discovery.zen.ping.unicast.hosts: ["node1", "node2"] //群集发现通过单播实现

     node2

    1. 17 cluster.name: my-elk-cluster //集群名称
    2. 23 node.name: node2 //节点名字(每个节点的名称必须唯一,此处应为node2)
    3. 33 path.data: /var/lib/elasticsearch //数据存放路径
    4. 37 path.logs: /var/log/elasticsearch/ //日志存放路径
    5. 43 bootstrap.memory_lock: false //在启动的时候不锁定内存
    6. 55 network.host: 0.0.0.0 //提供服务绑定的IP地址,0.0.0.0代表所有地址
    7. 59 http.port: 9200 //侦听端口为9200
    8. 68 discovery.zen.ping.unicast.hosts: ["node1", "node2"] //群集发现通过单播实现

    在node1安装-elasticsearch-head插件

    移动到elk文件夹

    1. #安装插件编译很慢
    2. [root@node1 ~]# cd elk/
    3. [root@node1 elk]# ls
    4. elasticsearch-5.5.0.rpm kibana-5.5.1-x86_64.rpm phantomjs-2.1.1-linux-x86_64.tar.bz2
    5. elasticsearch-head.tar.gz logstash-5.5.1.rpm node-v8.2.1.tar.gz
    6. [root@node1 elk]# tar xf node-v8.2.1.tar.gz
    7. [root@node1 elk]# cd node-v8.2.1/
    8. [root@node1 node-v8.2.1]# ./configure && make && make install
    9. [root@node1 elk]# cd ~/elk
    10. [root@node1 elk]# tar xf phantomjs-2.1.1-linux-x86_64.tar.bz2
    11. [root@node1 elk]# cd phantomjs-2.1.1-linux-x86_64/bin/
    12. [root@node1 bin]# ls
    13. phantomjs
    14. [root@node1 bin]# cp phantomjs /usr/local/bin/
    15. [root@node1 bin]# cd ~/elk/
    16. [root@node1 elk]# tar xf elasticsearch-head.tar.gz
    17. [root@node1 elk]# cd elasticsearch-head/
    18. [root@node1 elasticsearch-head]# npm install
    19. npm WARN deprecated fsevents@1.2.13: The v1 package contains DANGEROUS / INSECURE binaries. Upgrade to safe fsevents v2
    20. npm WARN optional SKIPPING OPTIONAL DEPENDENCY: fsevents@^1.0.0 (node_modules/karma/node_modules/chokidar/node_modules/fsevents):
    21. npm WARN notsup SKIPPING OPTIONAL DEPENDENCY: Unsupported platform for fsevents@1.2.13: wanted {"os":"darwin","arch":"any"} (current: {"os":"linux","arch":"x64"})
    22. npm WARN elasticsearch-head@0.0.0 license should be a valid SPDX license expression
    23. up to date in 3.536s

    修改elasticsearch配置文件

    1. [root@node1 ~]# vim /etc/elasticsearch/elasticsearch.yml
    2. 84 # ---------------------------------- Various -----------------------------------
    3. 85 #
    4. 86 # Require explicit names when deleting indices:
    5. 87 #
    6. 88 #action.destructive_requires_name: true
    7. 89 http.cors.enabled: true //开启跨域访问支持,默认为false
    8. 90 http.cors.allow-origin: "*" //跨域访问允许的域名地址(注意冒号后必须有空格,否则YAML解析失败)
    9. [root@node1 ~]# systemctl restart elasticsearch.service
    10. #启动elasticsearch-head
    11. cd /root/elk/elasticsearch-head
    12. npm run start &
    13. #查看监听
    14. netstat -anput | grep :9100
    15. #访问
    16. http://192.168.31.51:9100

    node1服务器安装logstash

    1. [root@node1 elk]# rpm -ivh logstash-5.5.1.rpm
    2. 警告:logstash-5.5.1.rpm: 头V4 RSA/SHA512 Signature, 密钥 ID d88e42b4: NOKEY
    3. 准备中... ################################# [100%]
    4. 软件包 logstash-1:5.5.1-1.noarch 已经安装
    5. #开启并创建一个软连接
    6. [root@node1 elk]# systemctl start logstash.service
    7. [root@node1 elk]# ln -s /usr/share/logstash/bin/logstash /usr/local/bin/
    8. #测试1
    9. [root@node1 elk]# logstash -e 'input{ stdin{} }output { stdout{} }'
    10. ERROR StatusLogger No log4j2 configuration file found. Using default configuration: logging only errors to the console.
    11. WARNING: Could not find logstash.yml which is typically located in $LS_HOME/config or /etc/logstash. You can specify the path using --path.settings. Continuing using the defaults
    12. Could not find log4j2 configuration at path //usr/share/logstash/config/log4j2.properties. Using default config which logs to console
    13. 16:03:50.250 [main] INFO logstash.setting.writabledirectory - Creating directory {:setting=>"path.queue", :path=>"/usr/share/logstash/data/queue"}
    14. 16:03:50.256 [main] INFO logstash.setting.writabledirectory - Creating directory {:setting=>"path.dead_letter_queue", :path=>"/usr/share/logstash/data/dead_letter_queue"}
    15. 16:03:50.330 [LogStash::Runner] INFO logstash.agent - No persistent UUID file found. Generating new UUID {:uuid=>"9ba08544-a7a7-4706-a3cd-2e2ca163548d", :path=>"/usr/share/logstash/data/uuid"}
    16. 16:03:50.584 [[main]-pipeline-manager] INFO logstash.pipeline - Starting pipeline {"id"=>"main", "pipeline.workers"=>2, "pipeline.batch.size"=>125, "pipeline.batch.delay"=>5, "pipeline.max_inflight"=>250}
    17. 16:03:50.739 [[main]-pipeline-manager] INFO logstash.pipeline - Pipeline main started
    18. The stdin plugin is now waiting for input:
    19. 16:03:50.893 [Api Webserver] INFO logstash.agent - Successfully started Logstash API endpoint {:port=>9600}
    20. ^C16:04:32.838 [SIGINT handler] WARN logstash.runner - SIGINT received. Shutting down the agent.
    21. 16:04:32.855 [LogStash::Runner] WARN logstash.agent - stopping pipeline {:id=>"main"}
    22. #测试2
    23. [root@node1 elk]# logstash -e 'input { stdin{} } output { stdout{ codec=>rubydebug }}'
    24. ERROR StatusLogger No log4j2 configuration file found. Using default configuration: logging only errors to the console.
    25. WARNING: Could not find logstash.yml which is typically located in $LS_HOME/config or /etc/logstash. You can specify the path using --path.settings. Continuing using the defaults
    26. Could not find log4j2 configuration at path //usr/share/logstash/config/log4j2.properties. Using default config which logs to console
    27. 16:46:23.975 [[main]-pipeline-manager] INFO logstash.pipeline - Starting pipeline {"id"=>"main", "pipeline.workers"=>2, "pipeline.batch.size"=>125, "pipeline.batch.delay"=>5, "pipeline.max_inflight"=>250}
    28. The stdin plugin is now waiting for input:
    29. 16:46:24.014 [[main]-pipeline-manager] INFO logstash.pipeline - Pipeline main started
    30. 16:46:24.081 [Api Webserver] INFO logstash.agent - Successfully started Logstash API endpoint {:port=>9600}
    31. ^C16:46:29.970 [SIGINT handler] WARN logstash.runner - SIGINT received. Shutting down the agent.
    32. 16:46:29.975 [LogStash::Runner] WARN logstash.agent - stopping pipeline {:id=>"main"}
    33. #测试3
    34. 16:46:29.975 [LogStash::Runner] WARN logstash.agent - stopping pipeline {:id=>"main"}
    35. [root@node1 elk]# logstash -e 'input { stdin{} } output { elasticsearch{ hosts=>["192.168.31.51:9200"]} }'
    36. ERROR StatusLogger No log4j2 configuration file found. Using default configuration: logging only errors to the console.
    37. WARNING: Could not find logstash.yml which is typically located in $LS_HOME/config or /etc/logstash. You can specify the path using --path.settings. Continuing using the defaults
    38. Could not find log4j2 configuration at path //usr/share/logstash/config/log4j2.properties. Using default config which logs to console
    39. 16:46:55.951 [[main]-pipeline-manager] INFO logstash.outputs.elasticsearch - Elasticsearch pool URLs updated {:changes=>{:removed=>[], :added=>[http://192.168.31.51:9200/]}}
    40. 16:46:55.955 [[main]-pipeline-manager] INFO logstash.outputs.elasticsearch - Running health check to see if an Elasticsearch connection is working {:healthcheck_url=>http://192.168.31.51:9200/, :path=>"/"}
    41. 16:46:56.049 [[main]-pipeline-manager] WARN logstash.outputs.elasticsearch - Restored connection to ES instance {:url=>#0x3a106333>}
    42. 16:46:56.068 [[main]-pipeline-manager] INFO logstash.outputs.elasticsearch - Using mapping template from {:path=>nil}
    43. 16:46:56.204 [[main]-pipeline-manager] INFO logstash.outputs.elasticsearch - Attempting to install template {:manage_template=>{"template"=>"logstash-*", "version"=>50001, "settings"=>{"index.refresh_interval"=>"5s"}, "mappings"=>{"_default_"=>{"_all"=>{"enabled"=>true, "norms"=>false}, "dynamic_templates"=>[{"message_field"=>{"path_match"=>"message", "match_mapping_type"=>"string", "mapping"=>{"type"=>"text", "norms"=>false}}}, {"string_fields"=>{"match"=>"*", "match_mapping_type"=>"string", "mapping"=>{"type"=>"text", "norms"=>false, "fields"=>{"keyword"=>{"type"=>"keyword", "ignore_above"=>256}}}}}], "properties"=>{"@timestamp"=>{"type"=>"date", "include_in_all"=>false}, "@version"=>{"type"=>"keyword", "include_in_all"=>false}, "geoip"=>{"dynamic"=>true, "properties"=>{"ip"=>{"type"=>"ip"}, "location"=>{"type"=>"geo_point"}, "latitude"=>{"type"=>"half_float"}, "longitude"=>{"type"=>"half_float"}}}}}}}}
    44. 16:46:56.233 [[main]-pipeline-manager] INFO logstash.outputs.elasticsearch - Installing elasticsearch template to _template/logstash
    45. 16:46:56.429 [[main]-pipeline-manager] INFO logstash.outputs.elasticsearch - New Elasticsearch output {:class=>"LogStash::Outputs::ElasticSearch", :hosts=>[#0x19aeba5c>]}
    46. 16:46:56.432 [[main]-pipeline-manager] INFO logstash.pipeline - Starting pipeline {"id"=>"main", "pipeline.workers"=>2, "pipeline.batch.size"=>125, "pipeline.batch.delay"=>5, "pipeline.max_inflight"=>250}
    47. 16:46:56.461 [[main]-pipeline-manager] INFO logstash.pipeline - Pipeline main started
    48. The stdin plugin is now waiting for input:
    49. 16:46:56.561 [Api Webserver] INFO logstash.agent - Successfully started Logstash API endpoint {:port=>9600}
    50. ^C16:46:57.638 [SIGINT handler] WARN logstash.runner - SIGINT received. Shutting down the agent.
    51. 16:46:57.658 [LogStash::Runner] WARN logstash.agent - stopping pipeline {:id=>"main"}

    logstash日志收集文件格式(默认存储在/etc/logstash/conf.d)

    Logstash配置文件基本由三部分组成:input、output以及 filter(根据需要)。标准的配置文件格式如下:

    input {...}  输入

    filter {...}   过滤

    output {...}  输出

    在每个部分中,也可以指定多个访问方式。例如,若要指定两个日志来源文件,则格式如下:

    input {

    file{path =>"/var/log/messages" type =>"syslog"}

    file { path =>"/var/log/apache/access.log"  type =>"apache"}

    }

    案例:通过logstash收集系统信息日志

    1. [root@node1 conf.d]# chmod o+r /var/log/messages
    2. [root@node1 conf.d]# vim /etc/logstash/conf.d/system.conf
    3. input {
    4. file{
    5. path => "/var/log/messages"
    6. type => "system"
    7. start_position => "beginning"
    8. }
    9. }
    10. output {
    11. elasticsearch{
    12. hosts =>["192.168.31.51:9200"]
    13. index => "system-%{+YYYY.MM.dd}"
    14. }
    15. }
    16. [root@node1 conf.d]# systemctl restart logstash.service

    node1节点安装kibana

    cd ~/elk

    1. [root@node1 elk]# rpm -ivh kibana-5.5.1-x86_64.rpm
    2. 警告:kibana-5.5.1-x86_64.rpm: 头V4 RSA/SHA512 Signature, 密钥 ID d88e42b4: NOKEY
    3. 准备中... ################################# [100%]
    4. 正在升级/安装...
    5. 1:kibana-5.5.1-1 ################################# [100%]
    6. [root@node1 elk]# vim /etc/kibana/kibana.yml
    7. 2 server.port: 5601 //Kibana打开的端口
    8. 7 server.host: "0.0.0.0" //Kibana侦听的地址
    9. 21 elasticsearch.url: "http://192.168.31.51:9200" //和Elasticsearch 建立连接
    10. 30 kibana.index: ".kibana" //在Elasticsearch中添加.kibana索引
    11. [root@node1 elk]# systemctl start kibana.service

    访问kibana

    首次访问需要添加索引,我们添加前面已经添加过的索引:system-*

    企业案例

    收集nginx访问日志信息

    在httpd服务器上安装logstash,参考上述安装过程

    logstash在httpd服务器上作为agent(代理),不需要启动

    编写httpd日志收集配置文件

    1. [root@httpd ]# yum install -y httpd
    2. [root@httpd ]# systemctl start httpd
    3. [root@httpd ]# systemctl start logstash
    4. [root@httpd ]# vim /etc/logstash/conf.d/httpd.conf
    5. input {
    6. file {
    7. path => "/var/log/httpd/access_log"
    8. type => "access"
    9. start_position => "beginning"
    10. }
    11. }
    12. output {
    13. elasticsearch {
    14. hosts => ["192.168.31.51:9200"]
    15. index => "httpd-%{+YYYY.MM.dd}"
    16. }
    17. }
    18. [root@httpd ]# logstash -f /etc/logstash/conf.d/httpd.conf
    19. OpenJDK 64-Bit Server VM warning: If the number of processors is expected to increase from one, then you should configure the number of parallel GC threads appropriately using -XX:ParallelGCThreads=N
    20. ERROR StatusLogger No log4j2 configuration file found. Using default configuration: logging only errors to the console.
    21. WARNING: Could not find logstash.yml which is typically located in $LS_HOME/config or /etc/logstash. You can specify the path using --path.settings. Continuing using the defaults
    22. Could not find log4j2 configuration at path //usr/share/logstash/config/log4j2.properties. Using default config which logs to console
    23. 21:29:34.272 [[main]-pipeline-manager] INFO logstash.outputs.elasticsearch - Elasticsearch pool URLs updated {:changes=>{:removed=>[], :added=>[http://192.168.31.51:9200/]}}
    24. 21:29:34.275 [[main]-pipeline-manager] INFO logstash.outputs.elasticsearch - Running health check to see if an Elasticsearch connection is working {:healthcheck_url=>http://192.168.31.51:9200/, :path=>"/"}
    25. 21:29:34.400 [[main]-pipeline-manager] WARN logstash.outputs.elasticsearch - Restored connection to ES instance {:url=>#0x1c254b0a>}
    26. 21:29:34.423 [[main]-pipeline-manager] INFO logstash.outputs.elasticsearch - Using mapping template from {:path=>nil}
    27. 21:29:34.579 [[main]-pipeline-manager] INFO logstash.outputs.elasticsearch - Attempting to install template {:manage_template=>{"template"=>"logstash-*", "version"=>50001, "settings"=>{"index.refresh_interval"=>"5s"}, "mappings"=>{"_default_"=>{"_all"=>{"enabled"=>true, "norms"=>false}, "dynamic_templates"=>[{"message_field"=>{"path_match"=>"message", "match_mapping_type"=>"string", "mapping"=>{"type"=>"text", "norms"=>false}}}, {"string_fields"=>{"match"=>"*", "match_mapping_type"=>"string", "mapping"=>{"type"=>"text", "norms"=>false, "fields"=>{"keyword"=>{"type"=>"keyword", "ignore_above"=>256}}}}}], "properties"=>{"@timestamp"=>{"type"=>"date", "include_in_all"=>false}, "@version"=>{"type"=>"keyword", "include_in_all"=>false}, "geoip"=>{"dynamic"=>true, "properties"=>{"ip"=>{"type"=>"ip"}, "location"=>{"type"=>"geo_point"}, "latitude"=>{"type"=>"half_float"}, "longitude"=>{"type"=>"half_float"}}}}}}}}
    28. 21:29:34.585 [[main]-pipeline-manager] INFO logstash.outputs.elasticsearch - New Elasticsearch output {:class=>"LogStash::Outputs::ElasticSearch", :hosts=>[#0x3b483278>]}
    29. 21:29:34.588 [[main]-pipeline-manager] INFO logstash.pipeline - Starting pipeline {"id"=>"main", "pipeline.workers"=>1, "pipeline.batch.size"=>125, "pipeline.batch.delay"=>5, "pipeline.max_inflight"=>125}
    30. 21:29:34.845 [[main]-pipeline-manager] INFO logstash.pipeline - Pipeline main started
    31. 21:29:34.921 [Api Webserver] INFO logstash.agent - Successfully started Logstash API endpoint {:port=>9600}
    32. wwwww
    33. w
    34. w

  • 相关阅读:
    低代码平台选型(三)国产化
    Transformer与强化学习结合提升物联网智能决策
    Lodop使用总结
    【Python-Django】基于TF-IDF算法的医疗推荐系统复现过程
    73.C++类模板
    阿里云的域名和ip绑定
    Flutter 文件读写-path_provider
    985、211 毕业一年,面试八家大厂,四面拿美团 offer(Java 后端)
    翻译: GitHub Copilot开启AI自动生成代码的时代
    【编程题】【Scratch三级】2020.12 躲避恐龙
  • 原文地址:https://blog.csdn.net/weixin_66894765/article/details/132563299