• DockerCompose部署系列:构建ELK并同步MySQL数据


    1. # 拉取镜像
    2. docker pull mysql:5.7
    3. # 启动镜像
    4. docker run -p 3306:3306 --name mysql -v /data/mysql/log:/var/log/mysql -v /data/mysql/data:/var/lib/mysql -v /data/mysql/conf:/etc/mysql -e MYSQL_ROOT_PASSWORD=123456 -d mysql:5.7
    5. # 配置MySQL
    6. vim /data/mysql/conf/my.cnf
    7. [client]
    8. default-character-set=utf8
    9. [mysql]
    10. default-character-set=utf8
    11. [mysqld]
    12. init_connect='SET collation_connection = utf8_unicode_ci'
    13. init_connect='SET NAMES utf8'
    14. character-set-server=utf8
    15. collation-server=utf8_unicode_ci
    16. skip-character-set-client-handshake
    17. skip-name-resolve
    18. # 重启MySQL
    19. docker restart mysql
    20. # 建表
    21. DROP TABLE IF EXISTS `sys_log`;
    22. CREATE TABLE `sys_log` (
    23. `id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT '日志主键',
    24. `title` varchar(50) CHARACTER SET utf8 COLLATE utf8_general_ci NULL DEFAULT '' COMMENT '模块标题',
    25. `business_type` int(2) NULL DEFAULT 0 COMMENT '业务类型(0其它 1新增 2修改 3删除)',
    26. `method` varchar(100) CHARACTER SET utf8 COLLATE utf8_general_ci NULL DEFAULT '' COMMENT '方法名称',
    27. `request_method` varchar(10) CHARACTER SET utf8 COLLATE utf8_general_ci NULL DEFAULT '' COMMENT '请求方式',
    28. `oper_name` varchar(50) CHARACTER SET utf8 COLLATE utf8_general_ci NULL DEFAULT '' COMMENT '操作人员',
    29. `oper_url` varchar(255) CHARACTER SET utf8 COLLATE utf8_general_ci NULL DEFAULT '' COMMENT '请求URL',
    30. `oper_ip` varchar(128) CHARACTER SET utf8 COLLATE utf8_general_ci NULL DEFAULT '' COMMENT '主机地址',
    31. `oper_time` datetime(0) NULL DEFAULT NULL COMMENT '操作时间',
    32. PRIMARY KEY (`id`) USING BTREE
    33. ) ENGINE = InnoDB AUTO_INCREMENT = 1585197503834284034 CHARACTER SET = utf8 COLLATE = utf8_general_ci COMMENT = '操作日志记录' ROW_FORMAT = Dynamic;
    34. SET FOREIGN_KEY_CHECKS = 1;
    35. # ELK搭建准备
    36. # elasticsearch挂载
    37. mkdir -p /data/elk/elasticsearch/{config,plugins,data,logs}
    38. # 赋权
    39. chmod 777 /data/elk/elasticsearch/{config,plugins,data,logs}
    40. # kibana挂载
    41. mkdir -p /data/elk/kibana/config
    42. # logstash挂载
    43. mkdir -p /data/elk/logstash/config
    44. # 赋权
    45. chmod 777 /data/elk/logstash/config
    46. # 配置elasticsearch
    47. vim /data/elk/elasticsearch/config/elasticsearch.yml
    48. http.host: 0.0.0.0
    49. xpack.security.enabled: false
    50. # 配置kibana
    51. vim /data/elk/kibana/config/kibana.yml
    52. server.host: 0.0.0.0
    53. elasticsearch.hosts: [ "http://192.168.5.11:9200" ]
    54. # 配置logstash
    55. vim /data/elk/logstash/config/logstash.yml
    56. http.host: 0.0.0.0
    57. xpack.monitoring.elasticsearch.hosts: [ "http://192.168.5.11:9200" ]
    58. # 创建日志文件
    59. touch /data/elk/logstash/config/log
    60. chmod 777 /data/elk/logstash/config/log
    61. # 自行下载mysql-connector-java-8.0.28.jar 放到/data/elk/logstash/config/
    62. # 配置logstash文件
    63. vim /data/elk/logstash/config/logstash.conf
    64. input {
    65. stdin {
    66. }
    67. jdbc {
    68. jdbc_connection_string => "jdbc:mysql://192.168.5.11:3306/test?useUnicode=true&characterEncoding=utf8&serverTimezone=UTC"
    69. jdbc_user => "root"
    70. jdbc_password => "123456"
    71. jdbc_driver_library => "/usr/share/logstash/config/mysql-connector-java-8.0.28.jar"
    72. jdbc_driver_class => "com.mysql.cj.jdbc.Driver"
    73. jdbc_paging_enabled => "true"
    74. jdbc_page_size => "300000"
    75. statement => "SELECT id, title, business_type, method, request_method, oper_name, oper_url, oper_ip, oper_time FROM sys_log"
    76. schedule => "*/1 * * * *"
    77. use_column_value => false
    78. tracking_column_type => "timestamp"
    79. tracking_column => "oper_time"
    80. record_last_run => true
    81. jdbc_default_timezone => "Asia/Shanghai"
    82. last_run_metadata_path => "/usr/share/logstash/config/log"
    83. }
    84. }
    85. output {
    86. elasticsearch {
    87. hosts => ["192.168.5.11:9200"]
    88. index => "sys_log"
    89. document_id => "%{id}"
    90. }
    91. stdout {
    92. codec => json_lines
    93. }
    94. }
    95. # 配置流水线
    96. vim /data/elk/logstash/config/pipelines.yml
    97. - pipeline.id: sys_log
    98. path.config: "/usr/share/logstash/config/logstash.conf"
    99. # 编辑dockerCompose一键搭建
    100. vim docker-compose.yml
    101. version: '3'
    102. services:
    103. elasticsearch:
    104. image: elasticsearch:7.17.7
    105. container_name: elasticsearch
    106. ports:
    107. - "9200:9200"
    108. - "9300:9300"
    109. environment:
    110. - cluster.name=elasticsearch
    111. - discovery.type=single-node
    112. - "ES_JAVA_OPTS=-Xms64m -Xmx512m"
    113. volumes:
    114. - /data/elk/elasticsearch/plugins:/usr/share/elasticsearch/plugins
    115. - /data/elk/elasticsearch/data:/usr/share/elasticsearch/data
    116. - /data/elk/elasticsearch/logs:/usr/share/elasticsearch/logs
    117. kibana:
    118. image: kibana:7.17.7
    119. container_name: kibana
    120. ports:
    121. - "5601:5601"
    122. depends_on:
    123. - elasticsearch
    124. environment:
    125. I18N_LOCALE: zh-CN
    126. volumes:
    127. - /data/elk/kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml
    128. logstash:
    129. image: logstash:7.17.7
    130. container_name: logstash
    131. ports:
    132. - "5044:5044"
    133. volumes:
    134. - /data/elk/logstash/config:/usr/share/logstash/config
    135. depends_on:
    136. - elasticsearch
    137. # docker-compose.yml文件下执行
    138. docker-compose up -d
    139. # 测试访问Kibana
    140. http://192.168.5.11:5601/app/home#/
    141. # 安装完之后,单个启动使用
    142. # 启动elasticsearch
    143. docker run --name elasticsearch -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" -e ES_JAVA_OPTS="-Xms64m -Xmx512m" -v /data/elk/elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml -v /data/elk/elasticsearch/data:/usr/share/elasticsearch/data -v /data/elk/elasticsearch/plugins:/usr/share/elasticsearch/plugins -d elasticsearch:7.17.7
    144. # 启动kibana
    145. docker run --name kibana -e ELASTICSEARCH_HOSTS=http://192.168.5.11:9200 -p 5601:5601 -d kibana:7.17.7
    146. # 启动logstash
    147. docker run --name logstash -p 5044:5044 -v /data/elk/logstash/config:/usr/share/logstash/config -d logstash:7.17.7

    在Kibana上创建索引,然后在数据库的sys_log表中添加测试数据,将会自动同步到ES中去

    1. # 创建与数据库表一样的索引结构
    2. PUT /sys_log
    3. {
    4. "settings": {
    5. "number_of_shards": 1,
    6. "number_of_replicas": 0,
    7. "index": {
    8. "max_result_window": 100000000
    9. }
    10. },
    11. "mappings": {
    12. "dynamic": "strict",
    13. "properties": {
    14. "@timestamp": {
    15. "type": "date"
    16. },
    17. "@version": {
    18. "type": "text",
    19. "fields": {
    20. "keyword": {
    21. "type": "keyword",
    22. "ignore_above": 256
    23. }
    24. }
    25. },
    26. "business_type": {
    27. "type": "integer"
    28. },
    29. "title": {
    30. "type": "text"
    31. },
    32. "method": {
    33. "type": "text"
    34. },
    35. "request_method": {
    36. "type": "text"
    37. },
    38. "oper_name": {
    39. "type": "text"
    40. },
    41. "oper_url": {
    42. "type": "text"
    43. },
    44. "oper_ip": {
    45. "type": "text"
    46. },
    47. "oper_time": {
    48. "type": "date"
    49. },
    50. "id": {
    51. "type": "long"
    52. }
    53. }
    54. }
    55. }
    56. # 查询索引
    57. GET /sys_log/_search
    58. {
    59. "query": {
    60. "match_all": {}
    61. }
    62. }

  • 相关阅读:
    本、硕、博区别真的辣么大吗?
    JPA如何查询部分字段
    ansible的介绍安装与模块
    LC926. 将字符串翻转到单调递增(JAVA - 动态规划)
    django-rest-framework 基础四 过滤、排序、分页、异常处理
    数据化运营15 活跃(上):如何通过运营手法提升用户活跃度?
    Docker镜像仓库:存储与分发Docker镜像的中央仓库
    「数据结构详解·二」二叉树的初步
    红黑树(Red Black Tree)
    MyBatis多条件查询、动态SQL、多表操作、注解开发详细教程
  • 原文地址:https://blog.csdn.net/CancerKing/article/details/128168156