• 03_kafka-eagle 监控


    • kafka-eagle 监控

    安装

    • download.kafka-eagle.org : https://github.com/smartloli/kafka-eagle-bin/archive/v3.0.1.tar.gz
    • https://docs.kafka-eagle.org/2.installation/2.installonlinuxmac
    • 需要配置环境变量 KE_HOME=/opt/app/kafka-eagle/efak-web-3.0.1(写入 /etc/profile 或者 .bashrc),执行 export KE_HOME,并将 $KE_HOME/bin 添加到 PATH
    • 编辑配置文件 conf/system-config.properties
    kafka.eagle.zk.cluster.alias=cluster1
    cluster1.zk.list=hosta:2181,hostb:2181,hostc:2181
    # 注释掉 cluster2 相关内容
    
    # storage 
    # 注释掉 cluster2 相关内容
    
    # 开启报表图, 需要开启kafka 的jmx
    kafka.eagle.metrics.charts=true
    # sasl: 注释掉 cluster2 相关内容
    #  如果启用mysql,注释掉 sqlite 相关内容, jdbc 相关url 需要修改
    
    ---
    完整配置
    ######################################
    # multi zookeeper & kafka cluster list
    # Settings prefixed with 'kafka.eagle.' will be deprecated, use 'efak.' instead
    ######################################
    efak.zk.cluster.alias=cluster1
    cluster1.zk.list=kafka_1:2181,kafka_2:2181,kafka_3:2181
    #cluster2.zk.list=xdn10:2181,xdn11:2181,xdn12:2181
    
    ######################################
    # zookeeper enable acl
    ######################################
    cluster1.zk.acl.enable=false
    cluster1.zk.acl.schema=digest
    cluster1.zk.acl.username=test
    cluster1.zk.acl.password=test123
    
    ######################################
    # broker size online list
    ######################################
    cluster1.efak.broker.size=20
    
    ######################################
    # zk client thread limit
    ######################################
    kafka.zk.limit.size=16
    
    ######################################
    # EFAK webui port
    ######################################
    efak.webui.port=8048
    
    ######################################
    # EFAK enable distributed
    ######################################
    efak.distributed.enable=false
    efak.cluster.mode.status=master
    efak.worknode.master.host=localhost
    efak.worknode.port=8085
    
    ######################################
    # kafka jmx acl and ssl authenticate
    ######################################
    cluster1.efak.jmx.acl=false
    cluster1.efak.jmx.user=keadmin
    cluster1.efak.jmx.password=keadmin123
    cluster1.efak.jmx.ssl=false
    cluster1.efak.jmx.truststore.location=/data/ssl/certificates/kafka.truststore
    cluster1.efak.jmx.truststore.password=ke123456
    
    ######################################
    # kafka offset storage
    ######################################
    cluster1.efak.offset.storage=kafka
    #cluster2.efak.offset.storage=zk
    
    ######################################
    # kafka jmx uri
    ######################################
    cluster1.efak.jmx.uri=service:jmx:rmi:///jndi/rmi://kafka_2:9999/jmxrmi
    
    ######################################
    # kafka metrics, 15 days by default
    ######################################
    efak.metrics.charts=true
    efak.metrics.retain=15
    
    ######################################
    # kafka sql topic records max
    ######################################
    efak.sql.topic.records.max=5000
    efak.sql.topic.preview.records.max=10
    
    ######################################
    # delete kafka topic token
    ######################################
    efak.topic.token=keadmin
    
    ######################################
    # kafka sasl authenticate
    ######################################
    cluster1.efak.sasl.enable=false
    cluster1.efak.sasl.protocol=SASL_PLAINTEXT
    cluster1.efak.sasl.mechanism=SCRAM-SHA-256
    cluster1.efak.sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required username="kafka" password="kafka-eagle";
    cluster1.efak.sasl.client.id=
    cluster1.efak.blacklist.topics=
    cluster1.efak.sasl.cgroup.enable=false
    cluster1.efak.sasl.cgroup.topics=
    #cluster2.efak.sasl.enable=false
    #cluster2.efak.sasl.protocol=SASL_PLAINTEXT
    #cluster2.efak.sasl.mechanism=PLAIN
    #cluster2.efak.sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username="kafka" password="kafka-eagle";
    #cluster2.efak.sasl.client.id=
    #cluster2.efak.blacklist.topics=
    #cluster2.efak.sasl.cgroup.enable=false
    #cluster2.efak.sasl.cgroup.topics=
    
    ######################################
    # kafka ssl authenticate
    ######################################
    cluster3.efak.ssl.enable=false
    cluster3.efak.ssl.protocol=SSL
    cluster3.efak.ssl.truststore.location=
    cluster3.efak.ssl.truststore.password=
    cluster3.efak.ssl.keystore.location=
    cluster3.efak.ssl.keystore.password=
    cluster3.efak.ssl.key.password=
    cluster3.efak.ssl.endpoint.identification.algorithm=https
    cluster3.efak.blacklist.topics=
    cluster3.efak.ssl.cgroup.enable=false
    cluster3.efak.ssl.cgroup.topics=
    
    ######################################
    # kafka sqlite jdbc driver address
    ######################################
    efak.driver=org.sqlite.JDBC
    efak.url=jdbc:sqlite:/opt/app/kafka-eagle/efak-web-3.0.1/db/ke.db
    #efak.url=jdbc:sqlite:/hadoop/kafka-eagle/db/ke.db
    efak.username=root
    efak.password=www.kafka-eagle.org
    
    ######################################
    # kafka mysql jdbc driver address
    ######################################
    #efak.driver=com.mysql.cj.jdbc.Driver
    #efak.driver=com.mysql.jdbc.Driver
    #efak.url=jdbc:mysql://192.168.1.9:3306/ke?useUnicode=true&characterEncoding=UTF-8&serverTimezone=UTC&useTimezone=true
    #efak.username=root
    #efak.password=123456
    
    
    • 1
    • 2
    • 3
    • 4
    • 5
    • 6
    • 7
    • 8
    • 9
    • 10
    • 11
    • 12
    • 13
    • 14
    • 15
    • 16
    • 17
    • 18
    • 19
    • 20
    • 21
    • 22
    • 23
    • 24
    • 25
    • 26
    • 27
    • 28
    • 29
    • 30
    • 31
    • 32
    • 33
    • 34
    • 35
    • 36
    • 37
    • 38
    • 39
    • 40
    • 41
    • 42
    • 43
    • 44
    • 45
    • 46
    • 47
    • 48
    • 49
    • 50
    • 51
    • 52
    • 53
    • 54
    • 55
    • 56
    • 57
    • 58
    • 59
    • 60
    • 61
    • 62
    • 63
    • 64
    • 65
    • 66
    • 67
    • 68
    • 69
    • 70
    • 71
    • 72
    • 73
    • 74
    • 75
    • 76
    • 77
    • 78
    • 79
    • 80
    • 81
    • 82
    • 83
    • 84
    • 85
    • 86
    • 87
    • 88
    • 89
    • 90
    • 91
    • 92
    • 93
    • 94
    • 95
    • 96
    • 97
    • 98
    • 99
    • 100
    • 101
    • 102
    • 103
    • 104
    • 105
    • 106
    • 107
    • 108
    • 109
    • 110
    • 111
    • 112
    • 113
    • 114
    • 115
    • 116
    • 117
    • 118
    • 119
    • 120
    • 121
    • 122
    • 123
    • 124
    • 125
    • 126
    • 127
    • 128
    • 129
    • 130
    • 131
    • 132
    • 133
    • 134
    • 135
    • 136
    • 137
    • 138
    • 139
    • 140
    • 141
    • 142
    • 143
    • 144

    修改 kafka-server-start.sh

    • 3个节点都需要修改 kafka 启动文件
    • vim kafka-server-start.sh
    export JMX_PORT="9999"
    
    ---
    if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
        export KAFKA_HEAP_OPTS="-Xmx1G -Xms1G"
    fi
    export JMX_PORT="9999"
    
    EXTRA_ARGS=${EXTRA_ARGS-'-name kafkaServer -loggc'}
    
    
    • 1
    • 2
    • 3
    • 4
    • 5
    • 6
    • 7
    • 8
    • 9
    • 10

    修改 kafka-run-class.sh

    • kafka-run-class.sh 修改 JMX 相关内容【-Djava.rmi.server.hostname=kafka_3】(行前边数字为行号,只是为了标识修改位置):
    ...
    213  # JMX port to use
    214  if [  $JMX_PORT ]; then
    215    KAFKA_JMX_OPTS="$KAFKA_JMX_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT -Djava.rmi.server.hostname=kafka_3"
    216    if ! echo "$KAFKA_JMX_OPTS" | grep -qF -- '-Dcom.sun.management.jmxremote.rmi.port=' ; then
    217      # If unset, set the RMI port to address issues with monitoring Kafka running in containers
    218      KAFKA_JMX_OPTS="$KAFKA_JMX_OPTS -Dcom.sun.management.jmxremote.rmi.port=$JMX_PORT"
    219    fi
    220  fi
    ...
    
    • 1
    • 2
    • 3
    • 4
    • 5
    • 6
    • 7
    • 8
    • 9
    • 10
    • -Djava.rmi.server.hostname=kafka_3 其中 kafka_3 三台机器根据实际域名修改;
    • 给 kafka-eagle/bin/ke.sh 赋予执行权限 chmod u+x bin/ke.sh
    • 启动: ./bin/ke.sh start
    • 默认登录账号/密码: admin / 123456
    • 输出如下:
    ...
    [2023-09-04 20:55:04] INFO: Port Progress: [##################################################] | 100%
    [2023-09-04 20:55:07] INFO: Config Progress: [##################################################] | 100%
    [2023-09-04 20:55:10] INFO: Startup Progress: [##################################################] | 100%
    [2023-09-04 20:54:59] INFO: Status Code[0]
    [2023-09-04 20:54:59] INFO: [Job done!]
    Welcome to
        ______    ______    ___     __ __
       / ____/   / ____/   /   |   / //_/
      / __/     / /_      / /| |  / ,<
     / /___    / __/     / ___ | / /| |
    /_____/   /_/       /_/  |_|/_/ |_|
    ( Eagle For Apache Kafka® )
    
    Version v3.0.1 -- Copyright 2016-2022
    *******************************************************************
    * EFAK Service has started success.
    * Welcome, Now you can visit 'http://192.168.1.28:8048'
    * Account:admin ,Password:123456
    *******************************************************************
        * <Usage> ke.sh [start|status|stop|restart|stats] </Usage>
    * <Usage> https://www.kafka-eagle.org/ </Usage>
    *******************************************************************
    
    
    • 1
    • 2
    • 3
    • 4
    • 5
    • 6
    • 7
    • 8
    • 9
    • 10
    • 11
    • 12
    • 13
    • 14
    • 15
    • 16
    • 17
    • 18
    • 19
    • 20
    • 21
    • 22
    • 23
    • 24
    • 访问: http://192.168.1.28:8048
    • token: keadmin

    在这里插入图片描述

    问题

    eagle 日志报错

    ErrorLogger.org.springframework.scheduling.quartz.SchedulerFactoryBean#1_Worker-5 - ERROR - Job (DEFAULT.mbeanDetail threw an exception.
     org.quartz.SchedulerException: Job threw an unhandled exception. [See nested exception: org.springframework.scheduling.quartz.JobMethodInvocationFailedException: Invocation of method 'mbeanQuartz' on target class [class org.smartloli.kafka.eagle.web.quartz.MBeanQuartz] failed; nested exception is java.lang.NullPointerException]
            at org.quartz.core.JobRunShell.run(JobRunShell.java:213)
            at org.quartz.simpl.SimpleThreadPool$WorkerThread.run(SimpleThreadPool.java:573)
    Caused by: org.springframework.scheduling.quartz.JobMethodInvocationFailedException: Invocation of method 'mbeanQuartz' on target class [class org.smartloli.kafka.eagle.web.quartz.MBeanQuartz] failed; nested exception is java.lang.NullPointerException
            at org.springframework.scheduling.quartz.MethodInvokingJobDetailFactoryBean$MethodInvokingJob.executeInternal(MethodInvokingJobDetailFactoryBean.java:266)
            at org.springframework.scheduling.quartz.QuartzJobBean.execute(QuartzJobBean.java:75)
            at org.quartz.core.JobRunShell.run(JobRunShell.java:202)
            ... 1 more
    Caused by: java.lang.NullPointerException
            at org.smartloli.kafka.eagle.web.quartz.MBeanQuartz.mbeanQuartz(MBeanQuartz.java:94)
            at sun.reflect.GeneratedMethodAccessor102.invoke(Unknown Source)
            at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
            at java.lang.reflect.Method.invoke(Method.java:498)
            at org.springframework.util.MethodInvoker.invoke(MethodInvoker.java:269)
            at org.springframework.scheduling.quartz.MethodInvokingJobDetailFactoryBean$MethodInvokingJob.executeInternal(MethodInvokingJobDetailFactoryBean.java:257)
    
    
    • 1
    • 2
    • 3
    • 4
    • 5
    • 6
    • 7
    • 8
    • 9
    • 10
    • 11
    • 12
    • 13
    • 14
    • 15
    • 16
    • 17
    • 需要在 zoo.cfg 中添加配置: 4lw.commands.whitelist=*

    mysql 报错 时区问题

    • 如果使用 mysql 作为数据库,可能会遇到时区报错,或者显示一堆乱码;即使配置了 &serverTimezone=UTC&useTimezone=true 依旧报错。
    • 可能是版本问题。实测降低 eagle 版本(例如使用 1.4.0)后,数据库及表都会自动创建,同时也不会再报错。
  • 相关阅读:
    基于GoFrame+Vue+ElementUI搭建的博客管理系统
    C语言sizeof()计算空间大小为8的问题
    记录Manjaro Linux安装nvidia显卡驱动失败的经历
    java调用方法之歌唱比赛六个评委打分【0~100分之间的整数】。要求去掉一个最高分和一个最低分后四个评委的平均分即为选手的得分。
    新版kafka可视化界面组件
    opencv从入门到精通 哦吼03
    面试官:如果要存 IP 地址,用什么数据类型比较好?很多人都会答错
    「解析」COCO 数据读取与模型结果解析
    力扣 1582. 二进制矩阵中的特殊位置
    Bean 的作用域和生命周期
  • 原文地址:https://blog.csdn.net/wwq921220/article/details/132781917