• SpringBoot——》集成Kafka示例


    推荐链接:
        总结——》【Java】
        总结——》【Mysql】
        总结——》【Redis】
        总结——》【Spring】
        总结——》【SpringBoot】
        总结——》【MyBatis、MyBatis-Plus】

    一、pom

    <properties>
      <spring.kafka.version>2.4.3.RELEASE</spring.kafka.version>
      <kafka-client.version>2.4.1</kafka-client.version>
    </properties>

    <dependencyManagement>
      <dependencies>
        <dependency>
          <groupId>org.apache.kafka</groupId>
          <artifactId>kafka-clients</artifactId>
          <version>${kafka-client.version}</version>
        </dependency>
        <dependency>
          <groupId>org.springframework.kafka</groupId>
          <artifactId>spring-kafka</artifactId>
          <version>${spring.kafka.version}</version>
        </dependency>
      </dependencies>
    </dependencyManagement>
    
    • 1
    • 2
    • 3
    • 4
    • 5
    • 6
    • 7
    • 8
    • 9
    • 10
    • 11
    • 12
    • 13
    • 14
    • 15
    • 16
    • 17
    • 18
    • 19

    二、配置参数

    1、生产者

    spring.kafka.bootstrap-servers = kafka-s1:9092,kafka-s2:9092,kafka-s3:9092
    spring.kafka.producer.retries = 0
    spring.kafka.producer.acks = 1
    spring.kafka.producer.batch-size = 16384
    spring.kafka.producer.buffer-memory = 33554432
    spring.kafka.producer.key-serializer = org.apache.kafka.common.serialization.StringSerializer
    spring.kafka.producer.value-serializer = org.apache.kafka.common.serialization.StringSerializer
    
    • 1
    • 2
    • 3
    • 4
    • 5
    • 6
    • 7

    2、消费者

    spring.kafka.bootstrap-servers = kafka-s1:9092,kafka-s2:9092,kafka-s3:9092
    spring.kafka.consumer.auto-commit-interval = 1S
    spring.kafka.consumer.auto-offset-reset = earliest
    spring.kafka.consumer.enable-auto-commit = true
    spring.kafka.consumer.key-deserializer = org.apache.kafka.common.serialization.StringDeserializer
    spring.kafka.consumer.value-deserializer = org.apache.kafka.common.serialization.StringDeserializer
    spring.kafka.consumer.listener.concurrency = 5
    spring.kafka.consumer.group-id = g1
    
    • 1
    • 2
    • 3
    • 4
    • 5
    • 6
    • 7
    • 8

    三、消费者配置类KafkaConsumerConfig.java

    import com.eju.goodhouse.service.business.consumer.SyncEsfCommunityComsumer;
    import jodd.util.StringUtil;
    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.common.serialization.StringDeserializer;
    import org.springframework.beans.factory.annotation.Value;
    import org.springframework.context.annotation.Bean;
    import org.springframework.context.annotation.Configuration;
    import org.springframework.kafka.annotation.EnableKafka;
    import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
    import org.springframework.kafka.config.KafkaListenerContainerFactory;
    import org.springframework.kafka.core.ConsumerFactory;
    import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
    import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
    
    import java.util.HashMap;
    import java.util.Map;
    
    
    /**
     * Kafka consumer configuration.
     *
     * <p>Declares two {@link KafkaListenerContainerFactory} beans backed by the
     * same property source but with different consumer groups, concurrency and
     * ack settings, plus the listener bean that consumes from them.
     */
    @Configuration
    @EnableKafka
    public class KafkaConsumerConfig {

        @Value("${spring.kafka.bootstrap-servers}")
        private String broker;

        @Value("${spring.kafka.consumer.group-id}")
        private String groupId;

        @Value("${spring.kafka.consumer.auto-offset-reset}")
        private String autoOffsetReset;

        @Value("${spring.kafka.consumer.enable-auto-commit}")
        private String enableAutoCommit;

        /**
         * Builds the raw consumer property map.
         *
         * @param consumerGroupId consumer group to use; when blank, the
         *                        configured default {@code groupId} is applied
         * @return mutable map of Kafka consumer properties
         */
        public Map<String, Object> consumerConfigs(String consumerGroupId) {
            // Fall back to the property-configured group when none was given.
            String effectiveGroup =
                    StringUtil.isNotBlank(consumerGroupId) ? consumerGroupId : groupId;

            Map<String, Object> props = new HashMap<>();
            // Kafka broker address list.
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, broker);
            // true = offsets committed automatically, false = manual commit.
            props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
            // Upper bound (ms) between automatic offset commits.
            props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "100");
            // Session timeout: the broker evicts a consumer whose heartbeats stop.
            props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "15000");
            // Key/value deserializers.
            props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
            props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
            // Consumer group (explicit or default).
            props.put(ConsumerConfig.GROUP_ID_CONFIG, effectiveGroup);
            // Where to start when no committed offset exists: earliest / latest / none.
            props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
            // Max records returned per poll — the batch size for batch listeners.
            props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 1000);
            return props;
        }

        /**
         * Creates a consumer factory for the given group id (blank falls back
         * to the configured default group).
         */
        public ConsumerFactory<String, String> consumerFactory(String consumerGroupId) {
            return new DefaultKafkaConsumerFactory<>(consumerConfigs(consumerGroupId));
        }

        /**
         * Batch listener container factory for group "g1": 4 concurrent
         * consumers, 3s poll timeout.
         */
        @Bean("kafkaListenerContainerFactory")
        KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory() {
            ConcurrentKafkaListenerContainerFactory<String, String> containerFactory =
                    new ConcurrentKafkaListenerContainerFactory<>();
            containerFactory.setConsumerFactory(consumerFactory("g1"));
            containerFactory.setConcurrency(4);
            // Batch mode: each listener call receives up to MAX_POLL_RECORDS_CONFIG records.
            containerFactory.setBatchListener(true);
            containerFactory.getContainerProperties().setPollTimeout(3000);
            return containerFactory;
        }

        /**
         * Batch listener container factory for group "g2": single consumer,
         * 3s poll timeout, ack after 10 records or 10s.
         */
        @Bean("kafkaListenerContainerFactory2")
        KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory2() {
            ConcurrentKafkaListenerContainerFactory<String, String> containerFactory =
                    new ConcurrentKafkaListenerContainerFactory<>();
            containerFactory.setConsumerFactory(consumerFactory("g2"));
            containerFactory.setConcurrency(1);
            // Batch mode: each listener call receives up to MAX_POLL_RECORDS_CONFIG records.
            containerFactory.setBatchListener(true);
            containerFactory.getContainerProperties().setPollTimeout(3000);
            // NOTE(review): ack count/time only matter for manual ack modes —
            // with enable-auto-commit=true these settings are presumably inert; verify.
            containerFactory.getContainerProperties().setAckCount(10);
            containerFactory.getContainerProperties().setAckTime(10000);
            return containerFactory;
        }

        /** Registers the listener bean that consumes via the factories above. */
        @Bean
        public SyncEsfCommunityComsumer listenerForSyncEsfCommunity() {
            return new SyncEsfCommunityComsumer();
        }
    }
    
    • 1
    • 2
    • 3
    • 4
    • 5
    • 6
    • 7
    • 8
    • 9
    • 10
    • 11
    • 12
    • 13
    • 14
    • 15
    • 16
    • 17
    • 18
    • 19
    • 20
    • 21
    • 22
    • 23
    • 24
    • 25
    • 26
    • 27
    • 28
    • 29
    • 30
    • 31
    • 32
    • 33
    • 34
    • 35
    • 36
    • 37
    • 38
    • 39
    • 40
    • 41
    • 42
    • 43
    • 44
    • 45
    • 46
    • 47
    • 48
    • 49
    • 50
    • 51
    • 52
    • 53
    • 54
    • 55
    • 56
    • 57
    • 58
    • 59
    • 60
    • 61
    • 62
    • 63
    • 64
    • 65
    • 66
    • 67
    • 68
    • 69
    • 70
    • 71
    • 72
    • 73
    • 74
    • 75
    • 76
    • 77
    • 78
    • 79
    • 80
    • 81
    • 82
    • 83
    • 84
    • 85
    • 86
    • 87
    • 88
    • 89
    • 90

    四、消费者SyncEsfCommunityComsumer.java

    import lombok.extern.slf4j.Slf4j;
    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.springframework.kafka.annotation.KafkaListener;
    
    import java.util.List;
    import java.util.Optional;
    
    /**
     * Batch Kafka listeners for community and region sync topics.
     *
     * <p>NOTE(review): class name carries a typo ("Comsumer"); kept as-is
     * because the configuration class references it by this name.
     */
    @Slf4j
    public class SyncEsfCommunityComsumer {

        /**
         * Consumes a batch from the community topic (factory
         * "kafkaListenerContainerFactory", group "g1").
         */
        @KafkaListener(id = "listenerForSyncEsfCommunity", topics = "${monitor.house-asset-community.topic}", containerFactory = "kafkaListenerContainerFactory")
        public void listenerForSyncEsfCommunity(List<ConsumerRecord<?, ?>> records) throws Exception {
            log.info("【listenerForSyncEsfCommunity】records size:【{}】, Thread ID:【{}】", records.size(), Thread.currentThread().getId());
            records.forEach(message -> {
                Object payload = message.value();
                // Skip tombstone / null-value records.
                if (payload != null) {
                    // TODO 业务处理
                }
            });
        }

        /**
         * Consumes a batch from the region topic (factory
         * "kafkaListenerContainerFactory2", group "g2").
         */
        @KafkaListener(id = "listenerForSyncEsfRegion", topics = "${monitor.house-asset-region.topic}", containerFactory = "kafkaListenerContainerFactory2")
        public void listenerForSyncEsfRegion(List<ConsumerRecord<?, ?>> records) throws Exception {
            log.info("【listenerForSyncEsfRegion】records size:【{}】, Thread ID:【{}】", records.size(), Thread.currentThread().getId());
            records.forEach(message -> {
                Object payload = message.value();
                // Skip tombstone / null-value records.
                if (payload != null) {
                    // TODO 业务处理
                }
            });
        }

    }
    
    
    • 1
    • 2
    • 3
    • 4
    • 5
    • 6
    • 7
    • 8
    • 9
    • 10
    • 11
    • 12
    • 13
    • 14
    • 15
    • 16
    • 17
    • 18
    • 19
    • 20
    • 21
    • 22
    • 23
    • 24
    • 25
    • 26
    • 27
    • 28
    • 29
    • 30
    • 31
    • 32
    • 33
    • 34
  • 相关阅读:
    SAP UI5 Page 控件的构造函数参数讲解
    web扫码登录
    动态规划——数字三角形模型
    Golang: Store Query Result in a Map
    Node.js身份证实名认证接口、身份证识别API
    Python3.11教程6:标准库简介1——os、shutil、sys、random、time、datetime、 threading
    基于​Segment-and-Track Anything与ProPainter实现视频一键目标移除与一键祛除水印
    C语言基础知识学习 -- 操作符和关键字,#define,指针
    向日葵x华测导航:远程控制如何助力导航测绘设备运维
    前端构建工具总结
  • 原文地址:https://blog.csdn.net/weixin_43453386/article/details/128189562