Canal + Kafka: Syncing MySQL Data to Redis


    Solving the cache/database consistency problem

    Normally the data sitting in the cache is fine, but once the database is updated, consistency problems between the cache (Redis) and the database (MySQL) appear easily. Because writes and reads run concurrently and their order cannot be guaranteed, the cache and the database can end up holding different data.

    How Canal works
    Canal mimics the MySQL slave interaction protocol: it presents itself to the MySQL master as a slave and sends a dump request.
    The MySQL master receives the dump request and starts pushing the binary log to the slave (that is, Canal).
    Canal parses the binary log objects (originally a raw byte stream).

    See the official documentation:

    QuickStart · alibaba/canal Wiki · GitHub
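    To make that flow concrete, here is a minimal sketch of a direct Canal client (not part of the original setup): it assumes a canal server running in the default TCP mode (canal.serverMode = tcp) on 127.0.0.1:11111 with the stock example instance, plus the canal.client dependency from step 4; the class name is illustrative. The Kafka-based setup below replaces this kind of client with a Kafka listener.

    import java.net.InetSocketAddress;

    import com.alibaba.otter.canal.client.CanalConnector;
    import com.alibaba.otter.canal.client.CanalConnectors;
    import com.alibaba.otter.canal.protocol.CanalEntry;
    import com.alibaba.otter.canal.protocol.Message;

    public class SimpleCanalClient {
        public static void main(String[] args) throws Exception {
            // connect to the canal server and subscribe to every schema and table
            CanalConnector connector = CanalConnectors.newSingleConnector(
                    new InetSocketAddress("127.0.0.1", 11111), "example", "", "");
            connector.connect();
            connector.subscribe(".*\\..*");
            connector.rollback();
            while (true) {
                Message message = connector.getWithoutAck(100); // fetch up to 100 entries
                long batchId = message.getId();
                if (batchId == -1 || message.getEntries().isEmpty()) {
                    Thread.sleep(1000); // nothing new yet
                } else {
                    for (CanalEntry.Entry entry : message.getEntries()) {
                        if (entry.getEntryType() != CanalEntry.EntryType.ROWDATA) {
                            continue; // skip transaction begin/end markers
                        }
                        // the stored value is the parsed binlog event (protobuf)
                        CanalEntry.RowChange rowChange = CanalEntry.RowChange.parseFrom(entry.getStoreValue());
                        System.out.println(entry.getHeader().getSchemaName() + "."
                                + entry.getHeader().getTableName() + " -> " + rowChange.getEventType());
                    }
                }
                connector.ack(batchId); // confirm the batch has been processed
            }
        }
    }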

    1. Installing Kafka and ZooKeeper is omitted here; see my other posts.

            After installing them, start ZooKeeper first, then start Kafka.

    2. On Windows 10, edit my.ini

    [mysqld]
    # enable the binlog
    log-bin=mysql-bin
    # use ROW format
    binlog-format=ROW
    # server_id is required for MySQL replication; do not reuse canal's slaveId
    server_id=1

            

    -- create the canal user and grant replication privileges
    CREATE USER canal IDENTIFIED BY 'canal';
    GRANT SELECT, REPLICATION SLAVE, REPLICATION CLIENT ON *.* TO 'canal'@'%';
    -- GRANT ALL PRIVILEGES ON *.* TO 'canal'@'%' ;
    FLUSH PRIVILEGES;
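    After editing my.ini, restart MySQL so the binlog settings take effect. As a quick sanity check, the following sketch (not from the original post; it assumes the MySQL JDBC driver is on the classpath, and the class name is illustrative) connects as the new canal user and prints the relevant variables; log_bin should be ON and binlog_format should be ROW:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class BinlogCheck {
        public static void main(String[] args) throws Exception {
            // connect as the newly created canal user to prove the grants work
            try (Connection conn = DriverManager.getConnection(
                    "jdbc:mysql://127.0.0.1:3306?useSSL=false", "canal", "canal");
                 Statement st = conn.createStatement();
                 // SHOW VARIABLES accepts a WHERE clause on Variable_name
                 ResultSet rs = st.executeQuery(
                         "SHOW VARIABLES WHERE Variable_name IN ('log_bin', 'binlog_format', 'server_id')")) {
                while (rs.next()) {
                    System.out.println(rs.getString(1) + " = " + rs.getString(2));
                }
            }
        }
    }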

    3. Download Canal

            Download: Releases · alibaba/canal · GitHub

    After extraction there are five folders.

    Go into the conf directory.

    Edit canal.properties; for this setup the key changes are canal.serverMode = kafka, the kafka.bootstrap.servers address, and canal.mq.flatMessage = true:

    #################################################
    ######### common argument #############
    #################################################
    # tcp bind ip
    canal.ip =
    # register ip to zookeeper
    canal.register.ip =
    canal.port = 11111
    canal.metrics.pull.port = 11112
    # canal instance user/passwd
    # canal.user = canal
    # canal.passwd = E3619321C1A937C46A0D8BD1DAC39F93B27D4458
    # canal admin config
    #canal.admin.manager = 127.0.0.1:8089
    canal.admin.port = 11110
    canal.admin.user = admin
    canal.admin.passwd = 4ACFE3202A5FF5CF467898FC58AAB1D615029441
    # admin auto register
    #canal.admin.register.auto = true
    #canal.admin.register.cluster =
    #canal.admin.register.name =
    canal.zkServers =
    # flush data to zk
    canal.zookeeper.flush.period = 1000
    canal.withoutNetty = false
    # tcp, kafka, rocketMQ, rabbitMQ
    canal.serverMode = kafka
    # flush meta cursor/parse position to file
    canal.file.data.dir = ${canal.conf.dir}
    canal.file.flush.period = 1000
    ## memory store RingBuffer size, should be Math.pow(2,n)
    canal.instance.memory.buffer.size = 16384
    ## memory store RingBuffer used memory unit size , default 1kb
    canal.instance.memory.buffer.memunit = 1024
    ## meory store gets mode used MEMSIZE or ITEMSIZE
    canal.instance.memory.batch.mode = MEMSIZE
    canal.instance.memory.rawEntry = true
    ## detecing config
    canal.instance.detecting.enable = false
    #canal.instance.detecting.sql = insert into retl.xdual values(1,now()) on duplicate key update x=now()
    canal.instance.detecting.sql = select 1
    canal.instance.detecting.interval.time = 3
    canal.instance.detecting.retry.threshold = 3
    canal.instance.detecting.heartbeatHaEnable = false
    # support maximum transaction size, more than the size of the transaction will be cut into multiple transactions delivery
    canal.instance.transaction.size = 1024
    # mysql fallback connected to new master should fallback times
    canal.instance.fallbackIntervalInSeconds = 60
    # network config
    canal.instance.network.receiveBufferSize = 16384
    canal.instance.network.sendBufferSize = 16384
    canal.instance.network.soTimeout = 30
    # binlog filter config
    canal.instance.filter.druid.ddl = true
    canal.instance.filter.query.dcl = false
    canal.instance.filter.query.dml = false
    canal.instance.filter.query.ddl = false
    canal.instance.filter.table.error = false
    canal.instance.filter.rows = false
    canal.instance.filter.transaction.entry = false
    canal.instance.filter.dml.insert = false
    canal.instance.filter.dml.update = false
    canal.instance.filter.dml.delete = false
    # binlog format/image check
    canal.instance.binlog.format = ROW,STATEMENT,MIXED
    canal.instance.binlog.image = FULL,MINIMAL,NOBLOB
    # binlog ddl isolation
    canal.instance.get.ddl.isolation = false
    # parallel parser config
    canal.instance.parser.parallel = true
    ## concurrent thread number, default 60% available processors, suggest not to exceed Runtime.getRuntime().availableProcessors()
    #canal.instance.parser.parallelThreadSize = 16
    ## disruptor ringbuffer size, must be power of 2
    canal.instance.parser.parallelBufferSize = 256
    # table meta tsdb info
    canal.instance.tsdb.enable = true
    canal.instance.tsdb.dir = ${canal.file.data.dir:../conf}/${canal.instance.destination:}
    canal.instance.tsdb.url = jdbc:h2:${canal.instance.tsdb.dir}/h2;CACHE_SIZE=1000;MODE=MYSQL;
    canal.instance.tsdb.dbUsername = canal
    canal.instance.tsdb.dbPassword = canal
    # dump snapshot interval, default 24 hour
    canal.instance.tsdb.snapshot.interval = 24
    # purge snapshot expire , default 360 hour(15 days)
    canal.instance.tsdb.snapshot.expire = 360
    #################################################
    ######### destinations #############
    #################################################
    canal.destinations = example
    # conf root dir
    canal.conf.dir = ../conf
    # auto scan instance dir add/remove and start/stop instance
    canal.auto.scan = true
    canal.auto.scan.interval = 5
    # set this value to 'true' means that when binlog pos not found, skip to latest.
    # WARN: pls keep 'false' in production env, or if you know what you want.
    canal.auto.reset.latest.pos.mode = false
    canal.instance.tsdb.spring.xml = classpath:spring/tsdb/h2-tsdb.xml
    #canal.instance.tsdb.spring.xml = classpath:spring/tsdb/mysql-tsdb.xml
    canal.instance.global.mode = spring
    canal.instance.global.lazy = false
    canal.instance.global.manager.address = ${canal.admin.manager}
    #canal.instance.global.spring.xml = classpath:spring/memory-instance.xml
    canal.instance.global.spring.xml = classpath:spring/file-instance.xml
    #canal.instance.global.spring.xml = classpath:spring/default-instance.xml
    ##################################################
    ######### MQ Properties #############
    ##################################################
    # aliyun ak/sk , support rds/mq
    canal.aliyun.accessKey =
    canal.aliyun.secretKey =
    canal.aliyun.uid=
    canal.mq.flatMessage = true
    canal.mq.canalBatchSize = 50
    canal.mq.canalGetTimeout = 100
    # Set this value to "cloud", if you want open message trace feature in aliyun.
    canal.mq.accessChannel = local
    canal.mq.database.hash = true
    canal.mq.send.thread.size = 30
    canal.mq.build.thread.size = 8
    ##################################################
    ######### Kafka #############
    ##################################################
    ## Kafka broker address
    kafka.bootstrap.servers = 127.0.0.1:9092
    kafka.acks = all
    kafka.compression.type = none
    kafka.batch.size = 16384
    kafka.linger.ms = 1
    kafka.max.request.size = 1048576
    kafka.buffer.memory = 33554432
    kafka.max.in.flight.requests.per.connection = 1
    kafka.retries = 0
    kafka.kerberos.enable = false
    kafka.kerberos.krb5.file = "../conf/kerberos/krb5.conf"
    kafka.kerberos.jaas.file = "../conf/kerberos/jaas.conf"
    ##################################################
    ######### RocketMQ #############
    ##################################################
    rocketmq.producer.group = test
    rocketmq.enable.message.trace = false
    rocketmq.customized.trace.topic =
    rocketmq.namespace =
    rocketmq.namesrv.addr = 127.0.0.1:9876
    rocketmq.retry.times.when.send.failed = 0
    rocketmq.vip.channel.enabled = false
    rocketmq.tag =
    ##################################################
    ######### RabbitMQ #############
    ##################################################
    rabbitmq.host =
    rabbitmq.virtual.host =
    rabbitmq.exchange =
    rabbitmq.username =
    rabbitmq.password =
    rabbitmq.deliveryMode =

    Go into canal\conf\example

    Edit instance.properties; the key entries here are the MySQL master address, the database credentials, the table filter regex, and the Kafka topic:

    #################################################
    ## mysql serverId , v1.0.26+ will autoGen
    # canal.instance.mysql.slaveId=0
    # enable gtid use true/false
    canal.instance.gtidon=false
    # position info
    canal.instance.master.address=127.0.0.1:3306
    canal.instance.master.journal.name=
    canal.instance.master.position=
    canal.instance.master.timestamp=
    canal.instance.master.gtid=
    # rds oss binlog
    canal.instance.rds.accesskey=
    canal.instance.rds.secretkey=
    canal.instance.rds.instanceId=
    # table meta tsdb info
    canal.instance.tsdb.enable=true
    #canal.instance.tsdb.url=jdbc:mysql://127.0.0.1:3306/canal_tsdb
    #canal.instance.tsdb.dbUsername=canal
    #canal.instance.tsdb.dbPassword=canal
    #canal.instance.standby.address =
    #canal.instance.standby.journal.name =
    #canal.instance.standby.position =
    #canal.instance.standby.timestamp =
    #canal.instance.standby.gtid=
    # username/password of your database
    canal.instance.dbUsername=root
    canal.instance.dbPassword=12345678
    canal.instance.connectionCharset = UTF-8
    # enable druid Decrypt database password
    canal.instance.enableDruid=false
    #canal.instance.pwdPublicKey=MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBALK4BUxdDltRRE5/zXpVEVPUgunvscYFtEip3pmLlhrWpacX7y7GCMo2/JM6LeHmiiNdH1FWgGCpUfircSwlWKUCAwEAAQ==
    # table regex: the tables to watch; separate multiple entries with commas
    canal.instance.filter.regex=test.user
    # table black regex
    canal.instance.filter.black.regex=mysql\\.slave_.*
    # table field filter(format: schema1.tableName1:field1/field2,schema2.tableName2:field1/field2)
    #canal.instance.filter.field=test1.t_product:id/subject/keywords,test2.t_company:id/name/contact/ch
    # table field black filter(format: schema1.tableName1:field1/field2,schema2.tableName2:field1/field2)
    #canal.instance.filter.black.field=test1.t_product:subject/product_image,test2.t_company:id/name/contact/ch
    # mq config: the Kafka topic to publish changes to
    canal.mq.topic=myTestTopic
    # dynamic topic route by schema or table regex
    #canal.mq.dynamicTopic=mytest1.user,mytest2\\..*,.*\\..*
    canal.mq.partition=0
    # hash partition config
    #canal.mq.partitionsNum=3
    #canal.mq.partitionHash=test.table:id^name,.*\\..*
    #canal.mq.dynamicTopicPartitionNum=test.*:4,mycanal:6
    #################################################

    Start Canal

    Double-click startup.bat in the bin directory.
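    Before wiring up the Spring Boot application, it is worth confirming that change events actually reach Kafka. The sketch below is not from the original post; it uses the plain Kafka consumer API (kafka-clients on the classpath), assumes the broker at 127.0.0.1:9092 and the myTestTopic topic configured above, and the class name is illustrative. Run it, change a row in the watched test.user table, and the Canal JSON message should be printed.

    import java.time.Duration;
    import java.util.Collections;
    import java.util.Properties;

    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.serialization.StringDeserializer;

    public class CanalTopicCheck {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "127.0.0.1:9092");
            props.put(ConsumerConfig.GROUP_ID_CONFIG, "canal-topic-check");
            props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
            props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
            props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

            try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
                consumer.subscribe(Collections.singletonList("myTestTopic"));
                // poll for a while; every insert/update/delete on the watched table shows up here
                for (int i = 0; i < 30; i++) {
                    ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(2));
                    for (ConsumerRecord<String, String> record : records) {
                        System.out.println(record.value());
                    }
                }
            }
        }
    }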

    4. Add the Kafka and canal dependencies to pom.xml

    <dependency>
        <groupId>org.springframework.kafka</groupId>
        <artifactId>spring-kafka</artifactId>
    </dependency>
    <dependency>
        <groupId>org.springframework.kafka</groupId>
        <artifactId>spring-kafka-test</artifactId>
        <scope>test</scope>
    </dependency>
    <dependency>
        <groupId>com.alibaba.otter</groupId>
        <artifactId>canal.client</artifactId>
        <version>1.1.4</version>
    </dependency>

    Add the Kafka configuration to application.yml (these properties go under the spring: key):

    kafka:
      # Kafka configuration
      bootstrap-servers: 127.0.0.1:9092
      producer: # producer settings
        retries: 3 # with a value greater than 0, the client retries records whose send failed
        # number of messages per batch
        batch-size: 16384
        buffer-memory: 33554432
        # serializers for the message key and value
        key-serializer: org.apache.kafka.common.serialization.StringSerializer
        value-serializer: org.apache.kafka.common.serialization.StringSerializer
        acks: -1
      consumer:
        # default consumer group id
        group-id: my-consumer-group
        auto-offset-reset: earliest
        enable-auto-commit: false
        auto-commit-interval: 5000
        # deserializers for the message key and value
        key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
        value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      listener:
        ack-mode: manual_immediate

    5. Define the consumer

    import com.fan.li.myspringboot.util.RedisClient;
    import lombok.extern.slf4j.Slf4j;
    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.springframework.beans.factory.annotation.Autowired;
    import org.springframework.kafka.annotation.KafkaListener;
    import org.springframework.kafka.support.Acknowledgment;
    import org.springframework.stereotype.Component;

    /**
     * @ClassName CanalConsumer
     * @Description TODO
     * @Author fan
     * @Date 2024/2/23 14:11
     * @Version 1.0
     */
    @Component
    @Slf4j
    public class CanalConsumer {

        @Autowired
        RedisClient redisClient; // Redis helper class, wrapped by yourself (a sketch follows below)

        @KafkaListener(topics = "myTestTopic", groupId = "canalKafka-groupId")
        public void canalListenerKafkaToRedis(ConsumerRecord<String, String> record, Acknowledgment ack) {
            // payload describing the table change
            String value = record.value();
            log.info("table data changed: {}", value);
            // simply store it in Redis
            redisClient.setKey("your-key", value);
            // commit the offset manually
            ack.acknowledge();
        }
    }
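    The listener autowires a RedisClient helper that the post leaves to the reader. A minimal sketch of such a helper, assuming spring-boot-starter-data-redis is on the classpath and a Redis connection is configured in application.yml (the method names match the ones used above):

    import org.springframework.beans.factory.annotation.Autowired;
    import org.springframework.data.redis.core.StringRedisTemplate;
    import org.springframework.stereotype.Component;

    @Component
    public class RedisClient {

        @Autowired
        private StringRedisTemplate stringRedisTemplate;

        // store a plain string value
        public void setKey(String key, String value) {
            stringRedisTemplate.opsForValue().set(key, value);
        }

        // read a value back
        public String getKey(String key) {
            return stringRedisTemplate.opsForValue().get(key);
        }

        // remove a key, e.g. when a row is deleted
        public void deleteKey(String key) {
            stringRedisTemplate.delete(key);
        }
    }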

    6. Test: insert a row into the watched table and watch the application log.
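    Because canal.mq.flatMessage = true, the value logged above is a JSON document whose main fields are database, table, type (INSERT / UPDATE / DELETE), data (one column-to-value map per affected row) and old (the previous values on UPDATE). Rather than caching the whole message under a single fixed key, a hedged sketch of per-row handling follows; it uses Jackson (shipped with Spring Boot) and the RedisClient sketch above, and it assumes the watched test.user table has an id primary key column (class name and key layout are illustrative). It could be called from canalListenerKafkaToRedis() in place of the single setKey call.

    import com.fasterxml.jackson.databind.JsonNode;
    import com.fasterxml.jackson.databind.ObjectMapper;

    public class FlatMessageHandler {

        private static final ObjectMapper MAPPER = new ObjectMapper();

        public static void apply(String value, RedisClient redisClient) throws Exception {
            JsonNode msg = MAPPER.readTree(value);
            if (msg.path("isDdl").asBoolean(false)) {
                return; // ignore DDL statements
            }
            String table = msg.path("database").asText() + "." + msg.path("table").asText();
            String type = msg.path("type").asText(); // INSERT / UPDATE / DELETE
            for (JsonNode row : msg.path("data")) {  // one element per affected row
                String redisKey = table + ":" + row.path("id").asText(); // e.g. test.user:1
                if ("DELETE".equals(type)) {
                    redisClient.deleteKey(redisKey);
                } else {
                    redisClient.setKey(redisKey, row.toString()); // cache the whole row as JSON
                }
            }
        }
    }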

    Original article: https://blog.csdn.net/Zxdwr520/article/details/136256992