• 使用spring boot集成shardingsphere分库分表简易测试


    根据如下pom整上一个spring-boot项目,spring-boot版本用2.3.5,shardingsphere用5.1.1。

    1. <?xml version="1.0" encoding="UTF-8"?>
    2. <project xmlns="http://maven.apache.org/POM/4.0.0"
    3. xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    4. xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    5. 4.0.0
    6. com.shardingsphere
    7. shardingsphere-test
    8. 1.0-SNAPSHOT
    9. org.springframework.boot
    10. spring-boot-starter-parent
    11. 2.3.5.RELEASE
    12. org.springframework.boot
    13. spring-boot-starter-web
    14. org.apache.shardingsphere
    15. shardingsphere-jdbc-core-spring-boot-starter
    16. 5.1.1
    17. com.baomidou
    18. mybatis-plus-boot-starter
    19. 3.3.1
    20. mysql
    21. mysql-connector-java
    22. runtime
    23. org.projectlombok
    24. lombok
    25. org.springframework.boot
    26. spring-boot-starter-test
    27. test
    28. org.junit.vintage
    29. junit-vintage-engine
    30. junit
    31. junit
    32. org.springframework.boot
    33. spring-boot-test
    34. org.springframework
    35. spring-test
    36. compile

    再用mysql整一个名为shardingsphere-test的库(编码集utf8mb4,也可以选自己喜欢的编码集),再往里弄上两个测试表。

    1. CREATE TABLE t_course_1 (
    2. `cid` BIGINT(20) NOT NULL,
    3. `user_id` BIGINT(20) DEFAULT NULL,
    4. `cname` VARCHAR(50) DEFAULT NULL,
    5. `brief` VARCHAR(50) DEFAULT NULL,
    6. `price` DOUBLE DEFAULT NULL,
    7. `status` INT(11) DEFAULT NULL,
    8. PRIMARY KEY (`cid`)
    9. ) ENGINE=INNODB DEFAULT CHARSET=utf8mb4;
    10. CREATE TABLE t_course_2 (
    11. `cid` BIGINT(20) NOT NULL,
    12. `user_id` BIGINT(20) DEFAULT NULL,
    13. `cname` VARCHAR(50) DEFAULT NULL,
    14. `brief` VARCHAR(50) DEFAULT NULL,
    15. `price` DOUBLE DEFAULT NULL,
    16. `status` INT(11) DEFAULT NULL,
    17. PRIMARY KEY (`cid`)
    18. ) ENGINE=INNODB DEFAULT CHARSET=utf8mb4;

    项目结构如下。

    application.yml的配置及解释如下:

    1. spring:
    2. application:
    3. name: sharding-jdbc-demo # 应用名称
    4. shardingsphere:
    5. # 设置全局属性
    6. props:
    7. # 打开SQL显示功能,将会打印出执行的原始SQL和实际路由后的SQL
    8. sql-show: true
    9. # 数据源配置
    10. datasource:
    11. # 定义数据源名称列表,这里只有一个名为db1的数据源
    12. names: db1
    13. # 数据源db1的具体配置
    14. db1:
    15. # 数据源实现类,此处使用HikariCP连接池
    16. type: com.zaxxer.hikari.HikariDataSource
    17. # JDBC驱动类名,对应MySQL数据库驱动
    18. driver-class-name: com.mysql.jdbc.Driver
    19. # 数据库连接URL,连接本地MySQL服务器上的shardingsphere-test数据库,并设置字符编码和禁用SSL
    20. url: jdbc:mysql://127.0.0.1:3306/shardingsphere-test?characterEncoding=UTF-8&useSSL=false
    21. # 数据库用户名
    22. username: root
    23. # 数据库密码
    24. password: root
    25. # 规则配置部分
    26. rules:
    27. # 分片规则相关配置
    28. sharding:
    29. # 1.定义分片表的实际分布情况
    30. tables:
    31. # 针对表t_course的分片配置
    32. t_course:
    33. # 实际数据节点分布,表明t_course表被分成两张表,分别命名为t_course_1和t_course_2,并存储在db1数据库中
    34. actual-data-nodes: "db1.t_course_$->{1..2}"
    35. # 配置策略
    36. table-strategy:
    37. # 用于单分片键的标准分片场景
    38. standard:
    39. sharding-column: cid
    40. # 分片算法名字
    41. sharding-algorithm-name: course_inline
    42. # 分布式主键生成策略配置
    43. key-generate-strategy:
    44. # 主键列名为cid
    45. column: cid
    46. # 引用已定义的分布式序列生成器 snowflake
    47. key-generator-name: snowflake
    48. # 2.定义分布式序列生成器
    49. key-generators:
    50. # 定义名为snowflake的分布式序列生成器
    51. snowflake:
    52. # 类型为SNOWFLAKE算法,用于生成全局唯一ID
    53. type: SNOWFLAKE
    54. # 3.定义分片算法
    55. sharding-algorithms:
    56. # 定义名为course_inline的分片算法
    57. course_inline:
    58. # 使用INLINE类型的行表达式分片算法
    59. type: INLINE
    60. # 算法属性配置
    61. props:
    62. # 行表达式算法具体内容,按照cid模2加1的值来确定数据落入哪张表,例如:cid%2+1=1,则落入t_course_1,等于2则落入t_course_2
    63. algorithm-expression: t_course_$->{cid % 2 + 1}
    64. # 4.定义分片策略
    65. #sharding-strategies:
    66. # 对于表t_course的分片策略定义
    67. #t_course_strategy:
    68. # 使用标准分片策略
    69. #type: STANDARD
    70. # 指定分片键为cid列
    71. #sharding-column: cid
    72. # 引用已定义的分片算法
    73. #sharding-algorithm-name: course_inline
    74. # SQL输出日志
    75. mybatis-plus:
    76. configuration:
    77. log-impl: org.apache.ibatis.logging.stdout.StdOutImpl

    再弄一个对应数据库的实体类。

    1. import com.baomidou.mybatisplus.annotation.IdType;
    2. import com.baomidou.mybatisplus.annotation.TableId;
    3. import com.baomidou.mybatisplus.annotation.TableName;
    4. import lombok.Data;
    5. import lombok.ToString;
    6. @TableName("t_course")
    7. @Data
    8. @ToString
    9. public class Course {
    10. //@TableId(type = IdType.AUTO)
    11. @TableId
    12. private Long cid;
    13. private Long userId;
    14. private Long corderNo;
    15. private String cname;
    16. private String brief;
    17. private Double price;
    18. private Integer status;
    19. }

    再搞一个mapper。

    1. import com.baomidou.mybatisplus.core.mapper.BaseMapper;
    2. import com.shardingsphere.demo.entity.Course;
    3. import org.apache.ibatis.annotations.Mapper;
    4. @Mapper
    5. public interface CourseMapper extends BaseMapper {
    6. }

    再搞一个service接口。

    1. public interface CoureInterface {
    2. public void addCoure();
    3. }

    按道上的规矩再搞一个service接口的实现类。

    1. import com.shardingsphere.demo.coure.CoureInterface;
    2. import com.shardingsphere.demo.entity.Course;
    3. import com.shardingsphere.demo.mapper.CourseMapper;
    4. import org.springframework.beans.factory.annotation.Autowired;
    5. import org.springframework.stereotype.Service;
    6. @Service
    7. public class CoureInterfaceImpl implements CoureInterface {
    8. @Autowired
    9. private CourseMapper courseMapper;
    10. @Override
    11. public void addCoure() {
    12. for (int i = 0; i < 30; i++) {
    13. Course course = new Course();
    14. // 注意: cid使用雪花算法配置了(还可以使用MybatisPlus UUID),此处不用配置
    15. course.setUserId(1000L + i);
    16. course.setCname("ShardingSphere");
    17. course.setBrief("ShardingSphere测试");
    18. course.setPrice(66.6);
    19. course.setStatus(1);
    20. courseMapper.insert(course);
    21. }
    22. }
    23. }

    再弄上一个controller,来接收远方的呼唤。

    1. import com.shardingsphere.demo.coure.CoureInterface;
    2. import org.springframework.beans.factory.annotation.Autowired;
    3. import org.springframework.web.bind.annotation.RequestMapping;
    4. import org.springframework.web.bind.annotation.RestController;
    5. @RestController
    6. public class CourseController {
    7. @Autowired
    8. private CoureInterface coureInterface;
    9. @RequestMapping(path = "/demo/addCourse")
    10. public void addCourse(){
    11. coureInterface.addCoure();
    12. }
    13. }

    最后再弄上一个spring boot的启动类,用来启动这个spring boot项目。

    1. import org.springframework.boot.SpringApplication;
    2. import org.springframework.boot.autoconfigure.SpringBootApplication;
    3. @SpringBootApplication
    4. public class ShardingSphereTest {
    5. public static void main(String[] args) {
    6. SpringApplication.run(ShardingSphereTest.class, args);
    7. }
    8. }

    我们启动服务后,直接浏览器访问这个接口简单测试一下。

    localhost:8080/demo/addCourse

    再去数据库看一眼,发现如我们所想,数据已经被拆分到两个表中了。

    趁热打铁,我们再进阶一小点,把库也给分了算了。

    再找一台机器创建一个跟上面一模一样的数据库(shardingsphere-test),库里的表也跟上面建的一模一样两张表(t_course_1,t_course_2),这样我们就富裕了,有了俩数据库。

    需要改造的地方就是我们的application.yml配置文件,加上分库操作。

    1. spring:
    2. application:
    3. name: sharding-jdbc-demo-database # 应用名称
    4. shardingsphere:
    5. # 设置全局属性
    6. props:
    7. # 打开SQL显示功能,将会打印出执行的原始SQL和实际路由后的SQL
    8. sql-show: true
    9. # 数据源配置
    10. datasource:
    11. # 定义数据源名称列表,这里有两个数据源(两台机器上数据库的名字可以一样,也可以不一样)
    12. names: db1, db2
    13. # 数据源db1的具体配置
    14. db1:
    15. # 数据源实现类,此处使用HikariCP连接池
    16. type: com.zaxxer.hikari.HikariDataSource
    17. # JDBC驱动类名,对应MySQL数据库驱动
    18. driver-class-name: com.mysql.jdbc.Driver
    19. # 数据库连接URL,连接本地MySQL服务器上的shardingsphere-test数据库,并设置字符编码和禁用SSL
    20. url: jdbc:mysql://127.0.0.1:3306/shardingsphere-test?characterEncoding=UTF-8&useSSL=false
    21. # 数据库用户名
    22. username: root
    23. # 数据库密码
    24. password: root
    25. # 数据源db2的具体配置
    26. db2:
    27. # 数据源实现类,此处使用HikariCP连接池
    28. type: com.zaxxer.hikari.HikariDataSource
    29. # JDBC驱动类名,对应MySQL数据库驱动
    30. driver-class-name: com.mysql.jdbc.Driver
    31. # 数据库连接URL,连接本地MySQL服务器上的shardingsphere-test数据库,并设置字符编码和禁用SSL
    32. url: jdbc:mysql://124.223.XXX.XXX:3306/shardingsphere-test?characterEncoding=UTF-8&useSSL=false
    33. # 数据库用户名
    34. username: root
    35. # 数据库密码
    36. password: root
    37. # 规则配置部分
    38. rules:
    39. # 分片规则相关配置
    40. sharding:
    41. # 1.定义分片表的实际分布情况
    42. tables:
    43. # 针对表t_course的分片配置
    44. t_course:
    45. # 实际数据节点分布,表明t_course表被分成两张表,分别命名为t_course_1和t_course_2,分布在db1和db2两个数据库中
    46. actual-data-nodes: "db$->{1..2}.t_course_$->{1..2}"
    47. # 配置库策略
    48. database-strategy:
    49. standard:
    50. sharding-column: user_id
    51. sharding-algorithm-name: table_inline
    52. # 配置表策略
    53. table-strategy:
    54. # 用于单分片键的标准分片场景
    55. standard:
    56. sharding-column: cid
    57. # 分片算法名字
    58. sharding-algorithm-name: course_inline
    59. # 分布式主键生成策略配置
    60. key-generate-strategy:
    61. # 主键列名为cid
    62. column: cid
    63. # 引用已定义的分布式序列生成器 snowflake
    64. key-generator-name: snowflake
    65. # 2.定义分布式序列生成器
    66. key-generators:
    67. # 定义名为snowflake的分布式序列生成器
    68. snowflake:
    69. # 类型为SNOWFLAKE算法,用于生成全局唯一ID
    70. type: SNOWFLAKE
    71. # 3.定义分片算法
    72. sharding-algorithms:
    73. #定义库分片算法
    74. table_inline:
    75. type: INLINE
    76. props:
    77. algorithm-expression: db$->{user_id % 2 + 1}
    78. # 定义名为course_inline的分片算法,表分片
    79. course_inline:
    80. # 使用INLINE类型的行表达式分片算法
    81. type: INLINE
    82. # 算法属性配置
    83. props:
    84. # 行表达式算法具体内容,按照cid模2加1的值来确定数据落入哪张表,例如:cid%2+1=1,则落入t_course_1,等于2则落入t_course_2
    85. algorithm-expression: t_course_$->{cid % 2 + 1}
    86. # 4.定义分片策略
    87. #sharding-strategies:
    88. # 对于表t_course的分片策略定义
    89. #t_course_strategy:
    90. # 使用标准分片策略
    91. #type: STANDARD
    92. # 指定分片键为cid列
    93. #sharding-column: cid
    94. # 引用已定义的分片算法
    95. #sharding-algorithm-name: course_inline
    96. # SQL输出日志
    97. mybatis-plus:
    98. configuration:
    99. log-impl: org.apache.ibatis.logging.stdout.StdOutImpl

    然后重启服务,重新访问localhost:8080/demo/addCourse 调用添加数据的服务接口,此时再查数据库就会发现数据已经被shardingsphere分到不同库的不同表里了。

    分库查询

            入库了以后,我们写个测试类尝试查询一下,看看会是怎么样。

    1. @SpringBootTest
    2. @RunWith(SpringRunner.class)
    3. @Slf4j
    4. public class MyTest {
    5. @Autowired(required = false)
    6. private CourseMapper courseMapper;
    7. /**
    8. * 水平分片:查询所有记录
    9. * 查询了两个数据源,每个数据源中使用UNION ALL连接两个表
    10. */
    11. @Test
    12. public void testShardingSelectOne(){
    13. List courses = courseMapper.selectList(null);
    14. courses.forEach(System.out::println);
    15. }
    16. }

    我们来看一下查询结果。

    1. Creating a new SqlSession
    2. SqlSession [org.apache.ibatis.session.defaults.DefaultSqlSession@30b0d5a7] was not registered for synchronization because synchronization is not active
    3. JDBC Connection [org.apache.shardingsphere.driver.jdbc.core.connection.ShardingSphereConnection@411e567e] will not be managed by Spring
    4. ==> Preparing: SELECT cid,user_id,cname,brief,price,status FROM t_course
    5. ==> Parameters:
    6. 2024-04-19 21:17:29.909 INFO 25740 --- [ main] ShardingSphere-SQL : Logic SQL: SELECT cid,user_id,cname,brief,price,status FROM t_course
    7. 2024-04-19 21:17:29.909 INFO 25740 --- [ main] ShardingSphere-SQL : SQLStatement: MySQLSelectStatement(table=Optional.empty, limit=Optional.empty, lock=Optional.empty, window=Optional.empty)
    8. 2024-04-19 21:17:29.910 INFO 25740 --- [ main] ShardingSphere-SQL : Actual SQL: db1 ::: SELECT cid,user_id,cname,brief,price,status FROM t_course_1 UNION ALL SELECT cid,user_id,cname,brief,price,status FROM t_course_2
    9. 2024-04-19 21:17:29.910 INFO 25740 --- [ main] ShardingSphere-SQL : Actual SQL: db2 ::: SELECT cid,user_id,cname,brief,price,status FROM t_course_1 UNION ALL SELECT cid,user_id,cname,brief,price,status FROM t_course_2
    10. <== Columns: cid, user_id, cname, brief, price, status
    11. <== Row: 1781311094111928321, 1000, ShardingSphere, ShardingSphere尝试查询, 99.0, 1
    12. <== Row: 1781311097660309505, 1002, ShardingSphere, ShardingSphere尝试查询, 99.0, 1
    13. <== Row: 1781311098117488641, 1004, ShardingSphere, ShardingSphere尝试查询, 99.0, 1
    14. <== Row: 1781311098650165249, 1006, ShardingSphere, ShardingSphere尝试查询, 99.0, 1
    15. <== Row: 1781311097660309506, 1003, ShardingSphere, ShardingSphere尝试查询, 99.0, 1
    16. <== Row: 1781311098184597506, 1005, ShardingSphere, ShardingSphere尝试查询, 99.0, 1
    17. <== Row: 1781311098650165250, 1007, ShardingSphere, ShardingSphere尝试查询, 99.0, 1
    18. <== Row: 1781311096750145537, 1001, ShardingSphere, ShardingSphere尝试查询, 99.0, 1
    19. <== Total: 8

    可以看到,它是把每个库的每张表进行union操作,返回总结果。

    多表关联查询

            如果我们要是多表查询呢?先建上两张有关联的表来试一下。如下两个数据库分别创建t_order0,t_order1,t_order_item0,t_order_item1,仍然分库分表创建,只不过让t_order和t_order_item有联系,即有如此关联:SELECT o.*,i.* FROM t_order o JOIN t_order_item i ON o.order_no = i.order_no;

    1. CREATE TABLE t_order0 (
    2. id BIGINT,
    3. order_no VARCHAR(30),
    4. user_id BIGINT,
    5. amount DECIMAL(10,2),
    6. PRIMARY KEY(id)
    7. );
    8. CREATE TABLE t_order1 (
    9. id BIGINT,
    10. order_no VARCHAR(30),
    11. user_id BIGINT,
    12. amount DECIMAL(10,2),
    13. PRIMARY KEY(id)
    14. );
    15. CREATE TABLE t_order_item0(
    16. id BIGINT,
    17. order_no VARCHAR(30),
    18. user_id BIGINT,
    19. price DECIMAL(10,2),
    20. `count` INT,
    21. PRIMARY KEY(id)
    22. );
    23. CREATE TABLE t_order_item1(
    24. id BIGINT,
    25. order_no VARCHAR(30),
    26. user_id BIGINT,
    27. price DECIMAL(10,2),
    28. `count` INT,
    29. PRIMARY KEY(id)
    30. );

    再在配置文件中将这俩表的配置搞好。

    1. spring:
    2. application:
    3. name: sharding-jdbc-demo-database # 应用名称
    4. shardingsphere:
    5. # 设置全局属性
    6. props:
    7. # 打开SQL显示功能,将会打印出执行的原始SQL和实际路由后的SQL
    8. sql-show: true
    9. # 数据源配置
    10. datasource:
    11. # 定义数据源名称列表,这里有两个数据源(两台机器上数据库的名字可以一样,也可以不一样)
    12. names: db1, db2
    13. # 数据源db1的具体配置
    14. db1:
    15. # 数据源实现类,此处使用HikariCP连接池
    16. type: com.zaxxer.hikari.HikariDataSource
    17. # JDBC驱动类名,对应MySQL数据库驱动
    18. driver-class-name: com.mysql.jdbc.Driver
    19. # 数据库连接URL,连接本地MySQL服务器上的shardingsphere-test数据库,并设置字符编码和禁用SSL
    20. url: jdbc:mysql://127.0.0.1:3306/shardingsphere-test?characterEncoding=UTF-8&useSSL=false
    21. # 数据库用户名
    22. username: root
    23. # 数据库密码
    24. password: root
    25. # 数据源db2的具体配置
    26. db2:
    27. # 数据源实现类,此处使用HikariCP连接池
    28. type: com.zaxxer.hikari.HikariDataSource
    29. # JDBC驱动类名,对应MySQL数据库驱动
    30. driver-class-name: com.mysql.jdbc.Driver
    31. # 数据库连接URL,连接本地MySQL服务器上的shardingsphere-test数据库,并设置字符编码和禁用SSL
    32. url: jdbc:mysql://124.223.XXX.XXX:3306/shardingsphere-test?characterEncoding=UTF-8&useSSL=false
    33. # 数据库用户名
    34. username: root
    35. # 数据库密码
    36. password: 123456
    37. # 规则配置部分
    38. rules:
    39. # 分片规则相关配置
    40. sharding:
    41. # 1.定义分片表的实际分布情况
    42. tables:
    43. # 针对表t_course的分片配置
    44. t_course:
    45. # 实际数据节点分布,表明t_course表被分成两张表,分别命名为t_course_1和t_course_2,分布在db1和db2两个数据库中
    46. actual-data-nodes: "db$->{1..2}.t_course_$->{1..2}"
    47. # 配置库策略
    48. database-strategy:
    49. standard:
    50. sharding-column: user_id
    51. sharding-algorithm-name: table_inline
    52. # 配置表策略
    53. table-strategy:
    54. # 用于单分片键的标准分片场景
    55. standard:
    56. sharding-column: cid
    57. # 分片算法名字
    58. sharding-algorithm-name: course_inline
    59. # 分布式主键生成策略配置
    60. key-generate-strategy:
    61. # 主键列名为cid
    62. column: cid
    63. # 引用已定义的分布式序列生成器 snowflake
    64. key-generator-name: snowflake
    65. #order表的分片分库策略
    66. t_order:
    67. # 实际数据节点分布,表明t_order表被分成两张表,分别命名为t_order0和t_order1,分布在db1和db2两个数据库中
    68. actual-data-nodes: "db$->{1..2}.t_order$->{0..1}"
    69. # 配置分库策略
    70. database-strategy:
    71. standard:
    72. sharding-column: user_id
    73. sharding-algorithm-name: order_inline
    74. # 配置分表策略
    75. table-strategy:
    76. # 用于单分片键的标准分片场景,根据order_no的hash值进行分片
    77. standard:
    78. sharding-column: order_no
    79. # 分片算法名字
    80. sharding-algorithm-name: order_no_mod
    81. # 分布式主键生成策略配置
    82. key-generate-strategy:
    83. # 主键列名为id
    84. column: id
    85. # 引用已定义的分布式序列生成器 snowflake
    86. key-generator-name: snowflake
    87. #order表的分片分库策略
    88. t_order_item:
    89. # 实际数据节点分布,表明t_order_item表被分成两张表,分别命名为t_order_item0和t_order_item1,分布在db1和db2两个数据库中
    90. actual-data-nodes: "db$->{1..2}.t_order_item$->{0..1}"
    91. # 配置分库策略
    92. database-strategy:
    93. standard:
    94. sharding-column: user_id
    95. sharding-algorithm-name: order_inline
    96. # 配置分表策略
    97. table-strategy:
    98. # 用于单分片键的标准分片场景,根据order_no的hash值进行分片
    99. standard:
    100. sharding-column: order_no
    101. # 分片算法名字
    102. sharding-algorithm-name: order_no_mod
    103. # 分布式主键生成策略配置
    104. key-generate-strategy:
    105. # 主键列名为id
    106. column: id
    107. # 引用已定义的分布式序列生成器 snowflake
    108. key-generator-name: snowflake
    109. # 2.定义分布式序列生成器
    110. key-generators:
    111. # 定义名为snowflake的分布式序列生成器
    112. snowflake:
    113. # 类型为SNOWFLAKE算法,用于生成全局唯一ID
    114. type: SNOWFLAKE
    115. # 3.定义你想配置表的分片算法
    116. sharding-algorithms:
    117. #定义库分片算法
    118. table_inline:
    119. type: INLINE
    120. props:
    121. algorithm-expression: db$->{user_id % 2 + 1}
    122. # 定义名为course_inline的分片算法,表分片
    123. course_inline:
    124. # 使用INLINE类型的行表达式分片算法
    125. type: INLINE
    126. # 算法属性配置
    127. props:
    128. # 行表达式算法具体内容,按照cid模2加1的值来确定数据落入哪张表,例如:cid%2+1=1,则落入t_course_1,等于2则落入t_course_2
    129. algorithm-expression: t_course_$->{cid % 2 + 1}
    130. order_inline:
    131. type: INLINE
    132. props:
    133. algorithm-expression: db$->{user_id % 2 + 1}
    134. order_no_mod:
    135. # 使用HASH_MOD类型的哈希取模分片算法
    136. type: HASH_MOD
    137. # 算法属性配置
    138. props:
    139. # 分片数量为2,按order_no的哈希值对2取模确定数据落入哪张表
    140. sharding-count: 2
    141. # 4.定义分片策略
    142. #sharding-strategies:
    143. # 对于表t_course的分片策略定义
    144. #t_course_strategy:
    145. # 使用标准分片策略
    146. #type: STANDARD
    147. # 指定分片键为cid列
    148. #sharding-column: cid
    149. # 引用已定义的分片算法
    150. #sharding-algorithm-name: course_inline
    151. # SQL输出日志
    152. mybatis-plus:
    153. configuration:
    154. log-impl: org.apache.ibatis.logging.stdout.StdOutImpl

    OrderMapper改造一下,进行表关联。 

    1. import com.baomidou.mybatisplus.core.mapper.BaseMapper;
    2. import com.shardingsphere.demo.entity.Order;
    3. import com.shardingsphere.demo.vo.OrderVo;
    4. import org.apache.ibatis.annotations.Mapper;
    5. import org.apache.ibatis.annotations.Select;
    6. import java.util.List;
    7. @Mapper
    8. public interface OrderMapper extends BaseMapper {
    9. @Select({"SELECT o.order_no, SUM(i.price * i.count) AS amount",
    10. "FROM t_order o JOIN t_order_item i ON o.order_no = i.order_no",
    11. "GROUP BY o.order_no"})
    12. List getOrderAmount();
    13. }

    再写个测试方法。

    1. /**
    2. * 测试关联表查询
    3. */
    4. @Test
    5. public void testGetOrderAmount(){
    6. List orderAmountList = orderMapper.getOrderAmount();
    7. orderAmountList.forEach(System.out::println);
    8. }

    看看执行结果:

    1. ==> Preparing: SELECT o.order_no, SUM(i.price * i.count) AS amount FROM t_order o JOIN t_order_item i ON o.order_no = i.order_no GROUP BY o.order_no
    2. ==> Parameters:
    3. 2024-04-19 21:27:52.938 INFO 21784 --- [ main] ShardingSphere-SQL : Logic SQL: SELECT o.order_no, SUM(i.price * i.count) AS amount FROM t_order o JOIN t_order_item i ON o.order_no = i.order_no GROUP BY o.order_no
    4. 2024-04-19 21:27:52.939 INFO 21784 --- [ main] ShardingSphere-SQL : SQLStatement: MySQLSelectStatement(table=Optional.empty, limit=Optional.empty, lock=Optional.empty, window=Optional.empty)
    5. 2024-04-19 21:27:52.939 INFO 21784 --- [ main] ShardingSphere-SQL : Actual SQL: db2 ::: SELECT o.order_no, SUM(i.price * i.count) AS amount FROM t_order0 o JOIN t_order_item0 i ON o.order_no = i.order_no GROUP BY o.order_no ORDER BY o.order_no ASC
    6. 2024-04-19 21:27:52.939 INFO 21784 --- [ main] ShardingSphere-SQL : Actual SQL: db2 ::: SELECT o.order_no, SUM(i.price * i.count) AS amount FROM t_order1 o JOIN t_order_item0 i ON o.order_no = i.order_no GROUP BY o.order_no ORDER BY o.order_no ASC
    7. 2024-04-19 21:27:52.939 INFO 21784 --- [ main] ShardingSphere-SQL : Actual SQL: db2 ::: SELECT o.order_no, SUM(i.price * i.count) AS amount FROM t_order0 o JOIN t_order_item1 i ON o.order_no = i.order_no GROUP BY o.order_no ORDER BY o.order_no ASC
    8. 2024-04-19 21:27:52.939 INFO 21784 --- [ main] ShardingSphere-SQL : Actual SQL: db2 ::: SELECT o.order_no, SUM(i.price * i.count) AS amount FROM t_order1 o JOIN t_order_item1 i ON o.order_no = i.order_no GROUP BY o.order_no ORDER BY o.order_no ASC
    9. 2024-04-19 21:27:52.939 INFO 21784 --- [ main] ShardingSphere-SQL : Actual SQL: db1 ::: SELECT o.order_no, SUM(i.price * i.count) AS amount FROM t_order0 o JOIN t_order_item0 i ON o.order_no = i.order_no GROUP BY o.order_no ORDER BY o.order_no ASC
    10. 2024-04-19 21:27:52.939 INFO 21784 --- [ main] ShardingSphere-SQL : Actual SQL: db1 ::: SELECT o.order_no, SUM(i.price * i.count) AS amount FROM t_order1 o JOIN t_order_item0 i ON o.order_no = i.order_no GROUP BY o.order_no ORDER BY o.order_no ASC
    11. 2024-04-19 21:27:52.939 INFO 21784 --- [ main] ShardingSphere-SQL : Actual SQL: db1 ::: SELECT o.order_no, SUM(i.price * i.count) AS amount FROM t_order0 o JOIN t_order_item1 i ON o.order_no = i.order_no GROUP BY o.order_no ORDER BY o.order_no ASC
    12. 2024-04-19 21:27:52.939 INFO 21784 --- [ main] ShardingSphere-SQL : Actual SQL: db1 ::: SELECT o.order_no, SUM(i.price * i.count) AS amount FROM t_order1 o JOIN t_order_item1 i ON o.order_no = i.order_no GROUP BY o.order_no ORDER BY o.order_no ASC
    13. <== Columns: order_no, amount
    14. <== Row: ShardingSphere1, 40.00
    15. <== Row: ShardingSphere2, 40.00
    16. <== Row: ShardingSphere5, 6.00
    17. <== Row: ShardingSphere6, 6.00
    18. <== Total: 4

            发现了一个问题:可以看到同一个数据源中,查询的次数是t_order和t_order_item两张表的笛卡尔积数量,但是t_order0中的订单数据只会关联到同一数据源中的t_order_item0,不会出现在t_order_item1中,所以有些关联查询是没有意义的,那么接下来就引入了绑定表的概念。

    绑定表

            指分片规则一致的一组分片表。 使用绑定表进行多表关联查询时,必须使用分片键进行关联,否则会出现笛卡尔积关联或跨库关联,从而影响查询效率。所以我们来配置一下,将t_order和t_order_item绑定一下,只需要在rules这里增加如下配置,如果你的配置文件是.properties类型的,需要这样配:spring.shardingsphere.rules.sharding.binding-tables[0]=t_order,t_order_item

    1. # 规则配置部分
    2. rules:
    3. # 分片规则相关配置
    4. sharding:
    5. #绑定表
    6. binding-tables:
    7. - t_order,t_order_item

    再次查询:

    1. ==> Preparing: SELECT o.order_no, SUM(i.price * i.count) AS amount FROM t_order o JOIN t_order_item i ON o.order_no = i.order_no GROUP BY o.order_no
    2. ==> Parameters:
    3. 2024-04-19 21:43:08.069 INFO 24608 --- [ main] ShardingSphere-SQL : Logic SQL: SELECT o.order_no, SUM(i.price * i.count) AS amount FROM t_order o JOIN t_order_item i ON o.order_no = i.order_no GROUP BY o.order_no
    4. 2024-04-19 21:43:08.069 INFO 24608 --- [ main] ShardingSphere-SQL : SQLStatement: MySQLSelectStatement(table=Optional.empty, limit=Optional.empty, lock=Optional.empty, window=Optional.empty)
    5. 2024-04-19 21:43:08.070 INFO 24608 --- [ main] ShardingSphere-SQL : Actual SQL: db1 ::: SELECT o.order_no, SUM(i.price * i.count) AS amount FROM t_order0 o JOIN t_order_item0 i ON o.order_no = i.order_no GROUP BY o.order_no ORDER BY o.order_no ASC
    6. 2024-04-19 21:43:08.070 INFO 24608 --- [ main] ShardingSphere-SQL : Actual SQL: db1 ::: SELECT o.order_no, SUM(i.price * i.count) AS amount FROM t_order1 o JOIN t_order_item1 i ON o.order_no = i.order_no GROUP BY o.order_no ORDER BY o.order_no ASC
    7. 2024-04-19 21:43:08.070 INFO 24608 --- [ main] ShardingSphere-SQL : Actual SQL: db2 ::: SELECT o.order_no, SUM(i.price * i.count) AS amount FROM t_order0 o JOIN t_order_item0 i ON o.order_no = i.order_no GROUP BY o.order_no ORDER BY o.order_no ASC
    8. 2024-04-19 21:43:08.070 INFO 24608 --- [ main] ShardingSphere-SQL : Actual SQL: db2 ::: SELECT o.order_no, SUM(i.price * i.count) AS amount FROM t_order1 o JOIN t_order_item1 i ON o.order_no = i.order_no GROUP BY o.order_no ORDER BY o.order_no ASC
    9. <== Columns: order_no, amount
    10. <== Row: ShardingSphere1, 40.00
    11. <== Row: ShardingSphere2, 40.00
    12. <== Row: ShardingSphere5, 6.00
    13. <== Row: ShardingSphere6, 6.00
    14. <== Total: 4

    突然一家伙少了四条查询。

    • 如果不配置绑定表:测试的结果为8个SQL。 多表关联查询会出现笛卡尔积关联。
    • 如果配置绑定表:测试的结果为4个SQL。 多表关联查询不会出现笛卡尔积关联,关联查询效率将大大提升。
  • 相关阅读:
    链式队列的基本操作与实现(数据结构与算法)
    【Java SE】基本练习
    【三人一机】
    使用c#强大的表达式树实现对象的深克隆之解决循环引用的问题
    写一个Webpack Plugin?其实很简单,看这些你就明白了
    第七章 查找 八、B树
    干货分享 | TSMaster RPC 基础入门:编程指导和使用说明
    趣店预制菜爆火背后,是一场慢节奏的长期主义
    LeetCode练习4——删除有序数组中的重复项
    【HTML5期末大作业】制作一个简单HTML我的班级网页(HTML+CSS+JS)
  • 原文地址:https://blog.csdn.net/weixin_43970625/article/details/137962390