• Reference shell script for timing dble split execution and data import


    Reference shell script for timing the split test
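
    The script below drives one complete test round: it recreates the backend schemas through the dble admin port (9066), times the split of a mysqldump file, and then times the import of the split files, either loaded from the dble host over the network (LOADIN="locally") or copied to each backend MySQL host and loaded locally there (LOADIN="remote"). All timings are appended to time_record.log. The script assumes 10 sharding nodes spread across 4 backend MySQL hosts and uses expect to drive the ssh/scp steps.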

    #!/bin/bash
    # set -euxo
    # For a brand-new test environment, set TEST_ENV="new" so the helper expect scripts get generated
    TEST_ENV=
    # LOADIN="locally": import the split mysqldump files from the dble host, connecting to the backend MySQL remotely;
    # LOADIN="remote": scp the split mysqldump files to the backend MySQL hosts and import them locally there
    LOADIN="locally"
    test_dir='/tmp/splittest/'
    dble_conf_dir='/opt/dble/conf/'
    ex_dump_file_name='benchmarksql_with_data'
    dump_file_name='benchmarksql_with_data'
    shardingNum=10
    ex_shardingNum=10
    MYSQL='/usr/bin/mysql -uxxxx -pxxxx -P3306'
    MYSQL_HOST[0]='xx.xx.xx.101'
    MYSQL_HOST[1]='xx.xx.xx.102'
    MYSQL_HOST[2]='xx.xx.xx.105'
    MYSQL_HOST[3]='xx.xx.xx.108'
    DBLE_ROOT='/usr/bin/mysql -uroot -p111111 -P9066 -h127.0.0.1'
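    # the shardingNum sharding nodes are assigned round-robin to the 4 backend hosts above;
    # 9066 is dble's manager (admin) port, 3306 the backend MySQL service port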
    
    function create_backend_mysqls_from_dble_cmd () {
        # generate the drop/create sharding-node SQL files used below
        drop_ex_shardingnodes="${test_dir}drop_ex_shardingnodes_dnx.sql"
        create_shardingnodes="${test_dir}create_shardingnodes_dnx.sql"
        rm -f ${drop_ex_shardingnodes}
        cat>"${drop_ex_shardingnodes}"<<EOFDROP
    drop database @@shardingnode="an\$1-${ex_shardingNum}";
    EOFDROP
        rm -f ${create_shardingnodes}
        cat>"${create_shardingnodes}"<<EOFCREATE
    reload @@config_all;
    create database @@shardingnode="an\$1-${shardingNum}";
    EOFCREATE
    
        ${DBLE_ROOT} < ${drop_ex_shardingnodes}
        if [[ $? != 0 ]]; then
            echo "fail to drop backend schema via dble admin cmd line"
            exit 1
        fi
        # swap in the sharding.xml that defines ${shardingNum} sharding nodes (keep the old one as sharding_${ex_shardingNum}.xml)
        mv ${dble_conf_dir}sharding.xml ${dble_conf_dir}sharding_${ex_shardingNum}.xml
        mv ${dble_conf_dir}sharding_${shardingNum}.xml ${dble_conf_dir}sharding.xml
        ${DBLE_ROOT} < ${create_shardingnodes}
        if [[ $? != 0 ]]; then
            echo "fail to create backend schema via dble admin cmd line"
            exit 1
        fi
    
        # check that the backend MySQL schemas have been created (spot-check on one host)
        ${MYSQL} -h${MYSQL_HOST[1]} -e "show databases;" | grep "dh_dn_"
        if [[ $? != 0 ]]; then
            echo "backend schemas dh_dn_* not found on ${MYSQL_HOST[1]}"
        fi
    }
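    # With the defaults above (shardingNum=ex_shardingNum=10), the generated admin SQL expands to:
    #   drop_ex_shardingnodes_dnx.sql : drop database @@shardingnode="an$1-10";
    #   create_shardingnodes_dnx.sql  : reload @@config_all;
    #                                   create database @@shardingnode="an$1-10";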
    
    function generate_rm_old_split_dump_files () {
        filename="${test_dir}rm_old_split_dump_files.sh"
        rm -f ${filename}
    cat>"${filename}"<<EOF
    #!/usr/bin/expect -f
    set timeout -1
    set host [lindex \$argv 0]
    set file_name [lindex \$argv 1]
    spawn ssh test@\$host "rm -f \$file_name.sql-an*;ls -l /tmp/splittest/"
    expect "*continue connecting*"
    send "yes\r"
    
    expect "*password:"
    send "mypasswordfortestuser\r"
    expect eof
    EOF
    }
    
    function rm_old_dump_files_on_each_shardingnodes () {
        rm -f ${test_dir}${ex_dump_file_name}.sql-an*
        # on the first run in a new environment, generate ${test_dir}rm_old_split_dump_files.sh
        if [[ "${TEST_ENV}" == "new" ]]; then
            generate_rm_old_split_dump_files
        fi
        
        for host in "${MYSQL_HOST[@]}";do
            echo "${host}"   
            expect ${test_dir}rm_old_split_dump_files.sh "${host}" "${test_dir}${ex_dump_file_name}"
        done
    
    }
    
    # split_cmd.sql contains the dump_file_name; option values such as -l could be parameterized here as well
    function get_split_exec_cost_time () {
        filename="${test_dir}split_cmd.sql"
        rm -f ${filename}
        cat>"${filename}"<<EOF
    split /opt/splitTest/${dump_file_name}.sql /tmp/splittest -sbenchmarksql;
    EOF
        {
            echo "$(date +["%Y-%m-%d %H:%M:%S"]) <==========================> start exec split cmd on dble manager 9066"
            echo "$(date +["%Y-%m-%d %H:%M:%S"]) <=======> startTime"
            # echo "${DBLE_ROOT} < ${filename}"
            echo "split /opt/splitTest/${dump_file_name}.sql /tmp/splittest -sbenchmarksql; "
        }>> time_record.log
    
        timer_start=$(date +%s)
        ${DBLE_ROOT} < ${filename}
        if [[ $? != 0 ]]; then
            echo "$(date +["%Y-%m-%d %H:%M:%S"]) !!!!!!!!!!! fail to exec !!!!!!!!!!" >> time_record.log
        fi
        timer_end=$(date +%s)
        duration=$(echo "$timer_end" "$timer_start" | awk '{print $1-$2}')s
        {
            echo "dble管理端执行split的耗时: ${duration} "
            echo "total time is ${duration} "
            echo "$(date +["%Y-%m-%d %H:%M:%S"]) <=======> endTime"
            echo "$(date +["%Y-%m-%d %H:%M:%S"]) <==========================> finish exec split cmd on dble manager 9066"
            echo " "
        }>> time_record.log
            
    }
    
    # from the dble host, connect to each backend MySQL remotely and run the imports concurrently
    function loadAll_shardingDumpFiles_on_dble_server () {
        {
            echo " "
            echo "$(date +["%Y-%m-%d %H:%M:%S"]) <==========================> loadAll_shardingDumpFiles_on_dble_server  startTime"
            echo "$(date +["%Y-%m-%d %H:%M:%S"]) <=======> startTime"
        }>> time_record.log
    
        timer_s=$(date +%s)
        
        for ((i=1;i<="${shardingNum}";i++))
        do
        {
            # rename split's output (<dump>.sql-anN-*dump) to <dump>.sql-anN.sql on the first pass
            if [ ! -f "${test_dir}${dump_file_name}.sql-an${i}.sql" ]; then
                mv ${test_dir}${dump_file_name}.sql-an"${i}"-*dump ${test_dir}${dump_file_name}.sql-an"${i}".sql
            fi
    
            # map sharding node i to one of the 4 backend MySQL hosts, round-robin
            node=$(( i % 4 ))
    
            case ${node} in
                1) host=${MYSQL_HOST[0]};;
                2) host=${MYSQL_HOST[1]};;
                3) host=${MYSQL_HOST[2]};;
                0) host=${MYSQL_HOST[3]};;
            esac
    
            echo "${MYSQL} -h${host} dh_dn_${i} < ${test_dir}${dump_file_name}.sql-an${i}.sql" >>  time_record.log
            ${MYSQL} -h${host} dh_dn_"${i}" < ${test_dir}${dump_file_name}.sql-an"${i}".sql
            
        }&    
        done
        wait
        timer_e=$(date +%s)
        tduration=$(echo "$timer_e" "$timer_s" | awk '{print $1-$2}')s
        {
            echo "split后的dumpfile文件从dble本机并发导入,总时长(即最长耗时)为:   ${tduration}  "
            echo "$(date +["%Y-%m-%d %H:%M:%S"]) <=======> endTime"
            echo "$(date +["%Y-%m-%d %H:%M:%S"]) <==========================> loadAll_shardingDumpFiles_on_dble_server  finishTime"
            echo " "
        }>> time_record.log
    }
    
    
    # not needed when timing the local (dble-host) import path
    function generate_send_split_dump_files () {
        filename="${test_dir}send_split_dump_files.sh"
        rm -f ${filename}
    cat>"${filename}"<<EOF
    #!/usr/bin/expect -f
    set timeout -1
    set file_num [lindex \$argv 0]
    set host [lindex \$argv 1]
    set file_name [lindex \$argv 2]
    spawn scp \$file_name.sql-an\$file_num.sql test@\$host:/tmp/splittest/.
    expect "*continue connecting*"
    send "yes\r"
    
    expect "*password:"
    send "mypasswordfortestuser\r"
    expect eof
    EOF
    }
    
    # not needed when timing the local (dble-host) import path
    function change_split_dumpfile_name_and_cp_them_to_their_local () {
        # on the first run in a new environment, generate ${test_dir}send_split_dump_files.sh
        if [[ ("${TEST_ENV}" == "new") && ("${LOADIN}" == "remote") ]]; then
            generate_send_split_dump_files
        fi
        
        {
            echo " "
            echo "$(date +["%Y-%m-%d %H:%M:%S"]) <==========================> start change split filename and cp them to their local mysql server hosts"
            echo "$(date +["%Y-%m-%d %H:%M:%S"]) <=======> startTime"
        }>> time_record.log
        timer_st=$(date +%s)
        for ((i=1;i<="${shardingNum}";i++))
        do
        {
            if [ ! -f "${test_dir}${dump_file_name}.sql-an${i}.sql" ]; then
                mv ${test_dir}${dump_file_name}.sql-an"${i}"-*dump ${test_dir}${dump_file_name}.sql-an"${i}".sql
            fi   
    
            node=$(( i % 4 ))
    
            case ${node} in
                1) host=${MYSQL_HOST[0]};;
                2) host=${MYSQL_HOST[1]};;
                3) host=${MYSQL_HOST[2]};;
                0) host=${MYSQL_HOST[3]};;
            esac
    
            # scp ${test_dir}${dump_file_name}.sql-an"${i}".sql test@${host}:/tmp/splittest/.
            expect "${test_dir}"send_split_dump_files.sh "${i}" "${host}" "${test_dir}${dump_file_name}"
        }    
        done
        timer_ed=$(date +%s)
        cpduration=$(echo "$timer_ed" "$timer_st" | awk '{print $1-$2}')s
        {
            echo "split后的dumpfile文件cp到对应后端mysql本机,总时长(即最长耗时)为:   ${cpduration}  "
            echo "$(date +["%Y-%m-%d %H:%M:%S"]) <=======> endTime"
            echo "$(date +["%Y-%m-%d %H:%M:%S"]) <==========================> finish change split filename and cp them to their local mysql server hosts"
            echo " "
        }>> time_record.log
    }
    
    function exec_remote_test_time_on_each_hosts () {
        {
            echo " "
            echo "$(date +["%Y-%m-%d %H:%M:%S"]) <==========================> exec_remote_loadin_on_each_host  startTime"
            echo "$(date +["%Y-%m-%d %H:%M:%S"]) <=======> startTime"
        }>> time_record.log
    
        timer_s=$(date +%s)
        for host in "${MYSQL_HOST[@]}"
        do
        {
            echo "${host}"
            # exec_shell_on_remote_server.sh is assumed to exist in ${test_dir} (it is not generated by this script);
            # it logs in to ${host} and runs the local import of that host's split files
            expect "${test_dir}"exec_shell_on_remote_server.sh "${host}"
        }&
        done
        wait 
        timer_e=$(date +%s)
        tduration=$(echo "$timer_e" "$timer_s" | awk '{print $1-$2}')s
        {
            echo "split后的dumpfile文件从后端mysql本机并发导入,总时长(即最长耗时)为:   ${tduration}  "
            echo "$(date +["%Y-%m-%d %H:%M:%S"]) <=======> endTime"
            echo "$(date +["%Y-%m-%d %H:%M:%S"]) <==========================> exec_remote_loadin_on_each_host  finishTime"
            echo " "
        }>> time_record.log
    
    }
    
    function clean_backend_schemas_and_old_dump_files () {
        rm_old_dump_files_on_each_shardingnodes
        create_backend_mysqls_from_dble_cmd
    }
    
    function test_of_split_dumpfile_on_mysql_respectively () {
        echo "NOW we are testing scp split_dumpfiles to mysql and loadin respectively,and shardingnode num is ${shardingNum}--------------------------------------------------------------">> time_record.log
        clean_backend_schemas_and_old_dump_files
        get_split_exec_cost_time
        change_split_dumpfile_name_and_cp_them_to_their_local
        exec_remote_test_time_on_each_hosts
        echo "NOW we have finished the testing of scp split_dumpfiles to mysql and loadin respectively,and shardingnode num is ${shardingNum}---------------------------------------------">> time_record.log
    }
    
    function test_of_split_dumpfile_on_dbleServer () {
        echo "NOW we are testing loadin on dble server locally,and shardingnode num is ${shardingNum}--------------------------------------------------------------">> time_record.log
        clean_backend_schemas_and_old_dump_files
        get_split_exec_cost_time
        loadAll_shardingDumpFiles_on_dble_server
        echo "NOW we have finished the testing of loadin on dble server locally,and shardingnode num is ${shardingNum}---------------------------------------------">> time_record.log
    }
    
    if [ "${LOADIN}" == "locally" ]; then
        test_of_split_dumpfile_on_dbleServer
    elif [ "${LOADIN}" == "remote" ]; then
        test_of_split_dumpfile_on_mysql_respectively
    else
        echo "please check the variable LOADIN near the top of this script: its value must be 'locally' or 'remote'"
        exit 1
    fi
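
    To run a test round, set the variables at the top first (TEST_ENV, LOADIN, the host IPs, credentials and file names), then execute the script and follow the timing log. A minimal invocation sketch, assuming the script was saved as split_time_test.sh (the file name is illustrative):

    chmod +x split_time_test.sh
    ./split_time_test.sh
    tail -f time_record.log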
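
    After an import round finishes, a quick row count per sharding node confirms the data actually landed. The sketch below reuses the script's variables and its round-robin host mapping; the table name bmsql_item is an assumption about the benchmarksql schema and may need adjusting:

    #!/bin/bash
    # sanity-check sketch: row count per sharding node (bmsql_item is an assumed table name)
    MYSQL='/usr/bin/mysql -uxxxx -pxxxx -P3306'
    MYSQL_HOST=('xx.xx.xx.101' 'xx.xx.xx.102' 'xx.xx.xx.105' 'xx.xx.xx.108')
    shardingNum=10
    for ((i=1;i<=shardingNum;i++)); do
        # same node-to-host mapping as the main script
        node=$(( i % 4 ))
        case ${node} in
            1) host=${MYSQL_HOST[0]};;
            2) host=${MYSQL_HOST[1]};;
            3) host=${MYSQL_HOST[2]};;
            0) host=${MYSQL_HOST[3]};;
        esac
        cnt=$(${MYSQL} -h"${host}" -N -e "select count(*) from dh_dn_${i}.bmsql_item" 2>/dev/null)
        echo "dh_dn_${i} on ${host}: ${cnt:-query failed} rows in bmsql_item"
    done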
    
  • Original article: https://blog.csdn.net/ActionTech/article/details/125520385