• Data integration platform: syncing MySQL data to Hive with DataX (all columns and specified columns)


    1. Data integration platform: syncing MySQL data to Hive (all columns and specified columns)
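    In brief: a Python script reads the table's column metadata from information_schema, maps MySQL column types to Hive types, and writes out a DataX job JSON (mysqlreader to hdfswriter); a shell script then invokes the generator once per table.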

    1. Python environment: version 2.7
    2. Python script
      Arguments:

    source_database: source database name
    source_table: source table name
    source_columns: columns to sync ('all' for every column, or a comma-separated list)
    source_splitPk: DataX split key; must be an integer-typed column
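
    A note on splitPk: DataX's mysqlreader uses this key to split the table into value ranges so the channels configured below (15 here) can read in parallel, which is why an integer-typed column is required. The shell script at the end shows concrete invocations.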

    # coding=utf-8
    import json
    import getopt
    import os
    import sys
    import MySQLdb
    
    # MySQL connection settings; adjust to your environment
    mysql_host = "47.57.227.5"
    mysql_port = "3306"
    mysql_user = "vinson_readonly"
    mysql_passwd = "8AGY5Eqq8Ac8VR7b"
    
    # HDFS NameNode settings; adjust to your environment
    hdfs_nn_host = "mycluster"
    hdfs_nn_port = "8020"
    
    def get_connection():
        return MySQLdb.connect(host=mysql_host, port=int(mysql_port), user=mysql_user, passwd=mysql_passwd)
    
    
    def get_mysql_meta(database, table, columns):
        connection = get_connection()
        cursor = connection.cursor()
        if columns == 'all':
            # 'all' means fetch metadata for every column of the table
            sql = "SELECT COLUMN_NAME, DATA_TYPE FROM information_schema.COLUMNS WHERE TABLE_SCHEMA='%s' AND TABLE_NAME='%s' ORDER BY ORDINAL_POSITION" % (database, table)
        else:
            # A specific column list was passed in: single-quote each name
            # so the list can be used in the IN (...) clause
            columns = ', '.join("'%s'" % col.strip() for col in columns.split(','))
            sql = "SELECT COLUMN_NAME, DATA_TYPE FROM information_schema.COLUMNS WHERE TABLE_SCHEMA='%s' AND TABLE_NAME='%s' AND COLUMN_NAME IN (%s) ORDER BY ORDINAL_POSITION" % (
                database, table, columns)
        cursor.execute(sql)
        fetchall = cursor.fetchall()
        # print(fetchall)
        cursor.close()
        connection.close()
        return fetchall
    
    
    def get_mysql_columns(database, table, source_columns):
        # Column names only, in table order (map returns a list on Python 2)
        return map(lambda x: x[0], get_mysql_meta(database, table, source_columns))
    
    
    def get_hive_columns(database, table, source_columns):
        # Map each MySQL column type to a Hive type for hdfswriter;
        # an unmapped type raises KeyError so gaps surface immediately
        def type_mapping(mysql_type):
            mappings = {
                "bigint": "bigint",
                "int": "bigint",
                "smallint": "bigint",
                "tinyint": "bigint",
                "mediumint": "bigint",
                "decimal": "string",
                "double": "double",
                "float": "float",
                "binary": "string",
                "char": "string",
                "varchar": "string",
                "datetime": "string",
                "time": "string",
                "timestamp": "string",
                "date": "string",
                "text": "string",
                "bit": "string",
            }
            return mappings[mysql_type]
    
        meta = get_mysql_meta(database, table, source_columns)
        return map(lambda x: {"name": x[0], "type": type_mapping(x[1].lower())}, meta)
    
    
    def generate_json(source_database, source_table, source_columns, source_splitPk):
        job = {
            "job": {
                "setting": {
                    "speed": {
                          "channel": 15
                        },
                    "errorLimit": {
                        "record": 0,
                        "percentage": 0.02
                    }
                },
                "content": [{
                    "reader": {
                        "name": "mysqlreader",
                        "batchSize":"8192",
                        "batchByteSize":"33554432",
                        "parameter": {
                            "username": mysql_user,
                            "password": mysql_passwd,
                            "column": get_mysql_columns(source_database, source_table,source_columns),
                            "splitPk": source_splitPk,
                            "connection": [{
                               "table": [source_table],
                                "jdbcUrl": ["jdbc:mysql://" + mysql_host + ":" + mysql_port + "/" + source_database + "?userCompress=true&useCursorFetch=true&useUnicode=true&characterEncoding=utf-8&useSSL=false"]
                            }]
                        }
                    },
                    "writer": {
                        "name": "hdfswriter",
                         "batchSize":"8192",
                         "batchByteSize":"33554432",
                        "parameter": {
                            "defaultFS": "hdfs://" + hdfs_nn_host + ":" + hdfs_nn_port,
                            "fileType": "text",
                            "path": "${targetdir}",
                            "fileName": source_table,
                            "column": get_hive_columns(source_database, source_table,source_columns),
                            "writeMode": "append",
                            "fieldDelimiter": u"\u0001",
                            "compress": "gzip"
                        }
                    },
                    "transformer": [
    
                            {
                              "name": "dx_groovy",
                              "parameter": {
                                "code": "for(int i=0;i\"[\\r\\n]\",\"\"); record.setColumn(i, new StringColumn(newStr)); };};return record;",
                                "extraPackage":[]
                              }
                            }
                          ]
                }]
            }
        }
        # Target directory for the generated config files; adjust as needed
        output_path = "/opt/module/datax/job/import/" + source_database
    
        if not os.path.exists(output_path):
            os.makedirs(output_path)
        with open(os.path.join(output_path, ".".join([source_database, source_table, "json"])), "w") as f:
            json.dump(job, f)
    
    
    def main(args):
        source_database = ""
        source_table = ""
        source_columns = ""
        source_splitPk = ""
    
        options, arguments = getopt.getopt(args, 'd:t:c:k:', ['sourcedb=', 'sourcetbl=', 'columns=', 'splitPk='])
        for opt_name, opt_value in options:
            if opt_name in ('-d', '--sourcedb'):
                source_database = opt_value
            if opt_name in ('-t', '--sourcetbl'):
                source_table = opt_value
            if opt_name in ('-c', '--columns'):
                source_columns = opt_value
            if opt_name in ('-k', '--splitPk'):
                source_splitPk = opt_value
        generate_json(source_database, source_table, source_columns, source_splitPk)
    
    if __name__ == '__main__':
        main(sys.argv[1:])
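
    For orientation, the generated file (e.g. /opt/module/datax/job/import/db/db.table.json) comes out as a single line from json.dump; pretty-printed and trimmed, it looks roughly like this for a hypothetical table with an integer id and a varchar name column:

    {
      "job": {
        "setting": {"speed": {"channel": 15}, "errorLimit": {"record": 0, "percentage": 0.02}},
        "content": [{
          "reader": {
            "name": "mysqlreader",
            "parameter": {
              "column": ["id", "name"],
              "splitPk": "id",
              "connection": [{"table": ["table"], "jdbcUrl": ["jdbc:mysql://47.57.227.5:3306/db?useCompression=true&..."]}]
            }
          },
          "writer": {
            "name": "hdfswriter",
            "parameter": {
              "column": [{"name": "id", "type": "bigint"}, {"name": "name", "type": "string"}],
              "path": "${targetdir}",
              "writeMode": "append",
              "fieldDelimiter": "\u0001",
              "compress": "gzip"
            }
          }
        }]
      }
    }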
    
    3. Shell script
    #!/bin/bash
    # Specified columns: pass a comma-separated list to -c
    python ~/bin/sap_gateway_gen_import_config.py -d db -t table -c Id,created_date -k selfincrementid
    # All columns: pass 'all' to -c
    python ~/bin/sap_gateway_gen_import_config.py -d db -t table -c all -k selfincrementid
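
    To run a generated job, DataX's -p flag supplies the ${targetdir} placeholder used in the writer's path. A sketch, assuming DataX lives under /opt/module/datax as the generator's output path implies, and that the target HDFS directory already exists:

    python /opt/module/datax/bin/datax.py -p"-Dtargetdir=/warehouse/db/table" /opt/module/datax/job/import/db/db.table.json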
    
    
  • Original article: https://blog.csdn.net/m0_37759590/article/details/136270702