# _*_ coding: utf-8 _*_
"""
    Created by Yiutto on 2024/11/22.
"""
import argparse
import pymysql
import re


# host = '172.20.51.224'
# port = 3306
# user = 'data_read'
# passwd = 'Hx2JcsrH'
# db = 'lepos'
# charset = 'utf8'
# mysqlTableName = "t_pay_delay_insurance"
# # 需要把结果输出到哪个文件
# dataxDir = "/home/hadoop/yt/%s.json" % mysqlTableName

def connectDB(host, port, user, passwd, db, charset='utf8'):
    """
    Open a pymysql connection to the given MySQL database.

    :param host: MySQL host
    :param port: MySQL port
    :param user: user name
    :param passwd: password
    :param db: database name
    :param charset: connection charset, defaults to utf8
    :return: an open pymysql Connection
    """
    return pymysql.connect(host=host, port=port, user=user,
                           password=passwd, db=db, charset=charset)


def queryData(host, port, user, passwd, db, tb, etl_type, part_flag, where_flag, pull_type, save_type, hive_db):
    """
    Read table metadata from MySQL and build the hive DDL, the datax json
    and the pull shell script for one table.

    :param host: MySQL host
    :param port: MySQL port
    :param user: user name
    :param passwd: password
    :param db: source database name
    :param tb: source table name
    :param etl_type: 0 single db/table, 1 single db multi table,
                     2 multi db multi table, 3 multi ip multi db multi table
    :param part_flag: 0 not partitioned, 1 partitioned
    :param where_flag: 0 no WHERE condition, 1 WHERE condition
    :param pull_type: 0 pull with sqoop, 1 pull with datax
    :param save_type: hive storage format: orc / parquet / text
    :param hive_db: hive database name (usually ods)
    :return: (createTableStr, dataxJsonStr, shellSrciptStr)
    """
    # Query 0: all column names joined into one comma separated string
    # (sqoop column lists are case sensitive, keep the exact names).
    # NOTE: ORDER BY must be *inside* GROUP_CONCAT — outside the aggregate
    # MySQL does not guarantee the concatenation order.
    sql_columns = "SELECT group_concat(COLUMN_NAME ORDER BY ORDINAL_POSITION SEPARATOR ',') FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name='%s' AND table_schema='%s'" % (
        tb, db)

    # Query 1: column name / comment / data type, in table order.
    sql = "SELECT column_name, column_comment, data_type FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name='%s' AND table_schema='%s' ORDER BY ORDINAL_POSITION" % (
        tb, db)

    # Query 2: auto-increment (or integral) primary key, if any.
    sql_pri_key = "SELECT ifnull(column_name, '') AS pri_name FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name='%s' AND table_schema='%s' AND column_key = 'PRI' AND (extra = 'auto_increment' OR data_type IN ('int', 'bigint'))" % (
        tb, db)

    # Query 3: time-like columns, returned as "column->type" pairs.
    sql_time_cols = "SELECT ifnull(GROUP_CONCAT(concat(column_name, '->', data_type), ', '), '') AS time_index_columns FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name='" + tb + "' AND table_schema='" + db + "' AND (data_type IN ('date', 'time', 'timestamp', 'datetime')  OR column_name LIKE '%time%' OR column_name LIKE '%date%')"

    # Query 4: the table comment.
    sql_table_cmt = "SELECT ifnull(table_comment, '') AS table_comment FROM INFORMATION_SCHEMA.TABLES WHERE table_name = '%s' AND table_schema = '%s'" % (
        tb, db)

    create_table_list = []
    datax_json_list = []
    datax_writer_json_list = []

    # Defaults in case a metadata query fails — they keep the later string
    # building from raising NameError.
    columns = ''
    pri_key = ''
    time_cols = ''
    table_cmt = ''

    con = connectDB(host, port, user, passwd, db)
    cursor = con.cursor()
    try:
        # Query 0: comma separated column list.
        cursor.execute(sql_columns)
        sql_columns_rows = cursor.fetchall()
        columns = sql_columns_rows[0][0]

        # Query 1: per-column metadata.
        cursor.execute(sql)
        for row in cursor.fetchall():
            # Column name, converted from camelCase to snake_case.
            column_name = hump2underline(row[0])
            # Column comment.
            column_comment = row[1]
            # MySQL data type of the column.
            date_type = row[2]
            create_table_list.append("  %s %s comment '%s'" % (column_name, getDataType(date_type), column_comment))
            datax_json_list.append('\"' + column_name + '\"')
            datax_writer_json_list.append(
                "                         { \n                            \"name\":\"%s\",\n                            \"type\":\"%s\"\n                          }" % (
                    column_name, getDataType(date_type)))

        # Query 2: primary key.
        cursor.execute(sql_pri_key)
        pri_key_rows = cursor.fetchall()
        if len(pri_key_rows) > 0:
            pri_key = pri_key_rows[0][0]

        # Query 3: time-like columns.
        cursor.execute(sql_time_cols)
        time_cols_rows = cursor.fetchall()
        if len(time_cols_rows) > 0:
            time_cols = time_cols_rows[0][0]

        # Query 4: table comment.
        cursor.execute(sql_table_cmt)
        table_cmt_rows = cursor.fetchall()
        if len(table_cmt_rows) > 0:
            table_cmt = table_cmt_rows[0][0]
    except Exception as e:
        # Best effort: report and carry on with the defaults set above.
        print("Error: unable to fetch data: %s" % e)
    finally:
        cursor.close()
        con.close()

    # Strip shard suffixes from the db / table name.
    newDbName, newTbName, hive_tb, suffix_flag = getNewDbTbName(db, tb, part_flag)

    # Build the hive CREATE TABLE statement.
    createTableStr = "#==================================================%s.hql============================================================== \r\n" % hive_tb + \
                     createTable(hive_tb, create_table_list, pull_type, etl_type, part_flag, save_type, hive_db, table_cmt)

    if etl_type == 0:  # single db, single table: keep the original names
        newDbName, newTbName = db, tb
    if etl_type == 1:  # single db, multi table: keep the original db name
        newDbName = db

    # DataX json (only generated when pulling with datax).
    dataxJsonStr = ''
    if pull_type == 1:
        dataxJsonStr = "#==================================================%s.json============================================================== \r\n" % hive_tb + \
                       createDataxJson(host, port, user, passwd, newDbName, newTbName, etl_type, datax_json_list,
                                       datax_writer_json_list, part_flag, where_flag, save_type, pri_key, hive_db,
                                       hive_tb)

    # Driver shell script.
    shellSrciptStr = ""
    if pull_type == 1:  # datax pull: no embedded sqoop function
        sqoopStr = ''
        shellSrciptStr = "#================================================%s.sh============================================================== \r\n" % hive_tb + \
                         getShellScript(hive_tb, time_cols, etl_type, part_flag, hive_db, suffix_flag, where_flag,
                                        sqoopStr)
    if pull_type == 0:  # sqoop pull: embed the generated sqoop function
        sqoopStr = createSqoopScript(host, port, user, passwd, newDbName, newTbName, etl_type, part_flag, where_flag,
                                     save_type,
                                     columns, hive_db, hive_tb)
        shellSrciptStr = "#================================================%s.sh============================================================== \r\n" % hive_tb + \
                         getShellScript(hive_tb, time_cols, etl_type, part_flag, hive_db, suffix_flag, where_flag,
                                        sqoopStr)

    return createTableStr, dataxJsonStr, shellSrciptStr


def getShellScript(hive_tb, time_cols, etl_type, part_flag, hive_db, suffix_flag, where_flag, sqoop_str):
    """
    Assemble the shell script that pulls one MySQL table into hive.

    :param hive_tb: hive table name
    :param time_cols: "column->type" pairs of the time-like index columns
    :param etl_type: 0 single db/table, 1 single db multi table,
                     2 multi db multi table, 3 multi datacenter multi db/table
    :param part_flag: 1 if the hive table is partitioned, else 0
    :param hive_db: hive database name
    :param suffix_flag: table name suffix class: df di mi yi xx
    :param where_flag: 1 if a WHERE condition filters the pull, else 0
    :param sqoop_str: sqoop function body when pulling with sqoop, otherwise ''
    :return: the complete shell script as a string
    """
    # Outer loop: iterate over the datacenters / data sources.
    tmp_pull_1s = "\n     #for src in 'LG1' 'SH1' 'LG2' 'SH2'" + \
                  "\n     for src in 'LG' 'SH'" + \
                  "\n     do"
    tmp_pull_1e = "\n     done"

    # Middle loop: iterate over database shards 0-9.
    tmp_pull_2s = "\n     for i in {0..9}" + \
                  "\n     do"
    tmp_pull_2e = "\n     done"

    # Inner loop: iterate over table shards 00-99.
    tmp_pull_3s = "\n         for j in {00..99}" + \
                  "\n         do"
    tmp_pull_3e = "\n         done"

    # Truncate the whole (non partitioned) hive table before the pull.
    truncate_tb = "\n         beeline -u \"jdbc:hive2://nn2:25005\" -n bigdata -e \"" + \
                  "\n         TRUNCATE TABLE " + hive_db + "." + hive_tb + ";\"" + \
                  "\n         if [ $? != 0 ]; then" + \
                  "\n             echo \"FAILED!\"" + \
                  "\n             exit 1" + \
                  "\n         else" + \
                  "\n             echo \"SUCCESS!\"" + \
                  "\n         fi"
    # Drop and re-add the target partition of a partitioned hive table.
    truncate_tb_part = "\n         beeline -u \"jdbc:hive2://nn2:25005\" -n bigdata -e \"" + \
                       "\n         ALTER TABLE " + hive_db + "." + hive_tb + " DROP IF EXISTS PARTITION(dt='${dt}'" + (", src='${src}'" if etl_type == 3 and len(sqoop_str) > 1 else "") + ");" + \
                       "\n         ALTER TABLE " + hive_db + "." + hive_tb + " ADD PARTITION(dt='${dt}'" + (", src='${src}'" if etl_type == 3 and len(sqoop_str) > 1 else "") + ")" + \
                       "\n         LOCATION '/user/hive/warehouse/" + hive_db + ".db/" + hive_tb + "/dt=${dt}" + ("/src=${src}" if etl_type == 3 and len(sqoop_str) > 1 else "") + "';\"" + \
                       "\n         if [ $? != 0 ]; then" + \
                       "\n             echo \"FAILED!\"" + \
                       "\n             exit 1" + \
                       "\n         else" + \
                       "\n             echo \"SUCCESS!\"" + \
                       "\n         fi"

    sh1 = "#!/bin/bash \n %s\n" % sqoop_str
    sh2 = "# Here is the mysql time columns: %s, You should change xxx.json ${partition_col} \npartition_col=xxxxx \n\n" % time_cols

    # Multi datacenter, sharded tables, no time partition.
    datax_sh3 = "\n           /opt/anaconda3/bin/python /opt/datax/bin/datax.py -p \"-Dip1='$ip1' -Dip2='$ip2'\" " + hive_tb + ".json \n"

    sqoop_sh3 = "\n              sqoop_script '$ip1' 'SH' '$i' '$j' &" + \
                "\n              sqoop_script '$ip2' 'LG' '$i' '$j' " + \
                "\n              #sqoop_script '$ip1' 'SH1' '$i' '$j' &" + \
                "\n              #sqoop_script '$ip2' 'LG1' '$i' '$j' &" + \
                "\n              #sqoop_script '$ip3' 'SH2' '$i' '$j' &" + \
                "\n              #sqoop_script '$ip4' 'LG2' '$i' '$j' \n"
    if len(sqoop_str) > 1:  # sqoop pull
        pull_sh3 = "function get_pull_data() {" + \
                   truncate_tb + \
                   tmp_pull_2s + \
                   tmp_pull_3s + \
                   sqoop_sh3 + \
                   tmp_pull_3e + \
                   tmp_pull_2e + \
                   "\n}\n"
    else:  # datax pull
        pull_sh3 = "function get_pull_data() {" + \
                   truncate_tb + \
                   datax_sh3 + \
                   "}\n"

    # Multi datacenter, sharded tables, with time partition.
    # FIX: was "-Dip='$ip2'" — the json expects -Dip2 (see datax_sh3 above).
    part_datax_sh3 = "\n            /opt/anaconda3/bin/python /opt/datax/bin/datax.py -p \"-Dpartition_col='$partition_col' -Ddt='$dt' -Dip1='$ip1' -Dip2='$ip2'\" " + hive_tb + ".json \n"

    part_sqoop_sh3 = "\n              sqoop_script '$ip1' 'SH' '$i' '$j' &\n" + \
                     "              sqoop_script '$ip2' 'LG' '$i' '$j' \n" + \
                     "              #sqoop_script '$ip1' 'SH1' '$i' '$j' &\n" + \
                     "              #sqoop_script '$ip2' 'LG1' '$i' '$j' &\n" + \
                     "              #sqoop_script '$ip3' 'SH2' '$i' '$j' &\n" + \
                     "              #sqoop_script '$ip4' 'LG2' '$i' '$j' \n"

    if len(sqoop_str) > 1:  # sqoop pull
        part_pull_sh3 = "function get_pull_data() {" + \
                        "\n     dt=`date -d \"${1} -0 day\" +%Y-%m-%d`" + \
                        tmp_pull_1s + \
                        truncate_tb_part + \
                        tmp_pull_1e + \
                        tmp_pull_2s + \
                        tmp_pull_3s + \
                        part_sqoop_sh3 + \
                        tmp_pull_3e + \
                        tmp_pull_2e + \
                        "\n}\n"
    else:  # datax pull
        part_pull_sh3 = "function get_pull_data() {" + \
                        "\n       dt=`date -d \"${1} -0 day\" +%Y-%m-%d`" + \
                        truncate_tb_part + \
                        part_datax_sh3 + \
                        "}\n"

    # Sharded db 0-9 / table 00-99, no time partition.
    datax_sh2 = "              /opt/anaconda3/bin/python /opt/datax/bin/datax.py " + hive_tb + ".json\n"
    sqoop_sh2 = "              sqoop_script '$i' '$j'\n"
    if len(sqoop_str) > 1:  # sqoop pull
        pull_sh2 = "function get_pull_data() {" + \
                   truncate_tb + \
                   tmp_pull_2s + \
                   tmp_pull_3s + \
                   sqoop_sh2 + \
                   tmp_pull_3e + \
                   tmp_pull_2e + \
                   "\n}\n"
    else:  # datax pull
        pull_sh2 = "function get_pull_data() {" + \
                   truncate_tb + \
                   datax_sh2 + \
                   "}\n"

    # Sharded db 0-9 / table 00-99, with time partition.
    part_datax_sh2 = "\n              /opt/anaconda3/bin/python /opt/datax/bin/datax.py -p \"-Dpartition_col='$partition_col' -Ddt='$dt'\" " + hive_tb + ".json\n"
    part_sqoop_sh2 = "\n              sqoop_script '$i' '$j'\n"
    if len(sqoop_str) > 1:  # sqoop pull
        part_pull_sh2 = "function get_pull_data() {" + \
                        "\n     dt=`date -d \"${1} -0 day\" +%Y-%m-%d`" + \
                        truncate_tb_part + \
                        tmp_pull_2s + \
                        tmp_pull_3s + \
                        part_sqoop_sh2 + \
                        tmp_pull_3e + \
                        tmp_pull_2e + \
                        "\n}\n"
    else:  # datax pull
        part_pull_sh2 = "function get_pull_data() {" + \
                        "\n     dt=`date -d \"${1} -0 day\" +%Y-%m-%d`" + \
                        truncate_tb_part + \
                        part_datax_sh2 + \
                        "}\n"

    # Sharded table 00-99, no time partition.
    datax_sh1 = "\n              /opt/anaconda3/bin/python /opt/datax/bin/datax.py " + hive_tb + ".json\n"
    sqoop_sh1 = "\n              sqoop_script '$j'\n"
    if len(sqoop_str) > 1:  # sqoop pull
        pull_sh1 = "function get_pull_data() {" + \
                   truncate_tb + \
                   tmp_pull_3s + \
                   sqoop_sh1 + \
                   tmp_pull_3e + \
                   "\n}\n"
    else:  # datax pull
        pull_sh1 = "function get_pull_data() {" + \
                   truncate_tb + \
                   datax_sh1 + \
                   "}\n"

    # Sharded table 00-99, with time partition.
    part_datax_sh1 = "\n              /opt/anaconda3/bin/python /opt/datax/bin/datax.py -p \"-Dpartition_col='$partition_col' -Ddt='$dt'\" " + hive_tb + ".json\n"
    part_sqoop_sh1 = "\n              sqoop_script '$j'\n"
    if len(sqoop_str) > 1:  # sqoop pull
        part_pull_sh1 = "function get_pull_data() {" + \
                        "\n     dt=`date -d \"${1} -0 day\" +%Y-%m-%d`" + \
                        truncate_tb_part + \
                        tmp_pull_3s + \
                        part_sqoop_sh1 + \
                        tmp_pull_3e + \
                        "\n}\n"
    else:  # datax pull
        part_pull_sh1 = "function get_pull_data() {" + \
                        "\n     dt=`date -d \"${1} -0 day\" +%Y-%m-%d`" + \
                        truncate_tb_part + \
                        part_datax_sh1 + \
                        "}\n"

    # Single table, no loops: truncate then pull once.
    pull_sh0 = "beeline -u \"jdbc:hive2://nn2:25005\" -n bigdata -e \"\n TRUNCATE TABLE " + hive_db + "." + hive_tb + \
               ";\"\nif [ $? != 0 ]; then\n    echo \"FAILED!\"\n    exit 1\nelse\n    echo \"SUCCESS!\"\nfi\n" + \
               ("sqoop_script\n" if len(sqoop_str) > 1 else "/opt/anaconda3/bin/python /opt/datax/bin/datax.py " + hive_tb + ".json\n") + \
               "if [ $? != 0 ]; then\n    echo \"FAILED!\"\n    exit 1\nelse\n    echo \"SUCCESS!\"\nfi\n"

    # Date format for the dt variable, per suffix class (day / month / year).
    sh_dt = " -0 day\" +%Y-%m-%d"
    if suffix_flag == "mi":
        sh_dt = "01 -0 day\" +%Y-%m"
    if suffix_flag == "yi":
        sh_dt = "0101 -0 day\" +%Y"
    pull_sh_dt = "function get_pull_data() {\n     dt=`date -d \"${1}" + sh_dt + "`\n     beeline -u \"jdbc:hive2://nn2:25005\" -n bigdata -e \"\n     ALTER TABLE " + hive_db + "." + hive_tb + \
                 " DROP IF EXISTS PARTITION(dt='${dt}');\n     ALTER TABLE " + hive_db + "." + hive_tb + \
                 " ADD PARTITION(dt='${dt}')\n     LOCATION '/user/hive/warehouse/" + hive_db + ".db/" + hive_tb + \
                 "/dt=${dt}';\"\n     if [ $? != 0 ]; then\n         echo \"FAILED!\"\n         exit 1\n     else\n         echo \"SUCCESS!\"\n     fi\n" + ("     j=${1}\n" if etl_type > 0 else "") + \
                 ("    sqoop_script\n" if len(sqoop_str) > 1 else "     /opt/anaconda3/bin/python /opt/datax/bin/datax.py -p \"-Ddt='$dt' " + ("-Dpartition_col='$partition_col' " if where_flag == 1 else "") + ("\" " if etl_type == 0 else "-Dj='$j'\" ") + hive_tb + ".json\n") + \
                 "     if [ $? != 0 ]; then\n          echo \"FAILED!->$j\"\n          #exit 1\n      else\n          echo \"SUCCESS!->$j\"\n      fi\n" + \
                 "}\n"
    # Call the function once, with no date argument.
    no_time_sh = "\nget_pull_data\n"

    # Daily driver loop.
    time_sh_di = "# You should change \"begin_dt\" \"end_dt\" \"%Y%m%d\" \nbegin_dt=20180814\nend_dt=20181231\n\nbeg_s=`date -d \"$begin_dt\" +%s`\nend_s=`date -d \"$end_dt\" +%s`\nwhile [[ \"$beg_s\" -le \"$end_s\" ]]; do\n  day=`date -d @$beg_s +%Y%m%d`\n  echo \"当前日期：$day\"\n  get_pull_data \"${day}\"\n  #按天增加\n  beg_s=$((beg_s+86400));\ndone\n"

    # Monthly driver loop.
    time_sh_mi = "# You should change \"begin_mon\" \"end_mon\" \"%Y%m\" \nbegin_mon=201405\nend_mon=201903\n\nbeg_s=`date -d \"${begin_mon}01\" +%s`\nend_s=`date -d \"${end_mon}01\" +%s`\nwhile [[ \"$beg_s\" -le \"$end_s\" ]]; do\n  mon=`date -d @$beg_s +%Y%m`\n  echo \"当前月份：$mon\"\n  get_pull_data \"${mon}\"\n  #按月增加\n  beg_s=`date -d \"${mon}01 +1 months\" +%s`;\ndone\n"

    # Yearly driver loop.
    time_sh_yi = "# You should change \"begin_year\" \"end_year\" \"%Y\" \nbegin_year=2014\nend_year=2019\n\nbeg_s=`date -d \"${begin_year}0101\" +%s`\nend_s=`date -d \"${end_year}0101\" +%s`\nwhile [[ \"$beg_s\" -le \"$end_s\" ]]; do\n  year=`date -d @$beg_s +%Y`\n  echo \"当前月份：$year\"\n  get_pull_data \"${year}\"\n  #按月增加\n  beg_s=`date -d \"${year}0101 +1 years\" +%s`;\ndone\n"

    # Pick the final script for this etl_type / suffix / partition combo.
    shellScriptStr = ""

    if etl_type == 0:  # single db, single table
        shellScriptStr = sh1 + (sh2 + pull_sh_dt + time_sh_di if part_flag == 1 else pull_sh0)

    if etl_type == 1:  # single db, multi table (daily/monthly/yearly or 00-99 shards)
        if suffix_flag == "di":
            shellScriptStr = sh1 + sh2 + pull_sh_dt + time_sh_di

        if suffix_flag == "xx":  # 00-99 shards, with or without time partition
            shellScriptStr = sh1 + sh2 + (part_pull_sh1 if part_flag == 1 else pull_sh1) + (
                time_sh_di if part_flag == 1 else no_time_sh)

        if suffix_flag == "mi":
            shellScriptStr = sh1 + sh2 + pull_sh_dt + time_sh_mi

        if suffix_flag == "yi":
            shellScriptStr = sh1 + sh2 + pull_sh_dt + time_sh_yi

    if etl_type == 2:  # multi db, multi table
        shellScriptStr = sh1 + sh2 + (part_pull_sh2 if part_flag == 1 or where_flag == 1 else pull_sh2) + (
            time_sh_di if part_flag == 1 else no_time_sh)

    if etl_type == 3:  # multi datacenter, multi db, multi table
        shellScriptStr = sh1 + sh2 + (part_pull_sh3 if part_flag == 1 or where_flag == 1 else pull_sh3) + (
            time_sh_di if part_flag == 1 else no_time_sh)

    return shellScriptStr


def getNewDbTbName(db_name, tb_name, part_flag):
    """
    Strip shard suffixes from a MySQL db name and table name.

    e.g. lepos0 -> lepos, t_operation_info_00 -> t_operation_info.

    :param db_name: source database name (possibly with a trailing shard digit)
    :param tb_name: source table name (possibly with _NN / _yyyymmdd suffixes)
    :param part_flag: 1 if the hive table is partitioned, else 0
    :return: (newDbName, newTbName, hive_tb, suffix_flag)
    """
    newDbName = re.sub('[0-9]$', '', db_name).lower()
    newTbName = re.sub('(_[0-9]{2,10}){1,3}$', '', tb_name).lower()

    # Whatever was stripped, minus underscores: yyyymmdd / yyyymm / yyyy / NN.
    suffixStr = tb_name.replace(newTbName, '').replace('_', '')
    print("suffixStr: " + suffixStr)

    # Map the suffix length to its class: daily / monthly / yearly / 00-99 shard.
    suffix_flag = ""
    if suffixStr.isdigit():
        suffix_flag = {8: "di", 6: "mi", 4: "yi", 2: "xx"}.get(len(suffixStr), "")

    if part_flag == 1:
        # Partitioned: keep the time-based class, default to daily otherwise.
        part_suffix = suffix_flag if suffix_flag in ("di", "mi", "yi") else "di"
        hive_tb = "%s_%s_%s" % (newDbName, newTbName, part_suffix)
    else:
        # Not partitioned: "_df" full-table naming.
        hive_tb = "%s_%s_df" % (newDbName, newTbName)

    return newDbName, newTbName, hive_tb, suffix_flag


def hump2underline(hump_str):
    """
    Convert a camelCase name to snake_case.

    :param hump_str: the camelCase (hump) name, e.g. "createDate"
    :return: the lower-cased snake_case name, e.g. "create_date"
    """
    # Insert "_" at every boundary where a lower-case letter or digit is
    # followed by an upper-case letter, then lower-case the whole result.
    # (Parameter renamed from "str": shadowing the builtin hid str() here.)
    pattern = re.compile(r'([a-z]|\d)([A-Z])')
    return re.sub(pattern, r'\1_\2', hump_str).lower()


def getDataType(str):
    """
    Map a MySQL column data type to the corresponding hive data type.

    Unknown types pass through unchanged.

    :param str: the MySQL data type name
    :return: the hive data type name
    """
    # All textual / temporal / blob types become hive strings.
    string_types = ("enum", "varchar", "text", "blob", "longblob", "longtext",
                    "char", "json", "set", "date", "time", "mediumblob",
                    "mediumtext", "datetime", "timestamp")
    if str in string_types:
        return "string"

    # Numeric and binary mappings; decimal maps to double (changed 20250829,
    # previously "decimal").
    numeric_map = {
        "tinyint": "int",
        "smallint": "int",
        "mediumint": "int",
        "int": "bigint",
        "bigint": "bigint",
        "double": "double",
        "float": "double",
        "decimal": "double",
        "binary": "binary",
        "varbinary": "binary",
    }
    return numeric_map.get(str, str)

def createTable(hive_tb, create_table_list, pull_type, etl_type, part_flag, save_type, hive_db, table_cmt):
    """
    Build the hive CREATE TABLE statement.

    :param hive_tb: hive table name
    :param create_table_list: pre-rendered "  col type comment '...'" lines
    :param pull_type: 1 datax, 0 sqoop
    :param etl_type: 0 single db/table, 1 single db multi table,
                     2 multi db multi table, 3 multi source multi db/table
    :param part_flag: 1 if partitioned, else 0
    :param save_type: hive storage format: orc / parquet / text
    :param hive_db: hive database name (usually ods)
    :param table_cmt: table comment
    :return: the CREATE TABLE statement as a string
    """
    hive_str1 = "use " + hive_db + "; \n-- drop table if EXISTS %s;\n" % hive_tb
    hive_str2 = "CREATE TABLE if not EXISTS %s \n(\n" % hive_tb
    hive_str3 = ',\n'.join(create_table_list)
    hive_str4 = "\n) COMMENT '%s'" % table_cmt

    partition_str = ""
    if part_flag == 1:
        # Multi-datacenter sqoop pulls get an extra "src" partition column.
        # FIX: a comma is required between the dt and src partition columns,
        # otherwise the generated HiveQL is invalid.
        partition_str = "PARTITIONED BY (\n    dt string COMMENT 'yyyy-MM-dd或yyyy-MM或yyyy' %s\n)" % (
            ",\n    src string COMMENT '来源SH1,LG1,SH2,LG2'" if etl_type == 3 and pull_type == 0 else "")
    # "text" is spelled "textfile" in hive STORED AS clauses.
    hive_str5 = "\n%s row format delimited fields terminated by '\\u0001'\nSTORED AS %s;" % (partition_str, save_type if save_type != 'text' else 'textfile')
    return hive_str1 + hive_str2 + hive_str3 + hive_str4 + hive_str5

def createSqoopScript(host, port, user, passwd, newDbName, newTbName, etl_type, part_flag, where_flag, save_type, columns, hive_db, hive_tb):
    """

    :param host:
    :param port:
    :param user:
    :param passwd:
    :param newDbName:
    :param newTbName:
    :param etl_type: 0单库单表，1单库多表，2多库多表，3多ip多库多表
    :param part_flag:
    :param where_flag:
    :param save_type:
    :param columns:
    :param hive_db:
    :param hive_tb:
    :return:
    """
    print(columns)
    sqoop_prefix1 = "function sqoop_script() { \n"
    sqoop_prefix2 = ""
    if etl_type == 3:
        sqoop_prefix2 = "    ip=${1}\n    src=${2}\n    i=${3}\n    j=${4}\n"
    if etl_type == 2:
        sqoop_prefix2 = "    i=${1}\n    j=${2}\n"
    if etl_type == 1:
        sqoop_prefix2 = "    j=${1}\n"

    sqoop_start_Str0 = "    sqoop import -D mapred.job.queue.name=root.sqoop \\\n    --mapreduce-job-name %s \\\n" % (newDbName + "#" + newTbName)
    sqoop_Str1 = "    --connect 'jdbc:mysql://%s:%s/%s%s?useSSL=false&useUnicode=true&characterEncoding=utf-8&zeroDateTimeBehavior=convertToNull&tinyInt1isBit=false&defaultFetchSize=1000&useCursorFetch=true' \\\n" % ("${ip}" if etl_type == 3 else host, port, newDbName, "${i}" if etl_type in (2, 3) else "")
    sqoop_Str2 = "    --username %s \\\n    --password %s \\\n" % (user, passwd)
    sqoop_where_Str3 = ''
    if part_flag == 1: # 分区表
        # 多库多表添加来源
        partition_path = "/dt=${dt}%s" % ("/src=${src}" if etl_type == 3 else "")
        target_dir = "/user/hive/warehouse/" + hive_db + ".db/%s%s" % (hive_tb, partition_path)
        sqoop_where_Str3 = "    --query \"SELECT %s FROM %s%s WHERE \$CONDITIONS AND %s\" \\\n    --target-dir '%s' \\\n    --append \\\n" % (columns, newTbName, "_${j}" if etl_type != 0 else "", "${partition_col} >= '${dt}' and ${partition_col} < DATE_ADD('${dt}',INTERVAL 1 DAY)" if where_flag == 1 else "1=1", target_dir)

    else:
        sqoop_where_Str3 = "    --table %s \\\n    --columns '%s' \\\n    --where \"%s\" \\\n    --hive-import \\\n    --hive-table %s \\\n" % ((newTbName + "_${j}" if etl_type>0 else newTbName), columns, "${partition_col} >= '${dt}' and ${partition_col} < DATE_ADD('${dt}',INTERVAL 1 DAY)" if where_flag == 1 else "1=1", hive_db + "." + hive_tb)


    sqoop_end_Str5 = "    --fields-terminated-by '\\0001' \\\n    --lines-terminated-by '\\n' \\\n    --null-string '\\\\N' \\\n    --null-non-string '\\\\N' \\\n    --hive-drop-import-delims \\\n    --delete-target-dir \\\n    --compress \\\n    --compression-codec org.apache.hadoop.io.compress.SnappyCodec \\\n    -m 1 \n"
    sqoop_suffix = "    if [ $? != 0 ]; then\n        echo \"FAILED!\"\n        exit 1\n    else\n        echo \"SUCCESS!\"\n    fi\n}\n"
    sqoopScriptStr = sqoop_prefix1 + sqoop_prefix2 + sqoop_start_Str0 + sqoop_Str1 + sqoop_Str2 + sqoop_where_Str3 + sqoop_end_Str5 + sqoop_suffix

    return sqoopScriptStr

def createDataxJson(host, port, user, passwd, newDbName, newTbName, etl_type, datax_json_list, datax_writer_json_list,
                    part_flag, where_flag, save_type, pri_key, hive_db, hive_tb):
    """
    Build the DataX job-config JSON text (mysqlreader -> hdfswriter) for one table.

    :param host: MySQL host; for etl_type 3 the reader URLs instead use the literal
                 placeholders {ip1}/{ip2}, to be substituted by the launch script
    :param port: MySQL port
    :param user: MySQL username
    :param passwd: MySQL password
    :param newDbName: source database name (base name when databases are sharded)
    :param newTbName: source table name (base name when tables are sharded)
    :param etl_type: 0 single db single table, 1 single db sharded tables,
                     2 sharded dbs sharded tables, 3 multi-IP sharded dbs/tables
    :param datax_json_list: reader column snippets, joined into the reader "column" array
    :param datax_writer_json_list: writer column snippets, joined into the writer "column" array
    :param part_flag: 0 full table (df), 1 dt-partitioned table (di)
    :param where_flag: 0 no where clause, 1 time-column where clause with ${dt} placeholders
    :param save_type: HDFS file type: orc / parquet / text
    :param pri_key: auto-increment primary key, used as splitPk
    :param hive_db: target Hive database (default ods)
    :param hive_tb: target Hive table name, used in the HDFS target path
    :return: the complete DataX job JSON as a string
    """
    # Static HDFS HA client configuration embedded into the writer's hadoopConfig.
    hadoop_conf_json_list = [
        "                          \"dfs.ha.namenodes.nameservice1\": \"namenode60,namenode123\" ",
        "                          \"dfs.namenode.rpc-address.nameservice1.namenode60\": \"dn1:8020\" ",
        "                          \"dfs.namenode.rpc-address.nameservice1.namenode123\": \"nn2:8020\" ",
        "                          \"dfs.nameservices\": \"nameservice1\" ",
        "                          \"dfs.client.failover.proxy.provider.nameservice1\": \"org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider\" "
    ]

    columns = ',\n                               '.join(datax_json_list)
    json_str1 = "{\n   \"job\":{" + \
                "\n       \"setting\":{" + \
                "\n           \"speed\":{" + \
                "\n               \"channel\":3," + \
                "\n               \"byte\":1048576" + \
                "\n           },"
    json_str2 = "\n           \"errorLimit\":{" + \
                "\n                  \"record\":0," + \
                "\n                  \"percentage\":0.02 " + \
                "\n           }" + \
                "\n       },"
    json_str3 = "\n     \"content\":[" + \
                "\n            {" + \
                "\n              \"reader\":{" + \
                "\n                  \"name\":\"mysqlreader\"," + \
                "\n                  \"parameter\":{"
    json_str4 = f"\n                      \"username\":\"{user}\"," + \
                f"\n                      \"password\":\"{passwd}\"," + \
                "\n                      \"column\":[" + \
                f"\n                               {columns} " + \
                "\n                        ],"
    json_str5 = "\n                        \"where\":\"%s\",\n" % ("${partition_col} >= '${dt}' and ${partition_col} < DATE_ADD('${dt}',INTERVAL 1 DAY)" if where_flag == 1 else "1=1")
    json_str6 = f"                        \"splitPk\":\"{pri_key}\"," + \
                "\n                      \"connection\":["

    # Reader JDBC URL for the host-addressed layouts (etl_type 0/1/2).
    jdbc_line = f"\n                                       \"jdbc:mysql://{host}:{port}/?useUnicode=true&characterEncoding=utf-8&zeroDateTimeBehavior=convertToNull&tinyInt1isBit=false&defaultFetchSize=5000&useCursorFetch=true\""

    # One table entry per shard database 0..9 (e.g. "db0.tb_[00-99]"), replacing the
    # former 10 hand-written lines per branch. The ", " join plus trailing space
    # reproduces the original concatenated text byte-for-byte.
    shard_tables = ", ".join(
        f"\n                                   \"{newDbName}{i}.{newTbName}_[00-99]\"" for i in range(10)
    ) + " "

    if etl_type in (0, 1):  # single db: one table, or 100 sharded tables
        tb_suffix = "" if etl_type == 0 else "_[00-99]"
        json_str7 = "\n                            { " + \
                    "\n                              \"table\":[" + \
                    f"\n                                   \"{newDbName}.{newTbName}{tb_suffix}\" " + \
                    "\n                              ]," + \
                    "\n                              \"jdbcUrl\":[" + \
                    jdbc_line + \
                    "\n                              ]" + \
                    "\n                            }"
    elif etl_type == 2:  # sharded dbs (db0..db9), each with sharded tables
        json_str7 = "\n                            { " + \
                    "\n                              \"table\":[" + \
                    shard_tables + \
                    "\n                              ]," + \
                    "\n                              \"jdbcUrl\":[" + \
                    jdbc_line + \
                    "\n                              ]" + \
                    "\n                            }"
    else:  # etl_type 3: two server rooms -> one connection entry per {ip1}/{ip2} placeholder
        conn_entries = []
        for ip_placeholder in ("{ip1}", "{ip2}"):
            conn_entries.append(
                "\n                            { " +
                "\n                              \"table\":[" +
                shard_tables +
                "\n                              ]," +
                "\n                              \"jdbcUrl\":[" +
                "\n                                       \"jdbc:mysql://" + ip_placeholder +
                f":{port}/?useUnicode=true&characterEncoding=utf-8&zeroDateTimeBehavior=convertToNull&tinyInt1isBit=false&defaultFetchSize=5000&useCursorFetch=true\"" +
                "\n                              ]" +
                "\n                            }"
            )
        json_str7 = ",".join(conn_entries)

    json_str8 = "\n                      ]\n                   }\n             },\n"
    json_str9 = "             \"writer\":{\n                  \"name\":\"" + ("hdfsparquetwriter" if save_type == "parquet" else "hdfswriter") + "\",\n                  \"parameter\":{\n                      \"defaultFS\":\"hdfs://nameservice1\",\n                      \"fileType\":\"%s\",\n" % save_type

    # Partitioned tables land in a dt=${dt} sub-directory of the Hive warehouse path.
    partition_path = "/dt=${dt}" if part_flag == 1 else ""

    json_str10 = "                      \"path\":\"/user/hive/warehouse/" + hive_db + ".db/%s%s\",\n" % (hive_tb, partition_path)

    # etl_type 3 appends the source room (${src}) to the file name to avoid collisions.
    file_name = (newTbName if etl_type != 3 else newTbName + "_${src}")

    json_str11 = "                      \"fileName\":\"%s\"," % file_name
    json_str12 = "\n                    \"column\":[\n"
    json_str13 = "%s \n                      ],\n" % (',\n'.join(datax_writer_json_list))
    json_str14 = "                     \"hadoopConfig\":{\n"
    json_str15 = "%s \n                      },\n" % (',\n'.join(hadoop_conf_json_list))
    json_str16 = "                     \"writeMode\":\"append\"," + \
                 "\n                   \"fieldDelimiter\":\"\\u0001\" " + \
                 "\n                 } " + \
                 "\n                }" + \
                 "\n            }" + \
                 "\n        ]" + \
                 "\n    }" + \
                 "\n}"
    dataxJsonStr = json_str1 + json_str2 + json_str3 + json_str4 + json_str5 + \
                   json_str6 + json_str7 + json_str8 + json_str9 + json_str10 + \
                   json_str11 + json_str12 + json_str13 + json_str14 + json_str15 + json_str16
    return dataxJsonStr


if __name__ == '__main__':
    try:
        # Command-line interface: collects MySQL connection info and ETL options,
        # then prints the generated Hive DDL plus the sqoop or DataX artifacts.
        parser = argparse.ArgumentParser()
        parser.description = 'please enter eight parameters ip and P ...'
        # NOTE(review): hardcoded production-looking credentials as defaults are a
        # security smell — consider requiring them or reading from the environment.
        parser.add_argument("-ip", "--ip", help="this is host ip", dest="ip", type=str, default="172.20.51.224")
        parser.add_argument("-P", "--port", help="this is port", dest="port", type=int, default=3306)
        parser.add_argument("-u", "--user", help="this is username", dest="user", type=str, default="data_read")
        parser.add_argument("-p", "--passwd", help="this is password", dest="passwd", type=str, default="Hx2JcsrH")
        parser.add_argument("-db", "--database", help="this is database", dest="db", type=str,
                            default="information_schema")
        parser.add_argument("-tb", "--table", help="this is table", dest="tb", type=str, default="COLUMNS")
        parser.add_argument("-et", "--etl_type",
                            help="0: Only db Only tb，1: Only db Muti tb，2: Muti db Muti tb，3: Muti ip Muti db Muti tb",
                            dest="etl_type", type=int, default=0)
        parser.add_argument("-pt", "--is_partition", help="0: no partition, 1: dt-partition", dest="part_flag",
                            type=int, default=0)
        parser.add_argument("-wh", "--is_where", help="0: no where, 1: time col where", dest="where_flag", type=int,
                            default=0)
        parser.add_argument("-plt", "--pull_type", help="0: sqoop, 1: datax", dest="pull_type", type=int, default=0)
        # BUG FIX: default was the int 0 — argparse applies `type` only to values
        # coming from the command line, so save_type defaulted to 0 (not a str),
        # breaking the `save_type == "parquet"` check and emitting "fileType":"0".
        # "text" matches the help text ("sqoop only support text").
        parser.add_argument("-st", "--save_type", help="sqoop only support text, datax support (orc, parquet, text)", dest="save_type", type=str,
                            default="text")
        parser.add_argument("-hdb", "--hive_database", help="hive database", dest="hive_db", type=str, default="ods")

        args = parser.parse_args()

        createTableStr, dataxJsonStr, shellScriptStr = queryData(args.ip, args.port, args.user, args.passwd, args.db,
                                                                 args.tb, args.etl_type, args.part_flag,
                                                                 args.where_flag, args.pull_type, args.save_type, args.hive_db)

        if args.pull_type == 0:
            # sqoop pull: Hive DDL + sqoop shell script
            print(createTableStr)
            print(shellScriptStr)
        else:
            # datax pull: Hive DDL + DataX job JSON + launch shell script
            print(createTableStr)
            print(dataxJsonStr)
            print(shellScriptStr)

    except Exception as e:
        # Top-level CLI boundary: report and exit instead of dumping a traceback.
        print('Error:', e)