"""
实现思路
1、读取源业务库所有表的建表语句
2、解析原表建表语句的字段和表名  --需要使用正则表达式
3、循环生成datax的json脚本，hive的建表语句，shell脚本
"""

import os
import re

# Mapping from MySQL column-type substrings to Flink SQL types.
# Lookup is by substring, first match in insertion order wins (see
# mysql_type_to_flink_type); e.g. "tinyint" hits the "int" entry —
# NOTE(review): confirm that is the intended behavior.
mapping = {
    "int": "INT",
    "varchar": "STRING",
    "decimal": "decimal(10,2)",
    "datetime": "TIMESTAMP(3)",
    "tinytext": "STRING"
}


# Convert MySQL column types to their Flink SQL equivalents.
def mysql_type_to_flink_type(mysql_col_type, type_mapping=None):
    """Map each MySQL column-type string to a Flink SQL type.

    Matching is by substring: the first key of the mapping that occurs in
    the MySQL type string wins (dict insertion order). Types with no match
    fall back to "STRING".

    :param mysql_col_type: iterable of MySQL type strings, e.g. ["varchar(32)"]
    :param type_mapping: optional {mysql_substring: flink_type} dict;
                         defaults to the module-level ``mapping``
    :return: list of Flink SQL type strings, one per input type
    """
    if type_mapping is None:
        type_mapping = mapping

    flink_col_type = []
    for mysql_type in mysql_col_type:
        # First matching entry wins; unknown types default to STRING.
        flink_type = next(
            (flink for key, flink in type_mapping.items() if key in mysql_type),
            "STRING",
        )
        flink_col_type.append(flink_type)

    return flink_col_type


if __name__ == '__main__':
    root_dir = "../mysql_ddl/"
    # 1. Read the CREATE TABLE statement files of every source-DB table.
    mysql_ddl_files = os.listdir(root_dir)

    for ddl_file in mysql_ddl_files:
        # Full path of the DDL file.
        ddl_path = os.path.join(root_dir, ddl_file)

        # Table name = file name without its extension.
        mysql_table_name = ddl_file.split(".")[0]

        # Derived names for the Hive table, the CDC source table and the
        # Kafka topic / ODS script.
        hive_table_name = f"ods_{mysql_table_name}"  # NOTE(review): currently unused
        cdc_table_name = f"cdc_{mysql_table_name}"
        kafka_topic_name = f"ods_kafka_{mysql_table_name}"

        # Read the whole DDL into a single string.
        with open(ddl_path, mode="r", encoding="utf-8") as ddl_fp:
            ddl_str = ddl_fp.read()

        # re.S (DOTALL) makes "." also match newlines so the column section
        # may span multiple lines; raw strings avoid the invalid "\(" escape.
        col_match = re.search(r"\((.*)PRIMARY KEY", ddl_str, re.S)
        if col_match is None:
            # Not a CREATE TABLE with a primary key — skip instead of crashing.
            print(f"skip {ddl_file}: no column section / PRIMARY KEY found")
            continue

        # Drop the trailing comma before PRIMARY KEY, strip backticks, and
        # collapse "decimal(10,2)" so the later split on "," stays safe.
        col_str = col_match.group(1)
        col_str = col_str.strip()[:-1].replace("decimal(10,2)", "decimal").replace("`", "")

        # Split into individual "name type ..." column definitions.
        col_list = col_str.split(",")
        mysql_col_name = [col.strip().split(" ")[0] for col in col_list]
        mysql_col_type = [col.strip().split(" ")[1] for col in col_list]

        # Map the MySQL column types to Flink SQL types.
        flink_col_type = mysql_type_to_flink_type(mysql_col_type)

        # Extract the primary-key column list.
        pk = re.search(r"PRIMARY KEY \((.*?)\)", ddl_str, re.S).group(1)

        # ---------------- generate the Flink CDC SQL script ----------------
        with open("model/flink_cdc_model.sql", mode="r", encoding="utf-8") as model_fp:
            # Template with {placeholder} markers.
            flink_model = model_fp.read()

        # Substitute the CDC table name.
        flink_model = flink_model.replace("{cdc_table_name}", cdc_table_name)

        # Build the "name TYPE" column list, one column per line.
        flink_col_name = ",".join(f"\n    {c} {t}" for c, t in zip(mysql_col_name, flink_col_type))
        flink_model = flink_model.replace("{flink_col_name}", f"{flink_col_name},".lstrip())

        # Substitute primary key, topic and table names.
        flink_model = flink_model.replace("{pk}", pk)
        flink_model = flink_model.replace("{kafka_topic_name}", kafka_topic_name)
        flink_model = flink_model.replace("{ods_table_name}", kafka_topic_name)
        flink_model = flink_model.replace("{mysql_table_name}", mysql_table_name)

        # Write the generated SQL script (the with-block closes the file).
        with open(f"../ods/{kafka_topic_name}.sql", mode="w", encoding="utf-8") as out_fp:
            out_fp.write(flink_model)


