# Load every MySQL CREATE TABLE statement and generate the matching Hive DDL,
# DataX job configs and launcher scripts.
import os
import re
import json

# Maps a MySQL column type (length/precision suffix stripped) to the Hive
# type emitted in the generated DDL.
mysql_and_hive_type_mapping = dict(
    int="bigint",
    varchar="String",
    datetime="String",
    decimal="Double",
    tinytext="String",
)

# For each MySQL table that supports incremental loads, the timestamp-like
# column used to select changed rows.
update_column_mapping = dict(
    t_commodity="update_time",
    t_commodity_cate="update_time",
    t_coupon="create_time",
    t_coupon_member="update_time",
    t_coupon_order="create_time",
    t_delivery="arrive_time",
    t_member="update_time",
    t_member_addr="update_time",
    t_order="update_time",
    t_order_commodity="update_time",
    t_shop="update_time",
    t_shop_order="done_time",
    t_user="update_time",
)

def _read_text(path):
    """Read a text file fully as one UTF-8 string."""
    with open(path, mode="r", encoding="utf8") as f:
        return f.read()


def _render_template(template_path, output_path, replacements, newline=None):
    """Fill a text template and write the result to *output_path*.

    Placeholders are substituted in the insertion order of *replacements*.
    *newline* is forwarded to open() so shell scripts can force "\n" line
    endings regardless of the platform generating them.
    """
    content = _read_text(template_path)
    for placeholder, value in replacements.items():
        content = content.replace(placeholder, value)
    with open(output_path, mode="w", encoding="utf8", newline=newline) as out:
        out.write(content)


def _parse_mysql_ddl(ddl_str):
    """Parse one MySQL CREATE TABLE statement.

    Returns (table_name, hive_fields, primary_key) where *hive_fields* is a
    list of (column_name, hive_type) pairs with each MySQL type already
    mapped to its Hive equivalent.
    """
    # Table name is the backtick-quoted identifier on the first line.
    table_name = re.search(r"`(.*)`", ddl_str.split("\n")[0]).group(1)
    # Column definitions are everything between the opening "(\n" and the
    # PRIMARY KEY clause; re.S lets "." match across newlines.
    columns_block = re.search(r"\(\n(.*)PRIMARY", ddl_str, re.S).group(1)
    hive_fields = []
    # The last ",\n" chunk is only the indentation before PRIMARY KEY, not a
    # column definition, hence the [:-1].
    for column_def in columns_block.split(",\n")[:-1]:
        tokens = column_def.strip().split(" ")
        name = tokens[0].replace("`", "")
        # Strip any length/precision suffix, e.g. "varchar(20)" -> "varchar".
        mysql_type = tokens[1].split("(")[0]
        # Unmapped MySQL types fall back to the sentinel "NoneType" so the
        # gap is visible in the generated DDL instead of crashing here.
        hive_type = mysql_and_hive_type_mapping.get(mysql_type, "NoneType")
        hive_fields.append((name, hive_type))
    # Primary key column; used by DataX as splitPk.
    primary_key = re.search(r"PRIMARY KEY \(`(.*)`\)", ddl_str, re.S).group(1)
    return table_name, hive_fields, primary_key


def main():
    """Generate all Hive/DataX artifacts for every DDL file in ../mysql_ddl."""
    for mysql_ddl in os.listdir("../mysql_ddl"):
        ddl_str = _read_text(f"../mysql_ddl/{mysql_ddl}")
        mysql_table_name, hive_fields, mysql_pk = _parse_mysql_ddl(ddl_str)

        columns = [name for name, _ in hive_fields]
        # Quoted column list for the DataX mysqlreader "column" setting.
        mysql_column = ",".join(f'"{name}"' for name in columns)
        # "<tab>name type" lines for the Hive CREATE TABLE body.
        fields_and_types = ",\n".join(
            f"\t{name} {hive_type}" for name, hive_type in hive_fields
        )
        # JSON column spec for the DataX hdfswriter.
        hive_column_and_types = json.dumps(
            [{"name": name, "type": hive_type} for name, hive_type in hive_fields]
        )

        ods_table_name = f"ods_{mysql_table_name}_d"
        ################## Full-load ODS table DDL ##################
        _render_template(
            "./template/ods_create_table.sql",
            f"../ddl/{ods_table_name}.sql",
            {
                "{ods_table_name}": ods_table_name,
                "{fields_and_types}": fields_and_types,
            },
        )
        ################## Full-load DataX job config ##################
        _render_template(
            "./template/datax_etl.json",
            f"../datax/datax_{ods_table_name}.json",
            {
                "{mysql_column}": mysql_column,
                "{pk}": mysql_pk,
                "{mysql_table_name}": mysql_table_name,
                "{ods_table_name}": ods_table_name,
                "{hive_column_and_types}": hive_column_and_types,
            },
        )
        ################## Full-load launcher script ##################
        # newline="\n" forces Unix line endings for the shell script.
        _render_template(
            "./template/sh_datax.sh",
            f"../bin/start_{ods_table_name}_datax_job.sh",
            {"{ods_table_name}": ods_table_name},
            newline="\n",
        )

        # Incremental artifacts only exist for tables with a known update column.
        if mysql_table_name in update_column_mapping:
            update_column = update_column_mapping[mysql_table_name]
            ods_delta_table_name = f"ods_{mysql_table_name}_delta_d"
            ################## Incremental ODS table DDL ##################
            _render_template(
                "./template/ods_delta_create_table.sql",
                f"../ddl_delta/{ods_delta_table_name}.sql",
                {
                    "{ods_delta_table_name}": ods_delta_table_name,
                    "{fields_and_types}": fields_and_types,
                },
            )
            ################## Incremental DataX job config ##################
            _render_template(
                "./template/datax_delta_etl.json",
                f"../datax_delta/datax_{ods_delta_table_name}.json",
                {
                    "{mysql_column}": mysql_column,
                    "{pk}": mysql_pk,
                    "{update_column}": update_column,
                    "{mysql_table_name}": mysql_table_name,
                    "{ods_delta_table_name}": ods_delta_table_name,
                    "{hive_column_and_types}": hive_column_and_types,
                },
            )
            ################## Delta-into-full merge SQL ##################
            _render_template(
                "./template/merge.sql",
                f"../merge/merge_{ods_table_name}.sql",
                {
                    "{ods_table_name}": ods_table_name,
                    "{columns}": ",".join(columns),
                    "{pk}": mysql_pk,
                    "{update_column}": update_column,
                    "{ods_delta_table_name}": ods_delta_table_name,
                },
            )
            ################## Incremental launcher script ##################
            _render_template(
                "./template/sh_datax_delta.sh",
                f"../bin_delta/start_merge_{ods_table_name}_datax_job.sh",
                {
                    "{ods_delta_table_name}": ods_delta_table_name,
                    "{ods_table_name}": ods_table_name,
                },
                newline="\n",
            )


if __name__ == "__main__":
    main()