import os.path


def handleOneMysqlTable(table):
    '''
    Extract table name, field list and comments from one MySQL CREATE TABLE chunk.

    :param table: raw DDL text of a single table (Navicat-style export, i.e.
                  ``CREATE TABLE `name`  ( ... ) ENGINE = ... COMMENT = '...';``)
    :return: dict with keys ``name`` (table name), ``fields`` (list of
             ``"name---type---comment"`` strings) and ``comment`` (table comment)
    '''
    # Table name: last whitespace-separated token before " (", backticks dropped.
    table_name = table.split(" (")[0].strip().split(" ")[-1][1:-1]
    # Everything after ") ENGINE" carries the table-level options, incl. COMMENT.
    table_comment_str = table.split("`  (")[1].split(") ENGINE")[1]

    table_comment = ""
    if "COMMENT" in table_comment_str:
        # Take the full quoted value. The old split-on-space truncated
        # multi-word comments and left a trailing "';" on the result.
        table_comment = table_comment_str.split("COMMENT = ")[1].split("'")[1]

    # Field block: body between "(" and ") ENGINE", without the PRIMARY KEY tail.
    fields = table.split("`  (")[1].split(") ENGINE")[0].split("PRIMARY KEY")[0].strip().strip(",")
    result = []
    for field_info in fields.split(",\n"):
        field_info = field_info.strip().split("COMMENT")
        field_name_type = field_info[0].strip().split(" ")
        # Strip backticks per token. The old code stripped the whole
        # "name type ..." string, which left a trailing backtick on the
        # field name (e.g. "id`" instead of "id").
        field_name = field_name_type[0].strip("`").lower()
        field_type = field_name_type[1].rstrip("(")
        field_comment = ""
        if len(field_info) == 2:
            # COMMENT 'xxx' -> xxx
            field_comment = field_info[1].strip().strip("'")

        result.append(f"{field_name}---{field_type}---{field_comment}")
    return {"name": table_name, "fields": result, "comment": table_comment}


def getMysqlFile(file_path):
    '''
    Read a MySQL DDL export file and parse every table in it.

    :param file_path: path of the MySQL DDL file
    :return: dict mapping table name to
             ``{"fields": [names, types, comments], "comment": table_comment}``
             where the three inner tuples are column-aligned per field.
    '''
    print("---------读取mysql库操作------------")
    # 1. Read the whole MySQL DDL file.
    with open(file_path, encoding='utf8', errors='ignore') as f:
        content = f.read()

    # 2. Split into per-table chunks on the Navicat separator line.
    tables = content.split("-- ----------------------------")

    # 3. Extract fields, table name and comment for every table.
    mysql_tables = {}
    for table in tables:
        # Skip empty chunks and the "Table structure" banner chunks.
        if table == "" or "Table structure" in table:
            continue
        result = handleOneMysqlTable(table)
        table_name = result["name"]
        fields = result["fields"]
        comment = result["comment"]
        # Transpose "name---type---comment" rows into three parallel tuples.
        # Materialize as a list: a bare zip() iterator can only be consumed once.
        fields_df = list(zip(*[field.split("---") for field in fields]))
        mysql_tables[table_name] = {"fields": fields_df, "comment": comment}
        print(f"{table_name}\t{comment}")

    return mysql_tables


def handleOneHiveTable(table):
    '''
    Extract table name, field list and comments from one Hive CREATE TABLE chunk.

    :param table: raw DDL text following ``CREATE TABLE IF NOT EXISTS``
                  (already stripped by the caller)
    :return: dict with keys ``name`` (table name), ``fields`` (list of
             ``"name---type---comment"`` strings) and ``comment`` (table comment)
    '''
    # 1. Table name: part before the field block, last dot component, no backticks.
    table_info = table.split("`\n (\n`")
    table_main_info = table_info[1].split("PARTITIONED BY")[0]
    table_name = table_info[0].split(".")[-1].strip("`")

    # 2. Table COMMENT: text after the closing ") " of the field block.
    table_comment_str = table_main_info.split("\n) ")[1]
    table_comment = ""
    if "COMMENT" in table_comment_str:
        # Take the full quoted value. The old split-on-space truncated
        # multi-word comments at the first space.
        table_comment = table_comment_str.split("'")[1]
    # 3. Fields: one "`name` type COMMENT 'xxx'" entry per line.
    fields_info = table_main_info.split("\n) ")[0].split(",\n")
    result = []
    for filed_info in fields_info:
        filed_info_all = filed_info.strip().split("COMMENT")
        filed_info_list = filed_info_all[0].split(" ")
        field_name = filed_info_list[0].strip("`")
        field_type = filed_info_list[1]
        field_comment = ""
        if len(filed_info_all) == 2:
            # COMMENT 'xxx' -> xxx
            field_comment = filed_info_all[1].strip().strip("'")
        result.append(f"{field_name}---{field_type}---{field_comment}")

    # (dead "pass" after return removed)
    return {"name": table_name, "fields": result, "comment": table_comment}


def getHiveFile(file_path):
    '''
    Read a Hive DDL export file and parse every table in it.

    :param file_path: path of the Hive DDL file
    :return: dict mapping table name to
             ``{"fields": [names, types, comments], "comment": table_comment}``
             where the three inner tuples are column-aligned per field.
    '''
    print("---------读取hive库操作------------")
    # 1. Read the whole Hive DDL file.
    with open(file_path, encoding='utf8', errors='ignore') as f:
        content = f.read()
    # 2. Split into per-table chunks; [1:] drops text before the first CREATE.
    tables = content.strip().split("CREATE TABLE IF NOT EXISTS")[1:]
    # 3. Extract fields, table name and comment for every table.
    hive_tables = {}
    for table in tables:
        result = handleOneHiveTable(table.strip())
        # Transpose "name---type---comment" rows into three parallel tuples.
        # Materialize as a list: a bare zip() iterator can only be consumed once.
        fields_df = list(zip(*[field.split("---") for field in result["fields"]]))
        hive_tables[result["name"]] = {"fields": fields_df,
                                       "comment": result["comment"]}
    return hive_tables


if __name__ == '__main__':
    import pandas as pd

    #########################################################################
    file_path = "data/ods_pub"  # directory holding the exported DDL files
    mysql_file_name = "ods_sg-ddl-20231013101924.ddl" # MySQL DDL file name
    hive_file_name = "saas_account.sql" # Hive DDL file name

    pre_fix = "ods_sg_pub_saas_account_"  # Hive table-name prefix
    #########################################################################
    # if not os.path.exists(file_path):
    #     os.mkdir(file_path)

    mysql_sql_file_path = f"{file_path}/{mysql_file_name}"  # MySQL DDL path
    hive_sql_file_path = f"{file_path}/{hive_file_name}"  # Hive DDL path
    mysqlTables = getMysqlFile(mysql_sql_file_path)
    hiveTables = getHiveFile(hive_sql_file_path)

    # writer = pd.ExcelWriter('data/ods_sg_zl/data/result.xls')
    #
    # file = pd.ExcelFile("data/ods_sg_zl/data/模型.xlsx")
    # for sheet_name in file.sheet_names:
    #     data_pd = pd.read_excel("data/ods_sg_zl/data/模型.xlsx", sheet_name=sheet_name)
    #     data_pd.to_excel(writer, index=None, engine='openpyxl', sheet_name=sheet_name)

    # Warn when the two exports don't contain the same number of tables.
    if len(mysqlTables) != len(hiveTables):
        print(f"mysql个数：{len(mysqlTables)},hive个数：{len(hiveTables)}")

    for mysqltable in mysqlTables:
        # Expected matching Hive table: prefix + MySQL name + "_df".
        hivetable = f"{pre_fix}{mysqltable}_df"
        if hivetable not in hiveTables:
            print("新增表" + mysqltable)
            continue

        hive_filed_data = list(hiveTables[hivetable]["fields"])
        mysql_filed_data = list(mysqlTables[mysqltable]["fields"])

        # "fields" is transposed as [names, types, comments] (see getHiveFile).
        english_name = hive_filed_data[0]
        chinese_name = hive_filed_data[2]  # Hive field comment doubles as the Chinese name
        field_type = hive_filed_data[1]
        field_comment = hive_filed_data[2]
        source_table_name = [mysqltable] * len(hive_filed_data[0])
        source_english_name = mysql_filed_data[0]
        # NOTE(review): this is the MySQL *type* (e.g. varchar(255)) exported as
        # the "source field length" column — confirm that is intended.
        source_field_lenth = mysql_filed_data[1]
        field_length = len(hive_filed_data[0])

        if len(english_name) != len(source_english_name):
            # Field counts differ between Hive and MySQL: blank out the source
            # columns so the CSV still lines up row-for-row.
            print(">>>>字段修改表:" + mysqltable, hivetable)
            source_field_lenth = [" "] * field_length
            source_english_name = [" "] * field_length

        data = {
            "英文字段名": english_name,
            "中文字段名": chinese_name,
            "字段类型": field_type,
            "字段注释": field_comment,
            "口径（取数逻辑）": [" "] * field_length,
            "业务限定": [" "] * field_length,
            "来源表_英文名": source_table_name,
            # Bug fix: this column previously used `english_name`, which ignored
            # the blanking above and always echoed the Hive names back out.
            "来源字段_英文名": source_english_name,
            "来源字段_字段长度": source_field_lenth
        }
        try:
            data_pd = pd.DataFrame(data)
            data_pd.to_csv(f"{file_path}/{hivetable}.csv", index=None)

            # data_pd.to_excel(writer,index=None,engine='openpyxl',sheet_name=hivetable)
        except Exception as e:
            # Keep going on a single bad table, but record which one failed.
            print(mysqltable, hivetable, e)