import re
import os
import json
from typing import Dict, Callable, Optional, Union, List

# Mapping from MySQL column types to Hive column types.
# Keys are regex patterns matched with fullmatch (case-insensitive) against the
# upper-cased MySQL type text; values are either a literal Hive type name or a
# callable that builds the Hive type from the regex match object.
MYSQL_TO_HIVE_TYPE_MAPPING: Dict[str, Union[str, Callable]] = {
    r'TINYINT(\(\d+\))?(\s+UNSIGNED)?': 'TINYINT',
    r'SMALLINT(\(\d+\))?(\s+UNSIGNED)?': 'SMALLINT',
    # MySQL MEDIUMINT (24-bit) fits in a 32-bit Hive INT; previously it fell
    # through to the STRING fallback.
    r'MEDIUMINT(\(\d+\))?(\s+UNSIGNED)?': 'INT',
    r'INT(\(\d+\))?(\s+UNSIGNED)?': 'INT',
    r'INTEGER(\(\d+\))?(\s+UNSIGNED)?': 'INT',
    r'BIGINT(\(\d+\))?(\s+UNSIGNED)?': 'BIGINT',
    r'FLOAT(\(\d+,\d+\))?(\s+UNSIGNED)?': 'FLOAT',
    r'DOUBLE(\(\d+,\d+\))?(\s+UNSIGNED)?': 'DOUBLE',
    # BUG FIX: the old pattern wrapped the whole "(p,s)" text in group 1, so
    # int(m.group(1)) raised ValueError. Groups now capture the bare digits,
    # and precision/scale are optional (MySQL defaults: DECIMAL == DECIMAL(10,0)).
    r'DECIMAL(?:\(\s*(\d+)\s*(?:,\s*(\d+)\s*)?\))?':
        lambda m: f"DECIMAL({int(m.group(1) or 10)},{int(m.group(2) or 0)})",
    # MySQL BOOL/BOOLEAN (alias of TINYINT(1)); previously fell back to STRING.
    r'BOOL(EAN)?': 'BOOLEAN',
    r'DATE': 'DATE',
    r'DATETIME': 'TIMESTAMP',
    r'TIMESTAMP(\(\d+\))?': 'TIMESTAMP',
    # Hive has no time-of-day type.
    r'TIME': 'STRING',
    r'YEAR(\(\d+\))?': 'SMALLINT',
    r'CHAR\(\s*\d+\s*\)': 'STRING',
    r'VARCHAR\(\s*\d+\s*\)': 'STRING',
    r'TINYTEXT': 'STRING',
    r'TEXT': 'STRING',
    r'MEDIUMTEXT': 'STRING',
    r'LONGTEXT': 'STRING',
    r'BINARY\(\s*\d+\s*\)': 'BINARY',
    r'VARBINARY\(\s*\d+\s*\)': 'BINARY',
    r'TINYBLOB': 'BINARY',
    r'BLOB': 'BINARY',
    r'MEDIUMBLOB': 'BINARY',
    r'LONGBLOB': 'BINARY',
    r'ENUM\([^)]+\)': 'STRING',
    r'SET\([^)]+\)': 'ARRAY<STRING>',
    r'JSON': 'STRING',
    r'GEOMETRY': 'BINARY'
}


class HiveTableGenerator:
    """Converts MySQL CREATE TABLE DDL files into Hive external-table DDL."""

    def __init__(self, config: Dict):
        """
        Initialize the converter.

        :param config: configuration dict with keys:
            - MYSQL_SQL_DIR: path to a MySQL SQL file, or a directory of them
            - HIVE_SQL_DIR: output directory for generated Hive SQL files
            - TBLPROPERTIES: optional table-property key/value pairs
        """
        self.mysql_path = config['MYSQL_SQL_DIR']
        self.hive_dir = config['HIVE_SQL_DIR']
        self.tblproperties = config.get('TBLPROPERTIES', {})

        # Make sure the output directory exists before any file is written.
        os.makedirs(self.hive_dir, exist_ok=True)

    def map_mysql_type_to_hive(self, mysql_type: str) -> str:
        """Map a MySQL column type string to the corresponding Hive type.

        Falls back to STRING for any type without an explicit mapping.
        """
        mysql_type = mysql_type.upper().strip()

        for pattern, hive_type in MYSQL_TO_HIVE_TYPE_MAPPING.items():
            match = re.fullmatch(pattern, mysql_type, re.IGNORECASE)
            if match:
                # Callable entries build the Hive type from the match
                # (e.g. DECIMAL precision/scale); others are literals.
                return hive_type(match) if callable(hive_type) else hive_type

        return 'STRING'

    def parse_mysql_create_table(self, sql: str) -> Dict:
        """Parse a MySQL CREATE TABLE statement.

        :param sql: a single CREATE TABLE statement (comments stripped and
            whitespace collapsed, as done by process_mysql_file).
        :return: dict with 'table_name', 'columns' (list of dicts carrying
            'name', 'mysql_type', 'comment') and 'table_comment'.
        :raises ValueError: if the table name or column section is missing.
        """
        # Table name, optionally backtick-quoted.
        table_name_match = re.search(
            r'CREATE\s+TABLE\s+(?:IF\s+NOT\s+EXISTS\s+)?`?([^\s`]+)`?',
            sql,
            re.IGNORECASE
        )
        if not table_name_match:
            raise ValueError("无法从SQL中提取表名")
        table_name = table_name_match.group(1)

        # Column-definition section: everything between the first '(' and the
        # LAST ')'.  NOTE(review): a ')' inside a trailing table option (e.g.
        # the table COMMENT text) would wrongly extend this span — assumed
        # not to occur in the input DDL.
        columns_section_match = re.search(r'\((.*)\)', sql, re.DOTALL)
        if not columns_section_match:
            raise ValueError("无法从SQL中提取字段定义部分")
        columns_section = columns_section_match.group(1)

        # Split on commas that are not inside parentheses, so types such as
        # DECIMAL(10,2) or ENUM('a','b') are not broken apart.
        columns = []
        column_defs = re.split(r',\s*(?![^()]*\))', columns_section.strip())

        for column_def in column_defs:
            column_def = column_def.strip()
            # Skip constraint/index clauses; only real column defs remain.
            # (Upper-cased so lowercase DDL is handled too.)
            if not column_def or column_def.upper().startswith(
                    ('PRIMARY KEY', 'KEY', 'UNIQUE', 'INDEX',
                     'CONSTRAINT', 'FOREIGN KEY')):
                continue

            # Column name plus the FULL type: base word, optional
            # parenthesized arguments, optional UNSIGNED modifier.
            # BUG FIX: the old pattern ([^\s,]+) stopped at the first comma,
            # truncating e.g. DECIMAL(10,2) to "DECIMAL(10" so it could never
            # match the DECIMAL mapping.
            column_match = re.match(
                r'`?([^`\s]+)`?\s+(\w+(?:\s*\([^)]*\))?(?:\s+UNSIGNED)?)',
                column_def,
                re.IGNORECASE
            )
            if column_match:
                column_name = column_match.group(1)
                mysql_type = column_match.group(2)

                # Optional per-column COMMENT '...'.
                comment_match = re.search(r'COMMENT\s+\'([^\']*)\'', column_def, re.IGNORECASE)
                comment = comment_match.group(1) if comment_match else None

                columns.append({
                    'name': column_name,
                    'mysql_type': mysql_type,
                    'comment': comment
                })

        # Table-level COMMENT='...' option (requires '=' so it cannot collide
        # with per-column comments, which use no '=').
        table_comment_match = re.search(r'COMMENT\s*=\s*\'([^\']*)\'', sql, re.IGNORECASE)
        table_comment = table_comment_match.group(1) if table_comment_match else None

        return {
            'table_name': table_name,
            'columns': columns,
            'table_comment': table_comment
        }

    def generate_hive_create_table(self, table_info: Dict) -> str:
        """Render a Hive CREATE EXTERNAL TABLE statement.

        :param table_info: result of parse_mysql_create_table.
        :return: the Hive DDL text (with all backticks stripped).
        """
        table_name = table_info['table_name']
        columns = table_info['columns']
        table_comment = table_info.get('table_comment')

        # Column definitions with mapped Hive types and optional comments.
        column_defs = []
        for column in columns:
            hive_type = self.map_mysql_type_to_hive(column['mysql_type'])
            column_def = f"`{column['name']}` {hive_type}"
            if column.get('comment'):
                column_def += f" COMMENT '{column['comment']}'"
            column_defs.append(column_def)

        # Keep only properties with a non-empty value.
        tblproperties = [
            f"'{key}'='{value}'"
            for key, value in self.tblproperties.items()
            if value
        ]

        hive_sql = f"CREATE EXTERNAL TABLE IF NOT EXISTS `{table_name}` (\n"
        hive_sql += ",\n".join(f"  {col_def}" for col_def in column_defs)
        hive_sql += "\n)"

        if table_comment:
            hive_sql += f"\nCOMMENT '{table_comment}'"

        # BUG FIX: the dt partition clause was previously emitted only when
        # the table had a comment; every generated table must be partitioned.
        hive_sql += "\nPARTITIONED BY (dt STRING comment '分区字段yyyy-MM-dd')"

        # Raw string keeps the backslash out of the f-string expression.
        delimiter = r"\t"
        hive_sql += f"\nROW FORMAT DELIMITED\nFIELDS TERMINATED BY '{delimiter}'\nSTORED AS PARQUET"

        if tblproperties:
            # One property per line for readability.
            props = ",\n  ".join(tblproperties)
            hive_sql += f"\nTBLPROPERTIES (\n  {props}\n)"

        # Strip quoting backticks from the final DDL.  NOTE(review): this
        # also removes any backtick appearing inside comment text.
        return hive_sql.replace('`', '')

    def process_mysql_file(self, mysql_file: str):
        """Convert a single MySQL SQL file; return True on success.

        Errors are reported and swallowed so a batch run can continue with
        the remaining files.
        """
        try:
            with open(mysql_file, 'r', encoding='utf-8') as f:
                mysql_sql = f.read()

            # Strip '--' and '/* */' comments, then collapse all whitespace
            # so the statement can be parsed as a single line.
            mysql_sql = re.sub(r'--.*?\n', '', mysql_sql)
            mysql_sql = re.sub(r'/\*.*?\*/', '', mysql_sql, flags=re.DOTALL)
            mysql_sql = re.sub(r'\s+', ' ', mysql_sql).strip()

            table_info = self.parse_mysql_create_table(mysql_sql)
            hive_sql = self.generate_hive_create_table(table_info)

            # Output file is named after the parsed table, not the input file.
            output_file = os.path.join(
                self.hive_dir,
                f"mysql_{table_info['table_name']}.hql"
            )

            with open(output_file, 'w', encoding='utf-8') as f:
                f.write(hive_sql)

            print(f"转换完成: {mysql_file} -> {output_file}")
            return True
        except Exception as e:
            # Deliberate best-effort handling: report and move on.
            print(f"处理文件 {mysql_file} 时出错: {str(e)}")
            return False

    def run(self):
        """Convert the configured file, or every .sql file under the
        configured directory (searched recursively).

        :raises ValueError: if the configured path does not exist.
        """
        if os.path.isfile(self.mysql_path):
            # Single-file mode.
            self.process_mysql_file(self.mysql_path)
        elif os.path.isdir(self.mysql_path):
            # Directory mode: walk the tree and convert each .sql file.
            for root, _, files in os.walk(self.mysql_path):
                for file in files:
                    if file.endswith('.sql'):
                        self.process_mysql_file(os.path.join(root, file))
        else:
            raise ValueError(f"路径不存在: {self.mysql_path}")


def main(config_json: str) -> None:
    """Parse a JSON configuration string and run the DDL conversion.

    :param config_json: JSON text with the keys HiveTableGenerator expects
        (MYSQL_SQL_DIR, HIVE_SQL_DIR, optional TBLPROPERTIES).

    Any failure (bad JSON, missing path, I/O error) is reported to stdout;
    the broad except is acceptable here because this is the top-level
    script boundary.
    """
    try:
        config = json.loads(config_json)
        generator = HiveTableGenerator(config)
        generator.run()
        print("所有文件转换完成")
    except Exception as e:
        print(f"程序运行出错: {str(e)}")


if __name__ == '__main__':
    # Example configuration JSON.
    # NOTE(review): paths are developer-local Windows paths — adjust before
    # running on another machine.
    config_json = """
    {
        "MYSQL_SQL_DIR": "D:/projects/yl_procject/yl-bgdm-airflow/src/kumiler_platform/ddl_create_script/mysql_source_ddl",
        "HIVE_SQL_DIR": "D:/projects/yl_procject/yl-bgdm-airflow/src/kumiler_platform/hive_output",
        "TBLPROPERTIES": {
            "CREATOR": "kumiler.lu",
            "SOURCE_SYSTEM": "spm",
            "TRANSLATED_FROM": "mysql",
            "SYNCH_RULE": "full",
            "SYNCH_COL": "update_time",
            "FREQUENCY": "daily"
        }
    }
    """
    main(config_json)