#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import os
import re
from datetime import datetime


# Configuration parameters (edit these to match your environment).
# Both paths must be existing directories.
ORACLE_SQL_DIR = "D:/projects/yl_procject/yl-bgdm-airflow/src/kumiler_platform/ddl_create_script/source_table_ddl"  # input dir of Oracle DDL .sql files
HIVE_SQL_DIR = "D:/projects/yl_procject/yl-bgdm-airflow/src/kumiler_platform/hive_output"  # output dir for generated Hive DDL files
CREATOR = "kumiler.lu"  # emitted into TBLPROPERTIES 'creator'
SOURCE_SYSTEM = "ERP_SYSTEM"  # emitted into TBLPROPERTIES 'source_system'

# SQL keywords: tokens that must never be treated as column names when
# building the Hive column list (compared lower-cased).
SQL_KEYWORDS = {
    'create', 'table', 'not', 'null', 'default', 'comment', 'on', 'is',
    'partition', 'by', 'range', 'add', 'primary', 'key', 'foreign',
    'references', 'check', 'unique', 'index', 'constraint', 'using'
}

# Oracle -> Hive data type mapping.
# Keys are regex patterns applied with re.fullmatch against the upper-cased,
# stripped Oracle type text (see convert_data_type), so each pattern must
# cover the entire type token including any parenthesised size.
# NOTE(review): a bare NUMBER (no precision) and NUMBER(19..38) match no
# pattern here and therefore fall back to STRING — confirm that is intended.
TYPE_MAPPING = {
    r'NUMBER\(\s*[1-3]\s*\)': 'TINYINT',
    r'NUMBER\(\s*[4-5]\s*\)': 'SMALLINT',
    r'NUMBER\(\s*[6-9]\s*\)': 'INT',
    r'NUMBER\(\s*1[0-8]\s*\)': 'BIGINT',
    # DECIMAL is special-cased in convert_data_type: precision/scale are
    # carried over from the original NUMBER(p,s).
    r'NUMBER\(\s*\d+\s*,\s*\d+\s*\)': 'DECIMAL',
    r'FLOAT(\s*\(\s*\d+\s*\))?': 'FLOAT',
    r'REAL': 'FLOAT',
    r'BINARY_FLOAT': 'FLOAT',
    r'BINARY_DOUBLE': 'DOUBLE',
    r'DATE': 'TIMESTAMP',
    r'TIMESTAMP(\s*\(\s*\d+\s*\))?': 'TIMESTAMP',
    r'TIMESTAMP(\s*\(\s*\d+\s*\))?\s*WITH\s*TIME\s*ZONE': 'STRING',
    r'TIMESTAMP(\s*\(\s*\d+\s*\))?\s*WITH\s*LOCAL\s*TIME\s*ZONE': 'TIMESTAMP',
    r'INTERVAL\s+YEAR(\s*\(\s*\d+\s*\))?\s+TO\s+MONTH': 'STRING',
    r'INTERVAL\s+DAY(\s*\(\s*\d+\s*\))?\s+TO\s+SECOND(\s*\(\s*\d+\s*\))?': 'STRING',
    # All character types collapse to STRING (Hive has no length constraint).
    r'CHAR\s*\(\s*\d+\s*\)': 'STRING',
    r'VARCHAR2\s*\(\s*\d+\s*\)': 'STRING',
    r'NCHAR\s*\(\s*\d+\s*\)': 'STRING',
    r'NVARCHAR2\s*\(\s*\d+\s*\)': 'STRING',
    r'CLOB': 'STRING',
    r'NCLOB': 'STRING',
    r'BLOB': 'BINARY',
    r'BFILE': 'BINARY',
    r'RAW\s*\(\s*\d+\s*\)': 'BINARY',
    r'LONG\s*RAW': 'BINARY',
    r'ROWID': 'STRING',
    r'UROWID': 'STRING',
    r'XMLTYPE': 'STRING',
    r'SDO_GEOMETRY': 'BINARY'
}

def is_valid_column_name(name):
    """Return True unless *name* is (case-insensitively) a reserved SQL keyword."""
    lowered = name.lower()
    return lowered not in SQL_KEYWORDS

def convert_data_type(oracle_type):
    """Map an Oracle column type string to its Hive equivalent.

    The type text is normalised (stripped, upper-cased) and matched in
    full against each TYPE_MAPPING pattern; unmatched types fall back
    to STRING.
    """
    normalized = oracle_type.upper().strip()
    hive_type = next(
        (target for pattern, target in TYPE_MAPPING.items()
         if re.fullmatch(pattern, normalized)),
        None,
    )
    if hive_type is None:
        return 'STRING'
    if hive_type == 'DECIMAL':
        # Keep the original precision/scale, e.g. NUMBER(10,2) -> DECIMAL(10,2).
        return normalized.replace('NUMBER', 'DECIMAL')
    return hive_type



def validate_path(path, is_dir=True):
    """Validate that *path* exists and is of the expected kind.

    Raises ValueError when the path is missing, or when it is not a
    directory (is_dir=True) / not a regular file (is_dir=False).
    Returns *path* unchanged on success.
    """
    if not os.path.exists(path):
        raise ValueError(f"路径不存在: {path}")
    kind_ok = os.path.isdir(path) if is_dir else os.path.isfile(path)
    if not kind_ok:
        label = "路径不是目录" if is_dir else "路径不是文件"
        raise ValueError(f"{label}: {path}")
    return path


def extract_table_definition(content):
    """Locate the CREATE TABLE statement in an Oracle DDL script.

    Returns a 3-tuple (db_name, table_name, columns_body):
      db_name      -- target Hive database: 'spmi_ods' when the qualified
                      Oracle name contains 'spmi', otherwise 'jms_ods'
      table_name   -- table name with any schema prefix stripped
      columns_body -- raw text between the outer parentheses of the statement
    Returns (None, None, None) when no CREATE TABLE statement is found, so
    callers can always unpack three values.
    """
    # Collapse the script onto one line so the regex can span line breaks.
    # NOTE(review): '--' line comments are NOT stripped; a comment containing
    # ';' or ')' could confuse the regex — verify against real input DDL.
    clean_content = ' '.join(line.strip() for line in content.split('\n'))

    table_match = re.search(
        r'CREATE\s+(?:GLOBAL\s+TEMPORARY\s+)?TABLE\s+([^\s(]+)\s*\((.*?)\)\s*[^)]*;',
        clean_content,
        re.IGNORECASE | re.DOTALL
    )

    if not table_match:
        print("调试: 未找到CREATE TABLE语句")
        print(f"内容片段: {clean_content[:200]}...")
        # Fixed: previously returned (None, None), which made the caller's
        # 3-value unpacking raise ValueError on every unparseable file.
        return None, None, None

    qualified_name = table_match.group(1).strip()
    print('table_name', qualified_name)
    # Fixed: the schema-stripped name was computed but never used, producing
    # invalid Hive identifiers like db.schema.table in the generated DDL.
    table_name = qualified_name.split('.')[-1]
    # Case-insensitive so upper-case Oracle DDL (e.g. SPMI.FOO) routes correctly.
    db_name = 'spmi_ods' if 'spmi' in qualified_name.lower() else 'jms_ods'

    return db_name, table_name, table_match.group(2)


def parse_columns(columns_content):
    """Parse the body of a CREATE TABLE statement into {column_name: hive_type}."""
    # Split on commas that are not inside parentheses, protecting
    # NUMBER(10,2) and parenthesised DEFAULT expressions.
    raw_definitions = re.split(r',\s*(?![^()]*\))', columns_content)

    parsed = {}
    for definition in raw_definitions:
        definition = definition.strip()
        if not definition:
            continue

        # First token is the column name, second the Oracle type (with an
        # optional parenthesised size); trailing DEFAULT / NULL clauses are
        # consumed by the optional groups and ignored.
        match = re.match(
            r'^([^\s]+)\s+([^\s(]+(?:\([^)]*\))?)(?:\s+(?:DEFAULT\s+[^,]+)?)?(?:\s+(?:NOT\s+NULL|NULL))?',
            definition,
            re.IGNORECASE
        )
        if match is None:
            continue

        name = match.group(1).strip()
        oracle_type = match.group(2).strip()
        parsed[name] = convert_data_type(oracle_type)

    return parsed


def generate_hive_sql(db_name, table_name, columns, comments):
    """Render the Hive CREATE EXTERNAL TABLE DDL for one Oracle table.

    Parameters:
      db_name    -- target Hive database name
      table_name -- Hive table name (no schema prefix)
      columns    -- {column_name: hive_type}; names that are SQL keywords
                    are skipped
      comments   -- {'table_comment': str|None, 'column_comments': {name: str}}
    Returns the complete DDL statement. The whole statement is lower-cased
    before being returned, so comments and TBLPROPERTIES values are emitted
    in lower case as well.
    """
    # Fixed: removed an unused `current_time` local — datetime.now() was
    # computed on every call but never written into the DDL.

    # Build the column definition lines, skipping SQL-keyword names.
    column_lines = []
    for col_name, col_type in columns.items():
        if not is_valid_column_name(col_name):
            continue

        col_comment = comments['column_comments'].get(col_name, '')
        # Escape single quotes for the SQL string literal.
        safe_comment = col_comment.replace("'", "''") if col_comment else ''

        col_line = f"    {col_name} {col_type}"
        if safe_comment:
            col_line += f" COMMENT '{safe_comment}'"
        column_lines.append(col_line)

    # Fall back to a generated table comment when the DDL carried none.
    table_comment = comments['table_comment'] or f'从Oracle表 {table_name} 转换的外部表'
    safe_table_comment = table_comment.replace("'", "''")

    # Assemble the parts separately to keep the template f-string simple.
    columns_part = ',\n'.join(column_lines)
    table_comment_part = f"COMMENT '{safe_table_comment}'"

    hive_sql = f"""CREATE EXTERNAL TABLE IF NOT EXISTS {db_name}.{table_name} (
{columns_part}
)
{table_comment_part}
PARTITIONED BY (dt STRING COMMENT '日期分区字段')
STORED AS PARQUET
LOCATION '/dw/hive/{db_name}.db/external/{table_name}'
TBLPROPERTIES (
    'creator'='{CREATOR}',
    'external.table.purge'='false',
    'translated_from'='oracle',
    'source_system'='{SOURCE_SYSTEM}',
    'synch_rule' = 'FULL',
    'synch_col' = '1=1',
    'frequency' = 'daily'
);""".lower()

    return hive_sql

def extract_comments(content):
    """Collect table-level and column-level COMMENT ON statements.

    Returns {'table_comment': str | None, 'column_comments': {name: text}}.
    """
    # Table comment: first COMMENT ON TABLE ... IS '...' wins.
    table_match = re.search(
        r'COMMENT\s+ON\s+TABLE\s+\S+\s+IS\s+\'(.*?)\'',
        content,
        re.IGNORECASE
    )

    # Column comments: one entry per COMMENT ON COLUMN schema.table.col IS '...'.
    column_comments = {
        m.group(1): m.group(2)
        for m in re.finditer(
            r'COMMENT\s+ON\s+COLUMN\s+\S+\.(\w+)\s+IS\s+\'(.*?)\'',
            content,
            re.IGNORECASE
        )
    }

    return {
        'table_comment': table_match.group(1) if table_match else None,
        'column_comments': column_comments,
    }

def process_file(filepath):
    """Convert one Oracle DDL file into a Hive DDL string.

    Returns the generated SQL, or None when the file cannot be parsed.
    Errors are reported on stdout rather than raised (best-effort batch
    processing over many files).
    """
    basename = os.path.basename(filepath)
    try:
        with open(filepath, 'r', encoding='utf-8') as handle:
            content = handle.read()

        db_name, table_name, columns_content = extract_table_definition(content)
        if not (db_name and table_name and columns_content):
            print(f"警告: 文件 {basename} 中未找到有效的表定义")
            return None

        parsed_columns = parse_columns(columns_content)
        if not parsed_columns:
            print(f"警告: 文件 {basename} 中未提取到列定义")
            return None

        return generate_hive_sql(db_name, table_name, parsed_columns,
                                 extract_comments(content))

    except Exception as e:
        # Broad catch is deliberate: one bad file must not stop the batch.
        print(f"处理文件 {basename} 时出错: {str(e)}")
        return None


def main():
    """Batch-convert every .sql file in ORACLE_SQL_DIR into HIVE_SQL_DIR.

    Each input file <name>.sql produces an output file hive_<name>.sql.
    Failures are printed and skipped; the run never raises.
    """
    try:
        validate_path(ORACLE_SQL_DIR, is_dir=True)
        os.makedirs(HIVE_SQL_DIR, exist_ok=True)

        processed = 0
        for filename in os.listdir(ORACLE_SQL_DIR):
            if not filename.lower().endswith('.sql'):
                continue

            oracle_file = os.path.join(ORACLE_SQL_DIR, filename)
            # Fixed: the output name was the literal string "hive_(unknown)",
            # so every converted table overwrote the same file; interpolate
            # the source filename instead.
            hive_file = os.path.join(HIVE_SQL_DIR, f"hive_{filename}")

            if not os.path.isfile(oracle_file):
                continue

            hive_sql = process_file(oracle_file)
            if hive_sql:
                with open(hive_file, 'w', encoding='utf-8') as f:
                    f.write(hive_sql)
                # Fixed: report the actual filename, not a "(unknown)" placeholder.
                print(f"成功转换: {filename}")
                processed += 1

        print(f"转换完成! 共处理 {processed} 个文件")

    except Exception as e:
        # Top-level boundary: report and exit cleanly for operator scripts.
        print(f"运行失败: {str(e)}")


if __name__ == '__main__':
    main()  # run the batch conversion when executed as a script