import json
import sqlite3
from pathlib import Path
from typing import Any, Dict, List

from console_logger import setup_logger


class SchemaAnalysisDB:
    def __init__(self, db_path: str = "schema_analysis.db"):
        """Initialize the wrapper and make sure the schema exists.

        Args:
            db_path: Path of the SQLite database file to use/create.
        """
        self.db_path = db_path
        self.conn = None
        self.create_table()

    def connect(self):
        """Open a connection to the SQLite database file."""
        self.conn = sqlite3.connect(self.db_path)
        # SQLite disables foreign keys per-connection by default; enable them.
        self.conn.execute("PRAGMA foreign_keys = ON")

    def create_table(self):
        """Create the schema_analysis table and its indexes if missing.

        Errors are logged rather than raised; the connection is always
        closed afterwards.
        """
        # Bind the logger BEFORE the try block: the original bound it inside
        # the try, so a failure in connect()/cursor() reached the except
        # handler with `logger` unbound (UnboundLocalError).
        logger = setup_logger()
        try:
            self.connect()
            cursor = self.conn.cursor()

            # Main table: one row per analyzed database column.
            create_table_sql = """
            CREATE TABLE IF NOT EXISTS schema_analysis (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                field_name TEXT NOT NULL DEFAULT '字段名',
                database_name TEXT NOT NULL DEFAULT '数据库名',
                table_name TEXT NOT NULL DEFAULT '表名',
                comment TEXT DEFAULT '注释',
                data_type TEXT,
                length INTEGER,
                content_sample TEXT,
                tags TEXT,
                confidence_level REAL CHECK(confidence_level >= 0.0 AND confidence_level <= 1.0),
                created_time DATETIME DEFAULT CURRENT_TIMESTAMP
            )
            """
            cursor.execute(create_table_sql)

            logger.info("建表成功")
            # Secondary indexes for the common lookup patterns.
            index_sqls = [
                "CREATE INDEX IF NOT EXISTS idx_database_table ON schema_analysis(database_name, table_name)",
                "CREATE INDEX IF NOT EXISTS idx_field ON schema_analysis(field_name)",
                "CREATE INDEX IF NOT EXISTS idx_created_time ON schema_analysis(created_time)",
                "CREATE INDEX IF NOT EXISTS idx_confidence ON schema_analysis(confidence_level)",
            ]
            for sql in index_sqls:
                cursor.execute(sql)

            self.conn.commit()

        except sqlite3.Error as e:
            # A failure here is an error, not routine info.
            logger.error(e)
        finally:
            if self.conn:
                self.conn.close()
                # The closed connection is unusable; drop the stale reference.
                self.conn = None


def extract_sample_data_from_json(sample_data_json_file: str) -> dict:
    """Collect per-column sample values from a sample-data JSON file.

    Each top-level value is expected to be a dict holding "schema",
    "table" and "data" (a list of row dicts).  All sample values of a
    column are accumulated under "<schema>.<table>.<column>" (backticks
    stripped from the schema name).

    Args:
        sample_data_json_file: Path of the JSON file with sample rows.

    Returns:
        dict: Mapping of "schema.table.column" -> list of sample values.
    """
    columns: Dict[str, List[Any]] = {}
    with open(sample_data_json_file, mode="r", encoding="utf-8") as f:
        data = json.load(f)
    for table_info in data.values():
        # Skip non-dict entries (e.g. stray metadata strings) defensively.
        if not isinstance(table_info, dict):
            continue
        schema = table_info.get("schema")
        if schema:
            schema = schema.replace("`", "")
        table = table_info.get("table")
        prefix = f"{schema}.{table}"

        for row in table_info.get("data", []):
            for col_key, col_value in row.items():
                # setdefault replaces the manual "if key not in dict" dance.
                columns.setdefault(f"{prefix}.{col_key}", []).append(col_value)
    return columns


def extract_meta_data_from_json(meta_data_json_file: str) -> List[Dict[str, Any]]:
    """Extract column metadata from a schema JSON export.

    Reads the "all-table-columns" list and returns, for every column item,
    its name, schema, table, resolved data type, length and comment.

    Args:
        meta_data_json_file: Path of the metadata JSON file.

    Returns:
        List of dicts with keys field_name, schema_name, table_name,
        field_type, field_len and remark.
    """
    with open(meta_data_json_file, mode="r", encoding="utf-8") as f:
        data = json.load(f)

    all_col_list = data.get("all-table-columns", [])

    field_info_list: List[Dict[str, Any]] = []
    for col_list in all_col_list:
        if not isinstance(col_list, list):
            continue

        # Pass 1: build a uuid -> type-name map from the inline
        # "column-data-type" definitions.
        type_map: Dict[str, str] = {}
        for item in col_list:
            column_data_type = item.get("column-data-type")
            if not isinstance(column_data_type, dict):
                continue
            # Reuse the value fetched above instead of re-querying the dict
            # twice (as the original did).
            uuid = column_data_type.get("@uuid")
            type_name = column_data_type.get("name")
            if uuid and type_name:
                type_map[uuid] = type_name

        # Pass 2: extract name, schema, table, type, length and comment,
        # resolving the data type via the uuid stored under "type".
        for item in col_list:
            # Guard against missing names so .replace() never hits None
            # (the original raised AttributeError in that case).
            full_name = item.get("full-name") or ""
            short_name = item.get("short-name") or ""
            field_info_list.append(
                {
                    "field_name": item.get("name"),
                    "schema_name": full_name.replace('"', "").split(".", 1)[0],
                    "table_name": short_name.replace("`", "").split(".", 1)[0],
                    "field_type": type_map.get(item.get("type")),
                    "field_len": item.get("size"),
                    "remark": item.get("remarks"),
                }
            )
    return field_info_list


def combine_meta_sample_data():
    """Attach sample values from sample.json to the metadata of mysql.json.

    Fixes a latent NameError: `Path` was only imported inside the
    `__main__` guard, so calling this function from an importing module
    crashed; it now relies on the module-level pathlib import.

    Returns:
        List of metadata dicts, each extended with a "content" key holding
        the column's sample values.
    """
    root_dir = Path(__file__).parent.parent
    sample_file = root_dir / "sample.json"
    meta_file = root_dir / "mysql.json"
    columns = extract_sample_data_from_json(sample_file)
    field_info_list = extract_meta_data_from_json(meta_file)

    for meta_data in field_info_list:
        schema_name = meta_data.get("schema_name")
        table_name = meta_data.get("table_name")
        field_name = meta_data.get("field_name")

        if schema_name and table_name and field_name:
            column_ref = f"{schema_name}.{table_name}.{field_name}"
            # NOTE(review): this yields None when a column has no samples but
            # "" below when the reference is incomplete — confirm downstream
            # consumers accept both sentinels.
            meta_data["content"] = columns.get(column_ref)
        else:
            meta_data["content"] = ""

    return field_info_list


if __name__ == "__main__":
    # Smoke run: merge sample values into the extracted metadata and dump
    # the combined records to stdout.  (Dead commented-out experiments and
    # the now-redundant local pathlib import were removed; `Path` is
    # imported at module level.)
    results = combine_meta_sample_data()
    print(results)
