# excel2sql.py
# usage: python ./src/vector/excel2sql.py

import os
import sys
import json
from typing import List, Dict

project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.insert(0, project_root)
from config import project_config
import pandas as pd


def excel_to_json_ddl(excel_path: str, output_dir: str = project_config.sql_output_path) -> None:
    """
    Convert an Excel workbook of table definitions into one JSON file per table.

    Each generated ``<table>.json`` has the form::

        {
            "id": 1,
            "name": "<table description from the 表描述 column>",
            "table": "user_info",
            "DDL": "CREATE TABLE user_info ( ... );"
        }

    Expected Excel columns: 表名 (table name), 表描述 (table description),
    字段名 (field name), 字段类型 (field type), 字段描述 (field description).
    Merged cells in 表名/表描述 are forward-filled so every row carries its
    table's name and description.

    :param excel_path: path to the source ``.xlsx`` file
    :param output_dir: directory the per-table ``.json`` files are written to
                       (created if missing)
    """
    # Make sure the output directory exists.
    os.makedirs(output_dir, exist_ok=True)

    df = pd.read_excel(excel_path)

    # Normalize column headers; stray whitespace is common in hand-edited sheets.
    df.columns = df.columns.str.strip()

    # Merged cells leave NaN below the first row of each table block;
    # forward-fill so grouping by 表名 works row-by-row.
    df[['表名', '表描述']] = df[['表名', '表描述']].ffill()

    # Excel-declared type -> SQLite storage class (unknown types fall back to TEXT).
    type_mapping = {
        'int': 'INTEGER',
        'integer': 'INTEGER',
        'string': 'TEXT',
        'text': 'TEXT',
        'varchar': 'TEXT',
        'char': 'TEXT',
        'float': 'REAL',
        'double': 'REAL',
        'real': 'REAL',
        'bool': 'INTEGER',
        'boolean': 'INTEGER',
        'date': 'TEXT',
        'datetime': 'TEXT',
        'timestamp': 'TEXT',
    }

    table_entries: List[Dict] = []
    # idx is the 1-based sequence number recorded as "id" in each JSON entry.
    for idx, (table_name, group) in enumerate(df.groupby('表名'), start=1):
        # Keep only filename/identifier-safe characters in the table name.
        safe_table_name = "".join(c for c in str(table_name) if c.isalnum() or c in ('_', '-'))
        json_file_path = os.path.join(output_dir, f"{safe_table_name}.json")

        # Table description (may be NaN if the sheet left it blank).
        table_comment = group['表描述'].iloc[0]

        fields = []
        for _, row in group.iterrows():
            field_name = row['字段名']
            field_type = str(row['字段类型']).strip().lower()
            field_desc = row['字段描述']

            # Skip padding rows that define no field.
            if pd.isna(field_name) or pd.isna(field_type):
                continue

            sqlite_type = type_mapping.get(field_type, 'TEXT')

            field_def = f" {field_name} {sqlite_type}"

            # Heuristic primary-key detection: an *id* column whose description
            # mentions 自增 (auto-increment) or 主键 (primary key).
            if 'id' in str(field_name).lower() and ('自增' in str(field_desc) or '主键' in str(field_desc)):
                field_def += " PRIMARY KEY AUTOINCREMENT"

            if pd.notna(field_desc):
                # Escape single quotes so the COMMENT string literal stays well-formed.
                # NOTE(review): SQLite itself does not accept a COMMENT clause; this
                # DDL text looks like documentation/LLM context rather than SQL that
                # is executed here — confirm before running it against SQLite.
                desc_text = str(field_desc).replace("'", "''")
                field_def += f" COMMENT '{desc_text}'"

            fields.append(field_def)

        full_ddl = " ".join([f"CREATE TABLE {safe_table_name} (", ",".join(fields), ");"])

        entry = {
            "id": idx,
            # Emit null instead of NaN: json.dump would otherwise write the bare
            # token NaN, which is not valid JSON.
            "name": table_comment if pd.notna(table_comment) else None,
            "table": safe_table_name,
            "DDL": full_ddl,
        }

        # One JSON file per table.
        with open(json_file_path, 'w', encoding='utf-8') as f:
            json.dump(entry, f, ensure_ascii=False, indent=1)

        print(f"✅ 已生成: {json_file_path}")
        table_entries.append(entry)

    print(f"\n🎉 所有表已导出到目录: {output_dir}，共 {len(table_entries)} 个表")


# ========== Usage example ==========
if __name__ == "__main__":
    # Source workbook with the table definitions, and the directory that will
    # receive one .json file per table.
    source_workbook = './data/表结构.xlsx'
    target_directory = 'sql_output'
    excel_to_json_ddl(excel_path=source_workbook, output_dir=target_directory)