import json
import zipfile
from io import BytesIO, StringIO

from pandas import DataFrame
from sqlalchemy import Engine, text, create_engine, MetaData, Integer, BigInteger, SmallInteger, inspect, \
    ForeignKeyConstraint, UniqueConstraint
from sqlalchemy.dialects.mssql.information_schema import constraints
from sqlalchemy.sql.ddl import CreateTable

from ai_cleint import OllamaClient
import pandas as pd
from sdv.metadata import Metadata, MultiTableMetadata
from data_base_tool import SQLDatabaseTool
from response_parser import response_parser
from cachetools import cached, LRUCache

# Module-wide LRU cache shared by the helpers below (database tools,
# the SQLite tool, etc.); bounded at 100 entries.
global_cached = LRUCache(maxsize=100)








def get_sdv_metadata(database_id, table_name) -> Metadata:
    """
    Build (or load a cached copy of) the SDV metadata for a single table.

    The metadata is first looked up in the local SQLite cache table
    ``database_single_table_pattern``; on a miss it is detected from a
    20-row sample of the table, refined with AI-suggested sdtypes, and
    persisted back to SQLite for subsequent calls.

    Args:
        database_id: id of the source database connection (row in
            ``database_connections``).
        table_name: name of the table to analyse.

    Returns:
        Metadata: the SDV metadata for the table, or None when detection
        fails.
    """
    # First try the metadata cached in the local SQLite store.
    sqlite_tool = get_sql_lite_tool()
    res = sqlite_tool.get_table_data('database_single_table_pattern',
                                     where_clause=f"table_name='{table_name}' and database_id='{database_id}'", limit=1)
    data = res.get('data')
    if data:
        return Metadata.load_from_dict(json.loads(data[0]['table_metadata']))

    try:
        # Load the mapping between AI-reported type names and SDV sdtypes.
        sqlite_engine = get_sql_lite_tool().engine
        with sqlite_engine.connect() as connection:
            query = text("SELECT ai_type_name, sdv_type_name FROM ai_sdv_datatype_config")
            rows = connection.execute(query).fetchall()
            candidate_type_list = [row[0] for row in rows]
            ai_sdv_config = {row[0]: row[1] for row in rows}

        # Sample the source table (first 20 rows) and auto-detect metadata.
        source_tool = get_sql_databaseTool(database_id)
        df = source_tool.get_table_data_frame(table_name, size=20)
        metadata = Metadata.detect_from_dataframe(df)

        # Ask the model to classify each column against the candidate types.
        ollama_client = OllamaClient(model='llama3.2-vision:11b')
        response = ollama_client.analyze_data_type(
            df.to_json(orient='records', force_ascii=False, date_format='iso', index=False),
            candidate_types=candidate_type_list)
        if response != 'error':
            table_info = response_parser.parse(response)
            # Fix: the original tested ``isinstance(result, list)`` — the DB
            # cursor result — instead of the parsed response.  The parser may
            # return a single-element list; unwrap it.
            if isinstance(table_info, list):
                table_info = table_info[0]
            for key, value in table_info.items():
                if ai_sdv_config.get(value) and value != 'unknown':
                    metadata.update_column(key, 'table', sdtype=ai_sdv_config.get(value))
            # Persist the refined metadata so the next call hits the cache.
            data = {
                'database_id': database_id,
                'table_name': table_name,
                'table_metadata': str(metadata)
            }
            get_sql_lite_tool().insert_data('database_single_table_pattern', data)

        return metadata

    except Exception as e:
        print(f"创建 metadata 失败: {str(e)}")
        return None


def get_sql_databaseTool(database_id: int) -> SQLDatabaseTool | None:
    """
    Return a (cached) SQLDatabaseTool for the given connection id.

    The tool is looked up in the module-level LRU cache first; on a miss
    the connection config is read from the ``database_connections`` table
    and a new tool is created and cached.

    Returns:
        The SQLDatabaseTool for the connection, or None when
        ``database_id`` is falsy or no matching connection row exists.
    """
    if not database_id:
        return None
    cache_key = 'database_tool_{}'.format(database_id)
    tool = global_cached.get(cache_key)
    if tool is None:
        result = get_sql_lite_tool().get_table_data(
            'database_connections', where_clause=f"id={database_id}", limit=1)
        rows = result.get('data')
        if not rows:
            # Fix: the original reused one variable for the SQLite lookup
            # tool and the result, so a missing config row returned the
            # SQLite tool itself instead of None.
            return None
        tool = SQLDatabaseTool(rows[0], close_session=False)
        global_cached[cache_key] = tool
    return tool


def get_sql_lite_tool() -> SQLDatabaseTool:
    """Return the process-wide SQLite tool, creating and caching it on first use."""
    tool = global_cached.get('sql_lite_tool')
    if tool is not None:
        return tool
    config = {
        'type': 'Sqlite',
        'url': 'sqlite:///' + './sqlite_db.db'
    }
    tool = SQLDatabaseTool(config, close_session=False)
    global_cached['sql_lite_tool'] = tool
    return tool


def create_all_database_metadata(database_id):
    """
    Build SDV MultiTableMetadata covering every table in a database.

    Reflects all tables, asks the AI model to infer foreign-key
    relationships from the CREATE TABLE DDL plus 5 sample rows per table,
    then merges per-table metadata (primary keys and column sdtypes) into
    one MultiTableMetadata object.

    Args:
        database_id: id of the source database connection.

    Returns:
        MultiTableMetadata on success, None on any failure (errors are
        printed, matching the module's existing error-handling style).
    """
    try:
        reflected = MetaData()
        sql_tool = get_sql_databaseTool(database_id)
        datasource = get_data_source_info_by_id(database_id)
        engine = sql_tool.engine
        all_table = sql_tool.get_table_names()
        reflected.reflect(engine, schema=datasource['schema_name'], only=all_table)

        # Concatenate each table's CREATE TABLE DDL plus 5 sample rows to
        # build the model prompt.
        table_and_data = ''
        for table in all_table:
            full_table_name = table
            if datasource['schema_name'] is not None:
                full_table_name = datasource['schema_name'] + '.' + table
            source_table = reflected.tables[full_table_name]
            source_table.schema = None
            ddl = str(CreateTable(source_table).compile(engine))
            table_and_data += '\n'
            table_and_data += ddl
            datas = str(sql_tool.get_table_data(table_name=table, limit=5)['data'])
            table_and_data += '\n'
            table_and_data += datas

        str_input = ('''需求说明:
1. 请你语句下面的数据表建表语句和数据(每张表下面就是其对应的json数据)，分析表中所有潜在的外键关系，只需要输出外键关系并严格按指定格式输出JSON结果。

输出要求:
1.不需要分析和思考,直接输出结果即可
2.下面的输出JSON格式必须严格遵循
{ "table1": { "field1": ["foreign_table", "foreign_column"] }, "table2": { "field2": ["foreign_table", "foreign_column"], "字段2": ["foreign_table", "foreign_column"] } }


数据表建表语句和数据:\n''')
        str_input += table_and_data

        response = OllamaClient(model='llama3.2-vision:11b').get_data_base_relationship(str_input)
        if response != 'error':
            print(response)
            # Fix: the original reused ``table_metadata`` (a MetaData) for
            # this parsed dict; use a distinct name.
            relationship_dict = response_parser.extract_json_objects(response)
            print('------------get reponse------------{}'.format(relationship_dict))
            # Detect baseline metadata from a sample of every table.
            table_data_dict = get_all_tables_data(database_id)
            metadata = MultiTableMetadata()
            metadata.detect_from_dataframes(table_data_dict)
            engine = get_sql_databaseTool(database_id).engine

            # Align primary keys with the real database schema.
            for table in all_table:
                sql_metadata = MetaData()
                sql_metadata.reflect(engine, only=[table])
                source_table = sql_metadata.tables[table]
                pk_columns = list(source_table.primary_key.columns)
                if len(pk_columns) == 1:
                    metadata.get_table_metadata(table).primary_key = pk_columns[0].name
                else:
                    # Composite or missing primary key: SDV only supports a
                    # single-column key.
                    metadata.remove_primary_key(table)

            # Register the AI-inferred foreign-key relationships.
            for child_table_name, parent_table_keys in relationship_dict.items():
                for key, value in parent_table_keys.items():
                    if value is not None and isinstance(value, list) and len(value) > 1:
                        parent_table_name = value[0]
                        parent_table_column_name = value[1]
                        try:
                            metadata.add_relationship(parent_table_name, child_table_name,
                                                      parent_table_column_name, key)
                        except Exception as e:
                            # Invalid model suggestions are skipped, not fatal.
                            print(e)

            # Merge per-table column sdtypes from the single-table metadata.
            for table in all_table:
                single_metadata = get_sdv_metadata(database_id, table)
                if single_metadata is None:
                    # Detection failed for this table; keep the auto-detected
                    # columns instead of crashing into the outer except.
                    continue
                columns = single_metadata.to_dict().get('tables').get('table').get('columns')
                for col_name, properties in columns.items():
                    metadata.update_column(table, col_name, **properties)

            return metadata
    except Exception as e:
        print(e)
        return None


def get_all_tables_data(database_id, fix_count=-1, min_count=-1, max_count=-1, rate=-1) -> dict[str, DataFrame]:
    """
    Load a sample DataFrame for every table in the database.

    The per-table row count is either a fixed count (``fix_count``) or a
    fraction of the table's size (``rate``), optionally clamped to
    ``[min_count, max_count]``. A negative value means "not set"; when no
    usable count results, 100 rows are fetched.

    Args:
        database_id: id of the source database connection.
        fix_count: exact number of rows per table; takes precedence.
        min_count: lower clamp for the rate-derived count (ignored if < 0).
        max_count: upper clamp for the rate-derived count (ignored if < 0).
        rate: fraction of each table's row count to fetch.

    Returns:
        dict mapping table name -> sampled DataFrame.
    """
    database_tool = get_sql_databaseTool(database_id)
    table_data_dict = {}
    for table in database_tool.get_table_names():
        if fix_count >= 0:
            count = fix_count
        else:
            count = int(database_tool.count(table) * rate)
            # Fix: clamp only against bounds that were actually provided —
            # the original compared against the -1 sentinels, so a count
            # computed from ``rate`` alone was clamped down to -1 and then
            # silently replaced by the 100-row default.
            if min_count >= 0 and count < min_count:
                count = min_count
            if max_count >= 0 and count > max_count:
                count = max_count
        if count < 0:
            count = 100  # default sample size when nothing usable was given
        table_data_dict[table] = database_tool.get_table_data_frame(table, size=count)
    return table_data_dict


def get_table_metadata(source_database, table):
    """Reflect and return the SQLAlchemy Table object for ``table``."""
    engine = get_sql_databaseTool(source_database['id']).engine
    reflected = MetaData()
    reflected.reflect(engine, schema=source_database['schema_name'], only=[table])
    # Reflected tables are keyed by "schema.table" when a schema is set.
    schema_name = source_database['schema_name']
    key = table if schema_name is None else schema_name + '.' + table
    return reflected.tables[key]


def get_data_source_info_by_id(database_id):
    """Return the ``database_connections`` row for ``database_id``, or None."""
    result = get_sql_lite_tool().get_table_data(
        'database_connections', where_clause=f"id={database_id}", limit=1)
    rows = result.get('data')
    return rows[0] if rows else None



def has_auto_increment_pk(table):
    """
    Return True when ``table`` has a single, integer-typed,
    auto-incrementing primary-key column.
    """
    # Only single-column primary keys are considered.
    pk_columns = list(table.primary_key.columns)
    if len(pk_columns) != 1:
        return False

    pk_col = pk_columns[0]

    # Auto-increment only applies to integer-typed columns.
    if not isinstance(pk_col.type, (Integer, BigInteger, SmallInteger)):
        return False

    # Enabled when explicitly True, or when neither 'ignore' nor False
    # (SQLAlchemy's default 'auto' therefore counts as enabled).
    flag = pk_col.autoincrement
    if flag is True:
        return True
    return flag != 'ignore' and flag is not False





def get_single_filed_type_dict(metadata_str: str) -> dict[str, str]:
    """
    Map each column name to its SDV sdtype.

    Args:
        metadata_str: JSON string of single-table SDV metadata with the
            shape ``{"tables": {"table": {"columns": {<name>: {"sdtype": ...}}}}}``.

    Returns:
        dict mapping column name -> sdtype string.  (The original
        annotation ``-> []`` was an invalid type expression.)
    """
    cols_metadata = json.loads(metadata_str)['tables']['table']['columns']
    return {name: props['sdtype'] for name, props in cols_metadata.items()}


def get_all_table_and_relationships(metadata_str: str):
    """
    Summarise a multi-table SDV metadata JSON string.

    Returns a dict with the table names, the single-column primary keys
    (only for tables that declare one), the column names per table, and
    the raw relationships list.
    """
    metadata = json.loads(metadata_str)
    relationships = metadata['relationships']
    tables = metadata['tables']

    table_name_list = list(tables.keys())
    table_name_primary_key_dict = {
        name: spec['primary_key']
        for name, spec in tables.items()
        if spec.get('primary_key')
    }
    table_name_columns_dict = {
        name: list(spec['columns'].keys())
        for name, spec in tables.items()
    }

    return {
        'table_name_list': table_name_list,
        'table_name_primary_key_dict': table_name_primary_key_dict,
        'table_name_columns_dict': table_name_columns_dict,
        'relationships': relationships
    }


def update_database_metadata(database_id, metadata: MultiTableMetadata):
    """Persist the serialized multi-table metadata for a database."""
    serialized = str(metadata)
    get_sql_lite_tool().update_data(
        'database_all_table_metadata',
        where_clause=f"database_id={database_id}",
        data={"sdv_metadata": serialized},
        params={"sdv_metadata": serialized})


def get_database_metadata(database_id):
    """Load the stored MultiTableMetadata for a database, or None when absent."""
    rows = get_sql_lite_tool().get_table_data(
        'database_all_table_metadata',
        where_clause=f"database_id={database_id}").get('data')
    if not rows:
        return None
    return MultiTableMetadata.load_from_dict(json.loads(rows[0]['sdv_metadata']))

def append_csv_data_source(data_sources):
    """Append the built-in "CSV" pseudo data-source entry (mutates the list in place)."""
    entry = {"name": "CSV", "type": "CSV", "id": "CSV"}
    # Connection fields are blank: CSV sources have no server to reach.
    for field in ("host", "port", "user", "password", "database"):
        entry[field] = ""
    data_sources.append(entry)

def remove_csv_data_source(data_sources):
    """Return a new list of data sources with any "CSV" pseudo entries removed."""
    return [source for source in data_sources if source["name"] != "CSV"]

def get_data_source_by_name(data_sources, name):
    """Return the first data source whose "name" equals ``name``, or None.

    ``next`` with a default replaces the original bare ``except:``, which
    silently swallowed every exception (including KeyboardInterrupt).
    """
    return next((s for s in data_sources if s["name"] == name), None)

def get_data_sources_by_type(data_sources, type):
    """Return every data source whose "type" field equals ``type``.

    NOTE(review): the parameter name shadows the builtin ``type``; kept
    unchanged for caller compatibility.
    """
    return [source for source in data_sources if source["type"] == type]



def create_table_if_not_exist(source_database_id, source_table_name, target_database_id, target_table_name):
    """
    Clone a table's schema into the target database when it is missing.

    Copies the reflected source table definition, renames it and its
    named constraints (to avoid name clashes in the target), applies the
    target schema, and executes the generated CREATE TABLE DDL.
    """
    target_tool = get_sql_databaseTool(target_database_id)
    if target_tool.has_table(target_table_name):
        return

    source_database = get_data_source_info_by_id(source_database_id)
    source_table = get_table_metadata(source_database, source_table_name)
    target_database = get_data_source_info_by_id(target_database_id)

    # ``Table.tometadata`` was deprecated in SQLAlchemy 1.4 and removed in
    # 2.0 (this file already uses the 2.0-style ``Engine`` import);
    # ``to_metadata`` is the replacement.
    copied_table = source_table.to_metadata(MetaData())
    if target_database['schema_name'] is not None:
        copied_table.schema = target_database['schema_name']
    copied_table.name = target_table_name

    # Prefix constraint names so they stay unique in the target database.
    for constraint in copied_table.constraints:
        if getattr(constraint, 'name', None):
            constraint.name = f"{target_table_name}_{constraint.name}"

    # Generate and execute the new DDL.
    new_ddl = str(CreateTable(copied_table).compile(target_tool.engine))
    print(new_ddl)
    with target_tool.engine.connect() as conn:
        conn.execute(text(new_ddl))
        conn.commit()


def convert_dfs_to_csv_zip(df_dict):
    """
    Serialize each DataFrame to a tab-separated CSV and bundle them into
    an in-memory ZIP archive.

    Args:
        df_dict: mapping of sheet name -> DataFrame; each entry becomes
            ``<sheet_name>.csv`` inside the archive.

    Returns:
        BytesIO positioned at the start of the ZIP data.
    """
    archive = BytesIO()
    with zipfile.ZipFile(archive, 'w') as bundle:
        for name, frame in df_dict.items():
            # Render each frame to text in memory, then store it.
            text_buf = StringIO()
            frame.to_csv(text_buf, sep='\t', na_rep='nan', index=False)
            bundle.writestr(f'{name}.csv', text_buf.getvalue())
    archive.seek(0)
    return archive


