
"""
获取数据表的结构及前N行数据，并由此给出数据表的详细描述
"""
from core.state import AnalyzeState
from langgraph.types import Command
from langgraph.config import get_stream_writer
from core.utils.mcp_function import call_tool
from core.llm import dataset_usage_analyzer
import concurrent.futures
from config import settings
from loguru import logger
from datetime import datetime, timedelta
from pathlib import Path
import json
import pandas as pd


async def schemas_node(state: AnalyzeState):
    """Fetch the schema and top-N sample rows of every indicator table, then
    have the LLM analyzer produce a usage description per table.

    The resulting descriptions are cached on disk per session so repeated
    runs within the cache TTL skip both the MCP calls and the LLM analysis.

    Args:
        state: Graph state; only ``sessionId`` is read here.

    Returns:
        Command routing to ``rewrite_query`` with ``schemas`` populated, or
        to ``biw_query`` when the indicator-table list cannot be fetched.
    """
    writer = get_stream_writer()
    writer("获取数据表的详情描述")
    logger.info("获取数据表的详情描述")

    session_id = state.get('sessionId')
    cached_data_file = Path(__file__).parent.parent / 'caches' / f'{session_id}_tables.json'
    schemas = load_cached_schemas(cached_data_file)
    if schemas:
        logger.info(f"指标表数量：{len(schemas)}")
        return Command(update={"schemas": schemas}, goto='rewrite_query')

    logger.info("获取所有指标表.")
    indicator_tables = await call_tool(
        sse_url=settings.DATA_SERVER_URI,
        tool_name='system#biwIndicatorTable',
        tool_args={
            'sessionId': session_id
        }
    )

    # Anything other than a list means the lookup failed; fall back to the
    # generic query node instead of crashing on iteration below.
    if not isinstance(indicator_tables, list):
        return Command(goto='biw_query')

    for indicator_table in indicator_tables:
        try:
            res = await call_tool(
                sse_url=settings.DATA_SERVER_URI,
                tool_name='system#biwLimitDateQueryBySessionId',
                tool_args={
                    'sessionId': session_id,
                    'tableName': indicator_table['name'],
                    'rowNum': '50'
                }
            )
            # Validate the whole nested payload shape once, up front
            # (the original checked res['datasets'][0] twice and could
            # IndexError on an empty datasets list).
            if (not isinstance(res, dict)
                    or not isinstance(res.get('datasets'), list)
                    or not res['datasets']
                    or not isinstance(res['datasets'][0], dict)
                    or not isinstance(res['datasets'][0].get('table'), dict)):
                raise ValueError("type invalid.")
            dataset = res['datasets'][0]
            rows = dataset['rows']
            df = pd.DataFrame(rows)
            columns = dataset['table']['columns']
            if len(columns) != df.shape[1]:
                # Column metadata and sample rows disagree; keep only the
                # metadata entries actually present in the sample.
                indicator_table['columns'] = [item for item in columns if item.get('name') in df.columns]
                indicator_table['rows'] = rows
            else:
                df.columns = [item.get('name') for item in columns]
                indicator_table['columns'] = columns
                indicator_table['rows'] = [series.to_dict() for idx, series in df.iterrows()]
        except Exception as e:
            # Best-effort per table: a failed fetch leaves the table with
            # empty schema info rather than aborting the whole node.
            logger.error(e)
            indicator_table['columns'] = []
            indicator_table['rows'] = []

    # 根据指标表的字段信息和topN行，对该指标表进行用途分析。
    # Fan the LLM usage analysis out over a thread pool; each future yields
    # one schema description.
    with concurrent.futures.ThreadPoolExecutor(max_workers=8) as executor:
        tasks = []
        for indicator_table in indicator_tables:
            if len(indicator_table['columns']) == 0 and len(indicator_table['rows']) == 0:
                continue
            rows = indicator_table.pop('rows', [])
            tasks.append(executor.submit(dataset_usage_analyzer.invoke, indicator_table, rows))

        for future in concurrent.futures.as_completed(tasks):
            if future.exception() is not None:
                logger.info(future.exception())
                continue
            schemas.append(future.result())

    # Ensure the cache directory exists before writing — on a fresh deploy
    # the original open() raised FileNotFoundError here.
    cached_data_file.parent.mkdir(parents=True, exist_ok=True)
    with open(cached_data_file, 'w', encoding='utf-8', newline='') as fw:
        cached_datas = {
            'date': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            'schemas': schemas,
        }
        json.dump(cached_datas, fw, ensure_ascii=False, indent=4)

    writer(schemas)
    logger.info(f"指标表数量：{len(schemas)}")

    return Command(update={"schemas": schemas}, goto='rewrite_query')


def load_cached_schemas(cached_datas_file: Path, ttl_days: int = 10) -> list:
    """Load cached table schemas if the cache file exists and is still fresh.

    Args:
        cached_datas_file: JSON cache file written by ``schemas_node``
            (expected keys: ``date`` formatted as '%Y-%m-%d %H:%M:%S',
            and ``schemas``).
        ttl_days: Cache lifetime in days; entries older than this are
            treated as missing. Defaults to the original hard-coded 10.

    Returns:
        The cached ``schemas`` list, or ``[]`` when the cache is missing,
        stale, or unreadable.
    """
    if not cached_datas_file.exists():
        return []
    try:
        with open(cached_datas_file, 'r', encoding='utf-8') as fs:
            cached_datas = json.load(fs)
        cached_date = datetime.strptime(
            cached_datas['date'], "%Y-%m-%d %H:%M:%S"
        )
        schemas = cached_datas['schemas']
    except (OSError, ValueError, KeyError, TypeError) as e:
        # A corrupt or hand-edited cache must not crash the node; the
        # original raised here. Treat it as a cache miss and refetch.
        logger.warning(f"invalid cache file ignored: {cached_datas_file}: {e}")
        return []
    if datetime.now() - cached_date < timedelta(days=ttl_days):
        return schemas
    return []