
"""
调用PandasAI执行子任务的数据分析
"""
from core.data_analyze.state import AnalyzeState
from core.data_analyze.utils import (
    create_pai_agent, analyze_task, service_provider
)
from langchain_core.messages import AIMessage
from langgraph.types import Command
from langgraph.config import get_stream_writer
import concurrent.futures
from loguru import logger
from pathlib import Path
import pandas as pd
import json


def data_analyze_node(state: AnalyzeState):
    """Execute the decomposed analysis tasks with PandasAI and route the graph.

    Reads ``decomposed_tasks`` and ``datasets`` from *state*, fans the tasks
    out through :func:`pool_analyze_tasks`, and:
      - on any non-empty result, records the combined markdown in
        ``messages`` / ``analyze_content`` and moves to the ``report`` node;
      - otherwise terminates the graph (``__end__``).

    Args:
        state: current graph state; ``decomposed_tasks`` is a list of task
            dicts, ``datasets`` is whatever ``create_pai_agent`` expects.

    Returns:
        Command: LangGraph routing command with the state update (if any).
    """
    writer = get_stream_writer()
    writer("## **调用PandasAI执行数据分析**")
    logger.info("## **调用PandasAI执行数据分析**")

    # Guard against a missing key so the worker pool never iterates None.
    decomposed_tasks = state.get('decomposed_tasks') or []
    agent = create_pai_agent(state.get('datasets'))
    analyze_results = pool_analyze_tasks(agent, decomposed_tasks, retry_times=3)

    if analyze_results:
        analyze_content = '\n\n'.join(analyze_results)
        writer(analyze_content)
        logger.info(analyze_content)

        return Command(
            update={
                "messages": [AIMessage(content=analyze_content, name='data_analyze')],
                "analyze_content": analyze_content
            },
            goto="report",
        )
    # Nothing produced any usable output: end the graph without a report.
    return Command(goto='__end__')


def pool_analyze_tasks(agent, decomposed_tasks, retry_times):
    """Run analysis tasks concurrently and collect markdown fragments.

    Args:
        agent: PandasAI agent used to execute each task.
        decomposed_tasks: list of dicts carrying ``task_name`` and an optional
            ``out_type`` (defaults to ``'string'``).
        retry_times: per-task retry count forwarded to ``analyze_task``.

    Returns:
        list[str]: markdown fragments in the original task order; tasks that
        raised, or produced empty/unusable output, are skipped.
    """
    with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
        pool_tasks = dict()
        for idx, task in enumerate(decomposed_tasks):
            task_name = task.get('task_name')
            ori_type = task.get('out_type', 'string')
            # PandasAI cannot emit json directly: ask for a dataframe and
            # serialize it to json when collecting results.
            out_type = ori_type if ori_type != 'json' else 'dataframe'
            future = executor.submit(analyze_task, agent, task_name, out_type, retry_times)
            pool_tasks[future] = (idx, task_name, ori_type)

        # Collect fragments keyed by submission index so the final report
        # order is deterministic regardless of thread completion order.
        results_by_idx = dict()
        for future in concurrent.futures.as_completed(pool_tasks):
            idx, task_name, ori_type = pool_tasks[future]
            if future.exception() is not None:
                logger.error(future.exception())
                continue
            output = future.result()
            if output is None or output.value is None:
                continue
            # len() is only defined for sized values (str/DataFrame/...);
            # numeric results have no __len__ and would raise TypeError.
            if hasattr(output.value, '__len__') and len(output.value) == 0:
                continue

            fragments = []
            if output.type in ['string', 'number']:
                fragments.append(f'- **{task_name}**\n')
                fragments.append(str(output.value))
            elif output.type == 'dataframe':
                if isinstance(output.value, pd.DataFrame) and not output.value.empty:
                    fragments.append(f'- **{task_name}**\n')
                    if ori_type == 'json':
                        # NOTE(review): row values may be numpy scalars, which
                        # json.dumps cannot serialize — confirm upstream dtypes.
                        rows = [row.to_dict() for _, row in output.value.head(100).iterrows()]
                        json_string = json.dumps(rows, ensure_ascii=False, indent=4)
                        fragments.append(f'```json\n{json_string}\n```')
                    else:
                        fragments.append(output.value.head(100).to_markdown())
            elif output.type in ['plot', 'chart']:
                # The value is a file path: either a csv to render as a table
                # or an image to upload and embed.
                if not Path(str(output.value)).exists():
                    continue
                if str(output.value).endswith('.csv'):
                    df = pd.read_csv(str(output.value))
                    if df.empty:
                        continue
                    fragments.append(f'- **{task_name}**\n')
                    fragments.append(df.head(100).to_markdown())
                else:
                    file_url = service_provider.upload_file(str(output.value))
                    fragments.append(f'- **{task_name}**\n')
                    fragments.append(f'![]({file_url})')
            else:
                logger.error(f'不支持的输出类型: {output.type}')

            if fragments:
                results_by_idx[idx] = fragments

        # Flatten in submission order.
        return [frag for i in sorted(results_by_idx) for frag in results_by_idx[i]]
