import asyncio
import os
from typing import List

from pydantic import BaseModel, Field

from core.agents.factory import agent_manager
from core.llm.factory import ModelType, create_llm_provider
from core.utils.log import mylogger


class EvaluationConfig(BaseModel):
    """Configuration bundle for a single dialogue-evaluation run."""

    # Path to the character (persona) definition file.
    character_file_path: str = Field(description="角色配置文件路径")
    # Path to the dialogue result data file to be evaluated.
    result_file_path: str = Field(description="对话数据文件路径")
    # Path to the scenario definition file.
    scenario_file_path: str = Field(description="场景配置文件路径")
    # Directory where evaluation output is written (created if missing).
    output_dir: str = Field(default="data", description="输出目录")
    # LLM backend used to perform the evaluation.
    model_type: ModelType = Field(
        default=ModelType.DOUBAO_SEED, description="评测使用的模型类型"
    )
    # Sampling temperature passed to the evaluation agent.
    temperature: float = Field(default=0.1, description="模型温度参数")


class DialogueEvaluator:
    """Runs a dialogue evaluation through the EvalAgent architecture."""

    def __init__(self, config: EvaluationConfig):
        """Store the config, create the LLM provider, and ensure the output dir exists.

        Args:
            config: Paths, model type, and temperature for this evaluation run.
        """
        self.config = config
        # Provider is created eagerly; kept as an attribute for external use.
        self.llm = create_llm_provider(model_type=config.model_type)

        os.makedirs(config.output_dir, exist_ok=True)
        mylogger.info(
            f"DialogueEvaluator initialized with model: {config.model_type.value}"
        )

    async def evaluate(self) -> str:
        """Run the evaluation via an EvalAgent and return its result.

        Returns:
            The raw result produced by the agent's ``process`` call.

        Raises:
            Exception: re-raised after logging if anything in the run fails.
        """
        try:
            mylogger.info("开始执行对话评测...")

            cfg = self.config

            # Obtain (or create) the evaluation agent from the shared manager.
            agent = agent_manager.get_eval_agent(
                name="evaluator",
                model_type=cfg.model_type,
                output_dir=cfg.output_dir,
                temperature=cfg.temperature,
            )

            # Input payload: the three file paths the agent consumes.
            payload = {
                "character_file_path": cfg.character_file_path,
                "result_file_path": cfg.result_file_path,
                "scenario_file_path": cfg.scenario_file_path,
            }

            outcome = await agent.process(payload)
            mylogger.info("对话评测完成")
            return outcome

        except Exception as e:
            # Log at the boundary, then propagate to the caller unchanged.
            mylogger.error(f"评测过程中出错: {e}")
            raise


async def evaluate_dialogue(
    character_file_path: str,
    result_file_path: str,
    scenario_file_path: str,
    output_dir: str = "data",
    model_type: ModelType = ModelType.DOUBAO_SEED,
    temperature: float = 0.1,
) -> str:
    """Convenience wrapper: build a config, run one evaluation, return the result.

    Args:
        character_file_path: Path to the character definition file.
        result_file_path: Path to the dialogue result data file.
        scenario_file_path: Path to the scenario definition file.
        output_dir: Directory for evaluation output.
        model_type: LLM backend to evaluate with.
        temperature: Sampling temperature for the agent.

    Returns:
        The evaluation result produced by :class:`DialogueEvaluator`.
    """
    cfg = EvaluationConfig(
        character_file_path=character_file_path,
        result_file_path=result_file_path,
        scenario_file_path=scenario_file_path,
        output_dir=output_dir,
        model_type=model_type,
        temperature=temperature,
    )
    return await DialogueEvaluator(cfg).evaluate()


async def batch_evaluate_dialogues(
    evaluation_configs: List[EvaluationConfig],
) -> List[str]:
    """Evaluate a sequence of dialogue configs one after another.

    Runs strictly sequentially (not concurrently) so each agent run completes
    before the next begins; a failure in any task propagates and aborts the
    remaining ones.

    Args:
        evaluation_configs: Configs to evaluate, in order.

    Returns:
        The per-config evaluation results, in the same order as the input.
        (Fix: return annotation parameterized as ``List[str]`` to match
        ``DialogueEvaluator.evaluate`` instead of the bare ``List``.)
    """
    total = len(evaluation_configs)
    results: List[str] = []

    # enumerate(start=1) replaces the manual i+1 arithmetic; log text is identical.
    for index, config in enumerate(evaluation_configs, start=1):
        mylogger.info(f"开始第 {index}/{total} 个评测任务")

        evaluator = DialogueEvaluator(config)
        results.append(await evaluator.evaluate())

        mylogger.info(f"第 {index} 个评测任务完成")

    mylogger.info(f"批量评测完成，共完成 {len(results)} 个任务")
    return results


async def run_evaluation_interactive(
    npc_name: str = "赵凤儿",
    model_type: ModelType = ModelType.DOUBAO_SEED,
    temperature: float = 0.1,
):
    """Interactively ask whether to run an evaluation, then run it once.

    Loops on stdin until the user answers yes/no. File paths are resolved
    relative to this module's grandparent directory under ``data/``.

    Args:
        npc_name: Character whose ``character.json`` is evaluated.
        model_type: LLM backend for the evaluation agent
            (generalized from the previously hard-coded ``DOUBAO_SEED``;
            default unchanged, so existing callers behave identically).
        temperature: Sampling temperature for the agent
            (generalized from the hard-coded ``0.1``; default unchanged).
    """
    from pathlib import Path

    while True:
        user_input = input("是否需要进行评测？(yes/no): ").strip().lower()
        if user_input in ["yes", "y"]:
            mylogger.info(f"开始进行评测，角色: {npc_name}...")
            try:
                # Resolve evaluation file paths relative to the project layout.
                base_path = Path(__file__).parent.parent
                character_file_path = str(
                    base_path / "data" / "characters" / npc_name / "character.json"
                )
                result_file_path = str(base_path / "data" / "result.json")
                scenario_file_path = str(base_path / "data" / "scenario.json")
                output_dir = str(base_path / "data")

                # Obtain an EvalAgent configured for this run.
                eval_agent = agent_manager.get_eval_agent(
                    name=f"{npc_name}_evaluator",
                    model_type=model_type,
                    output_dir=output_dir,
                    temperature=temperature,
                )

                # Input payload: the three file paths the agent consumes.
                eval_message = {
                    "character_file_path": character_file_path,
                    "result_file_path": result_file_path,
                    "scenario_file_path": scenario_file_path,
                }

                result = await eval_agent.process(eval_message)
                mylogger.info(f"评测结果: {result}")

            except Exception as e:
                # Best-effort interactive flow: report the failure, don't crash.
                mylogger.error(f"评测过程中出现错误: {e}")
                print(f"评测失败: {e}")
            break
        elif user_input in ["no", "n"]:
            mylogger.info("跳过评测，程序退出")
            break
        else:
            print("请输入 yes 或 no")


if __name__ == "__main__":
    # Script entry point: launch the interactive evaluation flow.
    asyncio.run(run_evaluation_interactive())
