import asyncio
import json
import re

from langsmith.evaluation._arunner import AsyncExperimentResults

from app.constant import LANGSMITH_SAMPLE_DATASET_NAME
from app.logger import logger
from app.monitor.client import client
from app.monitor.target import target, agent
from app.prompt.evaluator import CUSTOM_CORRECTNESS_PROMPT


def string_to_json(content: str):
    """Extract an LLM judgement from *content* and return ``(score, feedback)``.

    Looks for the first JSON object embedded in the response text and reads
    its ``score`` (coerced to float, default 0.5) and ``feedback`` (default:
    the whole response). Falls back to ``(0.5, content)`` when no object is
    found or parsing fails.
    """
    # Matches a JSON object with at most one level of nested braces.
    match = re.search(r'\{[^{}]*(?:\{[^{}]*\}[^{}]*)*\}', content)
    if match is None:
        # No JSON-looking span at all — neutral score, raw text as feedback.
        return 0.5, content

    try:
        parsed = json.loads(match.group())
        return float(parsed.get('score', 0.5)), parsed.get('feedback', content)
    except (json.JSONDecodeError, ValueError, KeyError):
        logger.exception("JSON decode error")
        return 0.5, content


def custom_evaluator(inputs: dict, outputs: dict, reference_outputs: dict):
    """Judge an answer against the reference using our own LLM instance.

    Fills the correctness prompt with the question, reference answer and the
    produced answer, asks the model, and parses its reply into a score and
    feedback. Returns a dict with ``score``, ``feedback`` and ``metadata``.
    """
    prompt = CUSTOM_CORRECTNESS_PROMPT.format(
        inputs=inputs['question'],
        reference_outputs=reference_outputs['answer'],
        outputs=outputs['answer'],
    )

    # Invoke the model to perform the evaluation.
    response = agent.llm.client.invoke([{"role": "user", "content": prompt}])

    score, feedback = string_to_json(response.content)

    return {
        "score": score,
        "feedback": feedback,
        "metadata": {
            # Keep the first 200 characters of the raw reply for debugging.
            "raw_response": response.content[:200]
        },
    }


def correctness_evaluator(inputs: dict, outputs: dict, reference_outputs: dict):
    """LangSmith evaluator entry point; delegates to ``custom_evaluator``.

    Returns the ``{"score", "feedback", "metadata"}`` dict produced by the
    custom LLM-based evaluator.
    """
    # Call the evaluator directly — the old `evaluator = custom_evaluator`
    # alias added nothing.
    eval_result = custom_evaluator(
        inputs=inputs,
        outputs=outputs,
        reference_outputs=reference_outputs,
    )
    # Lazy %-formatting: the message is only rendered if DEBUG is enabled.
    logger.debug("eval_result: %s", eval_result)
    return eval_result


async def evaluate():
    """Run the correctness evaluation over the sample dataset via LangSmith.

    Evaluates ``target`` against ``LANGSMITH_SAMPLE_DATASET_NAME`` with the
    correctness evaluator, limited to 2 concurrent runs.

    Returns:
        AsyncExperimentResults: the experiment results — previously assigned
        to an unused local and discarded; returning them lets callers inspect
        the run while remaining backward-compatible.
    """
    res: AsyncExperimentResults = await client.aevaluate(
        target,
        data=LANGSMITH_SAMPLE_DATASET_NAME,
        evaluators=[
            correctness_evaluator,
        ],
        experiment_prefix="first-eval-in-langsmith",
        max_concurrency=2,
    )
    return res


if __name__ == "__main__":
    # Script entry point: drive the async evaluation to completion.
    asyncio.run(evaluate())
