from datasets import Dataset

from langsmith import EvaluationResult
from ragas import evaluate
from ragas.metrics import (
    faithfulness,
    answer_relevancy,
    context_recall,
    context_precision,
)

from evaluator.rag_agent import RagAgent
from evaluator.rag_test_desc import RagTestDesc

class RagasEvaluator:
    """Scores a RAG agent's answers against test cases using the ragas metric suite."""

    def __init__(self):
        pass

    def evaluate_agent(
        self,
        agent: RagAgent,
        test_cases: list[RagTestDesc],
        llm=None,
    ) -> EvaluationResult:
        """Run *agent* over every test case and evaluate the answers with ragas.

        Args:
            agent: RAG agent producing an answer from a question plus retrieved contexts.
            test_cases: Descriptors supplying question, contexts and ground truths.
            llm: Optional judge LLM forwarded to ragas' ``evaluate`` (e.g. a
                LangChain-wrapped local Ollama model such as qwen2:1.5b). When
                ``None``, ragas falls back to its default LLM, which typically
                requires OpenAI credentials — pass a local model here to avoid
                that. (Previously this was hard-coded to ``None`` with a TODO.)

        Returns:
            The ragas evaluation result for context precision/recall,
            faithfulness and answer relevancy.
        """
        questions = [tc.question for tc in test_cases]
        contexts = [tc.contexts for tc in test_cases]
        references = [tc.ground_truths for tc in test_cases]
        # Generate one answer per test case with the agent under evaluation.
        answers = [
            agent.generate_answer(tc.question, tc.contexts) for tc in test_cases
        ]

        # Column names follow the schema ragas' evaluate() expects.
        data = {
            "question": questions,
            "answer": answers,
            "contexts": contexts,
            "reference": references,
        }

        # ragas consumes a HuggingFace Dataset, not a plain dict.
        dataset = Dataset.from_dict(data)

        result = evaluate(
            dataset=dataset,
            metrics=[
                context_precision,
                context_recall,
                faithfulness,
                answer_relevancy,
            ],
            llm=llm,
        )

        return result
