import json

import pandas as pd

from ragas import  evaluate
from ragas.metrics import (
    answer_relevancy,
    context_precision,
    context_recall,
    faithfulness,
    answer_similarity,
    answer_correctness,
)
from datasets import  Dataset

from langchain_ollama import ChatOllama, OllamaLLM, OllamaEmbeddings

# Load the evaluation samples produced by the RAG pipeline.
# Each record is expected to carry: question, answer, context, ground_truth.
with open("rag_evaluate_data.json", "r", encoding="utf-8") as f:
    data = json.load(f)

print(f"加载评估样本的数量：{len(data)}")

# Re-shape the records into the column-oriented dict that
# datasets.Dataset.from_dict expects.  ragas looks these columns up by
# these exact names: "question", "answer", "contexts", "ground_truth"
# (note the source field is singular "context", the ragas column is
# plural "contexts").
eval_data = {
    "question": [item["question"] for item in data],
    "answer": [item["answer"] for item in data],
    "contexts": [item["context"] for item in data],
    "ground_truth": [item["ground_truth"] for item in data],
}

dataset = Dataset.from_dict(eval_data)
print(f"评估数据集的前5条样本：{dataset}")

# Judge model used by the LLM-based metrics.
# BUG FIX: ragas drives the judge through a chat-model interface; the plain
# completion wrapper (OllamaLLM) made every LLM-judged metric come back NaN
# (see the recorded result below — only the embeddings-only
# semantic_similarity succeeded).  ChatOllama (already imported above) is
# the chat interface ragas needs.
llm = ChatOllama(model="qwen3:1.7b", base_url="localhost:11434")
# Embedding model for the similarity-based metrics.  The previous run's
# semantic_similarity score (0.9535) shows Ollama can embed with this
# model; consider a dedicated embedding model (e.g. nomic-embed-text)
# if embedding quality matters — TODO confirm.
embeddings = OllamaEmbeddings(model="qwen3:1.7b", base_url="localhost:11434")

# Run all six ragas metrics over the dataset with the local models.
result = evaluate(
    dataset=dataset,
    metrics=[
        answer_relevancy,
        context_precision,
        context_recall,
        faithfulness,
        answer_similarity,
        answer_correctness,
    ],
    llm=llm,
    embeddings=embeddings,
)

# Result of the earlier OllamaLLM run — every LLM-judged metric was NaN:
# {'answer_relevancy': nan, 'context_precision': nan, 'context_recall': nan,
#  'faithfulness': nan, 'semantic_similarity': 0.9535, 'answer_correctness': nan}

print(f"评估结果：{result}")