from datasets import load_dataset
from ragas import EvaluationDataset
from ragas.metrics import LLMContextRecall, Faithfulness, FactualCorrectness, SemanticSimilarity, ResponseRelevancy
from ragas import evaluate
from ragas.llms import LangchainLLMWrapper
from ragas.embeddings import LangchainEmbeddingsWrapper
from langchain_openai import ChatOpenAI, OpenAIEmbeddings

# Evaluator components: wrap LangChain's OpenAI chat model and embedding
# model so the ragas metrics can drive them as judge LLM / embedder.
evaluator_llm = LangchainLLMWrapper(ChatOpenAI(model="gpt-4o"))
evaluator_embeddings = LangchainEmbeddingsWrapper(OpenAIEmbeddings())

# Pull the Amnesty QA benchmark from the Hugging Face Hub, wrap its "eval"
# split as a ragas EvaluationDataset, and keep only the first sample so the
# run stays quick and cheap.
dataset = load_dataset("explodinggradients/amnesty_qa", "english_v3")
eval_dataset = EvaluationDataset.from_hf_dataset(dataset["eval"])[:1]

# Metrics to compute. Each metric carries its own judge LLM.
# ResponseRelevancy additionally requires an embedding model: it embeds the
# questions it generates from the answer and the original question to score
# their similarity — without it the metric fails at runtime, and the
# evaluator_embeddings wrapper built above was otherwise unused.
metrics = [
    LLMContextRecall(llm=evaluator_llm),
    ResponseRelevancy(llm=evaluator_llm, embeddings=evaluator_embeddings),
    Faithfulness(llm=evaluator_llm),
]
results = evaluate(dataset=eval_dataset, metrics=metrics)

# Flatten the evaluation results into a DataFrame and print each row as
# "column:   value" lines, with a blank line between rows.
df = results.to_pandas()

for _, row in df.iterrows():
    for column in df.columns:
        print(f"{column}:   {row[column]}")
    print()  # blank line between rows