# LangChain supports many other chat models. Here, we're using Ollama
import csv
from typing import List

from datasets import load_dataset, Dataset
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_community.chat_models import ChatOllama
from langchain_community.embeddings import XinferenceEmbeddings
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from ragas.embeddings import BaseRagasEmbeddings, LangchainEmbeddingsWrapper
from ragas.llms import BaseRagasLLM, LangchainLLMWrapper
from langfuse.callback import CallbackHandler
from ragas.metrics import (
    answer_relevancy,
    faithfulness,
    context_recall,
    context_precision,
)

from src.ragas.evaluation import evaluate
from src.ragas.run_config import RunConfig

# Langfuse callback handler used to trace every LLM call in this script.
# NOTE(security): secret/public keys and host are hard-coded here — move
# them to environment variables before sharing or deploying this file.
langfuse_handler = CallbackHandler(
    secret_key="sk-lf-00670216-9cab-4849-9579-b35b2b5fafd3",
    public_key="pk-lf-0d7d54d1-cdc7-472a-96be-ec3871c07040",
    host="http://192.168.2.17:3000",  # self-hosted Langfuse instance on the LAN
)


def get_answer_from_llm(question: str) -> str:
    """Ask the local Ollama ``qwen:14b`` model a question and return its reply.

    Args:
        question: The user question. It is passed as a template *variable*,
            so questions containing literal ``{`` or ``}`` are safe.

    Returns:
        The model's answer as a plain string.
    """
    llm = ChatOllama(model="qwen:14b", base_url="http://192.168.2.17:11434")
    # Use a fixed template with a variable instead of treating the raw
    # question as the template string: the original crashed whenever the
    # question contained braces, and its invoke() payload
    # ({"input": "<user_input>"}) was a dead placeholder that never
    # reached the prompt at all.
    prompt = ChatPromptTemplate.from_template("{question}")
    chain = prompt | llm | StrOutputParser()

    return chain.invoke(
        {"question": question}, config={"callbacks": [langfuse_handler]}
    )


def csv_to_columnar_dict(file_path: str, max_rows: int = 6) -> dict:
    """Read up to *max_rows* data rows from a CSV file into a columnar dict.

    The CSV is expected to have a header row (which is skipped) and at
    least three columns: column 0 holds the question, column 2 the
    ground-truth answer.

    Args:
        file_path: Path to the CSV file.
        max_rows: Maximum number of data rows to read. Defaults to 6,
            matching the original hard-coded limit.

    Returns:
        ``{"question": [...], "ground_truth": [...]}`` — both lists are
        empty when the file has no data rows (the original raised
        NameError in that case because the result dict was only built
        inside the row loop).
    """
    questions: List[str] = []
    answers: List[str] = []
    with open(file_path, 'r', encoding='utf-8') as f:
        reader = csv.reader(f)
        next(reader, None)  # skip the header row; tolerate an empty file
        for count, row in enumerate(reader):
            if count >= max_rows:
                break
            questions.append(row[0])
            answers.append(row[2])
    return {"question": questions, "ground_truth": answers}


def get_datasets(datasets: dict) -> dict:
    """Augment *datasets* in place with model answers and contexts.

    For every entry in ``datasets["question"]`` the local LLM is queried
    once; the answer also serves as the single retrieval context for
    that question.

    Args:
        datasets: Columnar dict with at least a ``"question"`` list.

    Returns:
        The same dict, now carrying ``"contexts"`` (one-element lists of
        answers) and ``"answer"`` (list of answer strings).
    """
    answers = [get_answer_from_llm(question) for question in datasets["question"]]

    datasets["contexts"] = [[answer] for answer in answers]
    datasets["answer"] = answers
    return datasets


# 本地模型llm
# class QwenOllamaLLM(BaseRagasLLM):
#     def __init__(self, ollama_url: str, model_name: str = "qwen"):
#         self.ollama_url = ollama_url
#         self.model_name = model_name
#
#     def generate(self, prompt: str, **kwargs) -> List[str]:
#         """Send request to Ollama and get response."""
#         # 实现具体的HTTP请求逻辑，使用self.ollama_url和self.model_name
#         pass
#
# llm=QwenOllamaLLM(ollama_url="http://192.168.2.17:11434", model_name="qwen:14b")

#  本地embeddings模型
# langchain_llm=LangchainLLMWrapper(langchain_llm=ChatOllama(model="qwen:14b",base_url="http://192.168.2.17:11434"))
# langchain_embeddings=LangchainEmbeddingsWrapper(embeddings=XinferenceEmbeddings(
#     server_url="http://192.168.2.17:9997", model_uid="bce-embedding-base_v1"
# ))
#
#
#

# # Your Langchain code
#
# # Add Langfuse handler as callback (classic and LCEL)
# # dataset=load_dataset()
# # dataset()
# evaluation = evaluate(
#     dataset=dataset,
#     metrics=[
#         context_precision,
#         faithfulness,
#         answer_relevancy,
#         context_recall,
#     ],
#     callbacks=[langfuse_handler],
# )


if __name__ == '__main__':
    # Build the evaluation dataset: questions + ground truth come from the
    # CSV; answers and contexts are produced by the local LLM.
    data = csv_to_columnar_dict('../../CoT_zh/CoT_Chinese_data.csv')
    datasets = get_datasets(data)
    dataRagas = Dataset.from_dict(datasets)

    # Judge LLM and embedding model used by the ragas metrics.
    llm = ChatOllama(model="qwen:14b", base_url="http://192.168.2.17:11434")
    embedding = XinferenceEmbeddings(
        server_url="http://192.168.2.17:9997", model_uid="bce-embedding-base_v1"
    )
    # runConfig = RunConfig(timeout=600)
    result = evaluate(dataset=dataRagas, metrics=[answer_relevancy,
                                                  faithfulness,
                                                  context_recall,
                                                  context_precision],
                      llm=llm, embeddings=embedding)
    # Compute the DataFrame once (the original called to_pandas() twice),
    # then export to CSV and echo it to stdout.
    result_df = result.to_pandas()
    result_df.to_csv("./result.csv", index=False)
    print(result_df)
