# Base class for large-language-model wrappers
from langchain.llms.base import LLM

# By default ragas internally uses OpenAI's API key for its embedding and LLM services
from openai import OpenAI
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_chroma import Chroma
import chromadb
from datasets import Dataset
from ragas.metrics import (
    faithfulness,  # 忠实度
    answer_relevancy,  # 答案和相关性
    context_recall,  # 上下文招回率
    context_precision,  # 上下文精度
)
from ragas import evaluate, RunConfig
from pydantic import PrivateAttr


class LocalEmbedding:
    """Wraps a locally stored HuggingFace sentence-embedding model for LangChain."""

    def __init__(self, model_name="D:/aproject/models/all-MiniLM-L6-v2"):
        """Load the embedding model.

        Args:
            model_name: Filesystem path (or HF hub name) of the
                sentence-transformer model. The original default mixed "/"
                and "\\" and contained the raw escape "\\a" (BEL character),
                producing a broken path; forward slashes fix the literal.
        """
        self.embedding_model = HuggingFaceEmbeddings(
            model_name=model_name,  # model path/name
            model_kwargs={"device": "cpu"},  # run inference on CPU
            # Fix: the sentence-transformers option is "normalize_embeddings"
            # (plural); the original "normalize_embedding" key was silently
            # ignored, so embeddings were never normalized.
            encode_kwargs={"normalize_embeddings": True},
        )

    def get(self):
        """Return the underlying HuggingFaceEmbeddings instance."""
        return self.embedding_model


class LangchainLLM(LLM):
    """LangChain LLM adapter over an OpenAI-compatible completions endpoint
    (e.g. a local Ollama server exposing /v1)."""

    # Private attrs are excluded from pydantic validation/serialization.
    _client = PrivateAttr()
    _model = PrivateAttr()

    def __init__(
        self, base_url="http://localhost:11434/v1", api_key="gemma3", model="gemma3"
    ):
        """Create the OpenAI-compatible client.

        Args:
            base_url: Endpoint of the OpenAI-compatible server.
            api_key: API key (a local server typically accepts any value).
            model: Model name to request completions from.
        """
        super().__init__()
        self._client = OpenAI(base_url=base_url, api_key=api_key)
        self._model = model

    def _call(self, prompt, stop=None, run_manager=None, **kwargs):
        """Send *prompt* to the completions endpoint and return the text.

        Args:
            prompt: The rendered prompt string.
            stop: Optional stop sequences (now forwarded to the API; the
                original accepted this parameter but ignored it).
            run_manager: LangChain callback manager (unused).
            **kwargs: Optional generation overrides (temperature, top_p,
                max_tokens).

        Returns:
            The completion text of the first choice.
        """
        completion = self._client.completions.create(
            model=self._model,
            prompt=prompt,
            stop=stop,  # fix: previously accepted but never forwarded
            temperature=kwargs.get("temperature", 0.1),
            top_p=kwargs.get("top_p", 0.9),
            max_tokens=kwargs.get("max_tokens", 4096),
            # Fix: the original forwarded a caller-supplied stream flag, but
            # the return statement below assumes a non-streamed response
            # object; force non-streaming so choices[0].text is always valid.
            stream=False,
        )
        return completion.choices[0].text

    @property
    def _llm_type(self):
        """Identifier LangChain uses to tag this LLM implementation."""
        return self._model


class Rag:
    """Retrieval-augmented generation pipeline over a Chroma vector store."""

    def __init__(
        self,
        db_name="product_db",  # Chroma collection name
        embedding_model=None,  # embedding model (defaults to LocalEmbedding)
        llm=None,  # LLM service (defaults to LangchainLLM)
        prompt_template=None,  # prompt template with {question}/{context} slots
        chroma_client=None,  # Chroma client (defaults to a local persistent DB)
    ):
        """Wire up the embedding model, LLM, vector store and prompt template.

        Any argument left as None falls back to a local default service.
        """
        self.embedding_model = embedding_model or LocalEmbedding().get()
        self.llm = llm or LangchainLLM()
        self.chroma_client = chroma_client or chromadb.PersistentClient(
            path="./chromadb"
        )
        self.db = Chroma(db_name, self.embedding_model, client=self.chroma_client)
        self.prompt_template = prompt_template or (
            "你是电子产品售后服务助手，熟悉手机、电脑等产品的保修、维修、退换货等政策。"
            "请根据提供的上下文信息context，专业、简明地回答用户的售后相关问题。"
            "如果上下文没有相关信息，请回答[请联系品牌官方售后或客服]。\n"
            '问题：{question}\n"{context}"\n答复：'
        )

    def retrieve_context(self, query, top_k=3):
        """Return (joined context string, list of per-document context strings)
        for the *top_k* documents most similar to *query*.

        Renamed `top_key` -> `top_k` for consistency with `answer()`; the
        internal caller passes it positionally.
        """
        docs = self.db.similarity_search(query, top_k)
        context_list = [
            f"上下文{i+1}:{doc.page_content}\n" for i, doc in enumerate(docs)
        ]
        return "\n".join(context_list), context_list

    def build_prompt(self, question, context):
        """Render the prompt template with the question and retrieved context.

        Uses str.replace (not str.format) so braces inside the question or
        context cannot break the substitution.
        """
        return self.prompt_template.replace("{question}", question).replace(
            "{context}", context
        )

    def answer(self, question, top_k=3):
        """Answer *question* using retrieved context.

        Returns:
            (response text, list of context strings used).
        """
        context_str, context_list = self.retrieve_context(question, top_k)
        prompt = self.build_prompt(question, context_str)
        response = self.llm.invoke(prompt, stream=False)
        return response, context_list


def main():
    """Run sample after-sales questions through the RAG pipeline and score the
    answers with ragas (context precision/recall, faithfulness, relevancy)."""
    questions = [
        "如何申请手机保修服务？",
        "笔记本电脑电池鼓包怎么办？",
    ]
    # Reference (ground-truth) answer for each question.
    ground_truths = [
        "您可携带购机发票和保修卡前往品牌授权售后服务中心，工程师检测后符合保修政策即可免费维修。",
        "如发现笔记本电脑电池鼓包，请立即停止使用并联系品牌售后服务中心进行更换，切勿自行拆卸或继续充电。",
    ]
    rag = Rag()
    answers, contexts = [], []
    for q in questions:
        answer, context_list = rag.answer(q, top_k=3)
        print(answer)
        print(context_list)
        answers.append(answer)
        contexts.append(context_list)
    # Column names follow the ragas dataset schema; "reference" is the modern
    # name, "ground_truths" is kept for older metric versions.
    data = {
        "question": questions,
        "answer": answers,
        "contexts": contexts,
        "ground_truths": ground_truths,
        "reference": ground_truths,
    }
    print("data", data)
    dataset = Dataset.from_dict(data)
    # Fix: datasets.Dataset exposes `column_names`, not `columns`; the
    # original attribute access raised AttributeError.
    print(dataset.column_names)
    # Evaluation run configuration.
    config = RunConfig(
        timeout=1200, log_tenacity=True  # per-call timeout (s)  # log retries
    )
    eval_llm = LangchainLLM()
    eval_embedding = LocalEmbedding().get()
    result = evaluate(
        dataset=dataset,
        llm=eval_llm,  # use our own LLM service instead of OpenAI
        embeddings=eval_embedding,  # use our own embedding service
        metrics=[
            context_precision,  # context precision
            context_recall,  # context recall
            faithfulness,  # faithfulness
            answer_relevancy,  # answer relevancy
        ],
        raise_exceptions=True,  # surface evaluation errors instead of hiding them
        run_config=config,
    )
    print(result)
    df = result.to_pandas()
    # Log the ragas evaluation results.
    print("[MAIN] ragas评测结果：")
    # Print the per-sample DataFrame.
    print(df)


# Run the demo/evaluation only when executed as a script, not on import.
if __name__ == "__main__":
    main()
