import os
from dotenv import load_dotenv, find_dotenv # 导入 find_dotenv 帮助定位
from langchain_community.embeddings import SentenceTransformerEmbeddings
from ragas.metrics import (
    answer_relevancy,
    faithfulness,
    context_recall,
    context_precision,
    answer_correctness,
    answer_similarity
)
from ragas import evaluate
from langchain_core.runnables import RunnablePassthrough
from langchain_core.output_parsers import StrOutputParser
from langchain_community.chat_models import ChatZhipuAI
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
from langchain.output_parsers import ResponseSchema, StructuredOutputParser
from langchain.prompts import ChatPromptTemplate
from sentry_sdk.integrations.beam import raise_exception
from tqdm import tqdm
import pandas as pd
from datasets import Dataset

# Load environment variables from the nearest .env file (searched upward from
# the current working directory); override values already set in the process.
load_dotenv(dotenv_path=find_dotenv(usecwd=True), verbose=True, override=True)

# Read the API credentials; fail fast with a clear message instead of the
# opaque TypeError that os.environ assignment would raise on a None value.
api_key = os.getenv("OPENAI_API_KEY")
api_base = os.getenv("OPENAI_API_BASE")
zhipuai_api_key = os.getenv("ZHIPUAI_GLM4_API_KEY")
if not api_key or not api_base:
    raise RuntimeError(
        "OPENAI_API_KEY and OPENAI_API_BASE must be set in the environment or the .env file"
    )
os.environ["OPENAI_API_KEY"] = api_key
os.environ["OPENAI_API_BASE"] = api_base

from pypdf import PdfReader

# NOTE(review): hard-coded absolute path — consider making this configurable.
reader = PdfReader("D://代码//AIStudy//RAG//data//关于开展2025-2026学年校园地国家助学贷款新贷工作的通知.pdf")

# Extract the text of every page, keeping the 1-based page number so it can
# travel with each chunk as metadata.
pdf_texts = []
for page_num, page in enumerate(reader.pages):
    # extract_text() may yield None/empty for image-only pages; guard before strip().
    text = (page.extract_text() or "").strip()
    if text:
        pdf_texts.append({"page_number": page_num + 1, "content": text})

from langchain.text_splitter import RecursiveCharacterTextSplitter

# Split each page into overlapping chunks, preferring paragraph, line and
# sentence ("。") boundaries before falling back to single characters.
character_splitter = RecursiveCharacterTextSplitter(
    separators=["\n\n", "\n", "。", " ", ""],
    chunk_size=500,
    chunk_overlap=50,
)

# Flatten (page, chunk) pairs into one list, carrying the page number along.
character_split_texts = [
    {"page_number": page["page_number"], "content": chunk}
    for page in pdf_texts
    for chunk in character_splitter.split_text(page["content"])
]

# Wrap every chunk in a LangChain Document, keeping the page number as metadata.
from langchain.schema import Document

base_docs = [
    Document(page_content=entry["content"], metadata={"page_number": entry["page_number"]})
    for entry in character_split_texts
]

from langchain_openai import OpenAIEmbeddings
from langchain_community.vectorstores import Chroma

# Embed every chunk with the OpenAI embedding model and store the vectors in
# a Chroma collection (uses OPENAI_API_KEY / OPENAI_API_BASE set above).
embed_model = OpenAIEmbeddings()
vectorstore = Chroma.from_documents(documents=base_docs, embedding=embed_model, collection_name="openai_embed")

# Build the retriever; k=2 returns the two most similar chunks per query.
base_retriever = vectorstore.as_retriever(search_kwargs={"k": 2})

# Structured-output schema: the question-generation model must return a JSON
# object with a single "question" key.
question_schema = ResponseSchema(
    name="question",
    type="string",
    description="一个关于上下文的问题"
)

question_response_schema = [
    question_schema,
]

# Parser that extracts the JSON payload from the model response, plus the
# canonical formatting instructions it expects the prompt to include.
question_output_parser = StructuredOutputParser.from_response_schemas(question_response_schema)
format_instructions = question_output_parser.get_format_instructions()

# print(format_instructions)

# GLM-4 model used to generate exam-style questions from each chunk.
question_generation_llm = ChatZhipuAI(model="glm-4", zhipuai_api_key=zhipuai_api_key)

# Pass-through template: fully formatted messages are injected verbatim under
# the "content" variable by the chain below.
bare_prompt_template = "{content}"
bare_template = ChatPromptTemplate.from_template(template=bare_prompt_template)

# Prompt for question generation. {format_instructions} is supplied by the
# StructuredOutputParser at call time — previously it was passed to
# format_messages but silently dropped because the template had no matching
# placeholder, so the model never saw the canonical JSON format spec.
qa_template = """\
您是一位大学教授，正在为高级学生制定考试。对于每个背景信息，请创建一个与该上下文具体相关的问题。避免创建通用或一般性的问题。

问题：关于上下文的一个问题。

{format_instructions}

上下文:{context}
"""

prompt_template = ChatPromptTemplate.from_template(template=qa_template)

# Question-generation chain: pre-formatted messages pass straight through the
# bare template to the LLM.
question_generation_chain = bare_template | question_generation_llm

# Build (question, context) pairs; ground-truth answers are added in the next
# stage to complete the triple.
qgc_triples = []

# Generate one question per chunk, for the first 10 chunks only.
for doc in tqdm(base_docs[:10]):
    messages = prompt_template.format_messages(
        # Pass the plain chunk text — formatting the Document object itself
        # would embed its repr (metadata noise) into the prompt.
        context=doc.page_content,
        format_instructions=format_instructions
    )
    response = question_generation_chain.invoke({"content": messages})

    try:
        output_dict = question_output_parser.parse(response.content)
    except Exception as e:
        # The model occasionally emits malformed JSON; skip that chunk.
        print("模型生成问题时发生异常，异常原因为：", e)
        continue

    # Keep the full Document so downstream code can read .page_content.
    output_dict["context"] = doc
    qgc_triples.append(output_dict)

# GLM-4 model used to produce the ground-truth answer for each question.
answer_generation_llm = ChatZhipuAI(model="glm-4", zhipuai_api_key=zhipuai_api_key)

# Structured-output schema: the answer model must return a JSON object with a
# single "answer" key.
answer_schema = ResponseSchema(
    name="answer",
    type="string",
    description="一个问题的答案"
)

answer_response_schema = [
    answer_schema,
]

answer_output_parser = StructuredOutputParser.from_response_schemas(answer_response_schema)
# NOTE: rebinds the module-level format_instructions used by the loop below.
format_instructions = answer_output_parser.get_format_instructions()

# Prompt for ground-truth answer generation.
# Bug fix: the original used full-width braces ｛question｝ (U+FF5B/U+FF5D),
# which str.format does not treat as a placeholder — the question was
# silently never inserted into the prompt. Replaced with ASCII {question}.
qa_template = """\
您是一位大学教授，正在为高级学生制定考试。对于每个问题和背景答案，创建一个答案。

答案：关于上下文的一个答案。

以JSON格式输出，包含以下键：
answer

问题: {question}
上下文: {context}
"""

prompt_template = ChatPromptTemplate.from_template(template=qa_template)

# Generate a ground-truth answer for each (question, context) pair.
for triple in tqdm(qgc_triples):
    messages = prompt_template.format_messages(
        question=triple["question"],
        # Bug fix: the triple stores the Document under the key "context"
        # (not "content"); pass its plain text to the prompt.
        context=triple["context"].page_content,
        format_instructions=format_instructions
    )

    # Bug fix: a chat model is invoked with the message list directly —
    # there is no template in front here, so the {"content": ...} dict used
    # by the question chain does not apply.
    response = answer_generation_llm.invoke(messages)
    try:
        output_dict = answer_output_parser.parse(response.content)
    except Exception as e:
        # Skip samples whose JSON payload fails to parse.
        print("模型生成答案时发生异常，异常原因为：", e)
        continue
    triple["answer"] = output_dict["answer"]

# Persist the ground-truth dataset (question / context / ground_truth) locally.
ground_truth_qgc_set = (
    pd.DataFrame(qgc_triples)
    .assign(context=lambda df: df["context"].map(lambda doc: str(doc.page_content)))
    .rename(columns={"answer": "ground_truth"})
)

eval_dataset = Dataset.from_pandas(ground_truth_qgc_set)
eval_dataset.to_csv("ground_truth_eval_dataset.csv")

# Answer each question through the RAG chain and collect evaluation records.
# Bug fix: the original used full-width braces ｛question｝, so the question
# was never substituted into the prompt; replaced with ASCII {question}.
gen_template = """\
您是一位善于分析问题的学者，对于每个问题，可以基于RAG技术检索到相关的上下文来综合的回答别人提出的问题。

answer: 利用检索到的上下文回答问题。

以JSON格式输出，包含以下键：
answer

问题: {question}
上下文: {context}
"""

prompt = ChatPromptTemplate.from_template(template=gen_template)

# Record layout: question, answer, contexts, ground_truths.
rag_dataset = []

# The chain is loop-invariant; build it once instead of per question.
rag_chain = (
    {"context": base_retriever, "question": RunnablePassthrough()}
    | prompt
    | answer_generation_llm  # answer model — GLM-4 as a stand-in for now
    | StrOutputParser()
)

for row in tqdm(eval_dataset):
    question = row["question"]
    answer = rag_chain.invoke(question)
    # Bug fix: store the retrieved chunk texts, not the retriever object —
    # ragas expects "contexts" to be a list of strings.
    retrieved_contexts = [doc.page_content for doc in base_retriever.invoke(question)]
    rag_dataset.append(
        {
            "question": question,
            "answer": answer,
            "contexts": retrieved_contexts,
            # Using row["context"] directly would drive context_precision to 1.
            "ground_truths": [row["ground_truth"]],
        }
    )

rag_df = pd.DataFrame(rag_dataset)
rag_eval_dataset = Dataset.from_pandas(rag_df)

rag_eval_dataset.to_csv("rag_dataset.csv")

def evaluate_ragas_dataset(ragas_dataset):
    """Run the ragas metric suite over *ragas_dataset* and return the result."""
    metrics = [
        context_precision,
        faithfulness,
        answer_relevancy,
        answer_correctness,
        answer_similarity,
        context_recall,
    ]
    # GLM-4 acts as the judge model; embeddings reuse the OpenAI embedder.
    # Alternative: a local model via
    # SentenceTransformerEmbeddings(model_name="D:\\path\\model_name").
    judge_llm = ChatZhipuAI(model="glm-4", zhipuai_api_key=zhipuai_api_key)
    return evaluate(
        ragas_dataset,
        metrics=metrics,
        llm=judge_llm,
        embeddings=embed_model,
        raise_exceptions=False,  # keep going when a single sample fails
    )

#  Run the evaluation over the generated RAG dataset.
evaluation_results = evaluate_ragas_dataset(rag_eval_dataset)
print(evaluation_results)
#  Tabulate the per-metric scores.
#  NOTE(review): assumes the ragas Result behaves like a metric->score mapping;
#  newer ragas versions also expose result.to_pandas() — verify against the
#  installed version.
print(pd.DataFrame.from_dict(evaluation_results, orient="index"))

