import re

from langchain_chroma import Chroma
from langchain_community.document_loaders import PyPDFLoader


def load_and_clean_pdf(pdf_path, watermark_pattern=r'百度文库\s*-\s*好好学习，天天向上\s*'):
    """Load a PDF and strip watermark text from every page.

    Args:
        pdf_path: Path to the PDF file to load.
        watermark_pattern: Regex removed from each page's text. Defaults to
            the Baidu Wenku watermark found in this particular document;
            parameterized so the function works for other sources too.

    Returns:
        The list of per-page Document objects with cleaned ``page_content``.
    """
    # Load the PDF into one Document per page.
    docs = PyPDFLoader(pdf_path).load()
    print(f"加载了 {len(docs)} 个文档页面")

    # Compile once instead of re-scanning the pattern string per page.
    watermark_re = re.compile(watermark_pattern)
    for doc in docs:
        # Remove the watermark text.
        cleaned_content = watermark_re.sub('', doc.page_content)
        # Collapse runs of blank lines into a single blank line.
        cleaned_content = re.sub(r'\n\s*\n', '\n\n', cleaned_content.strip())
        doc.page_content = cleaned_content

    return docs


# Run the cleaning pipeline on the assessment document.
docs = load_and_clean_pdf("./浦发上海浦东发展银行西安分行个金客户经理考核办法.pdf")

# Preview the first three cleaned pages, truncating long pages to 300 chars.
print("清理后的文档内容预览:")
for page_no, page in enumerate(docs[:3], start=1):
    print(f"\n=== 第{page_no}页 ===")
    text = page.page_content
    if len(text) > 300:
        print(text[:300] + "...")
    else:
        print(text)

print("===========================\n")

import os
from langchain_community.embeddings import DashScopeEmbeddings
from langchain_community.llms import Tongyi
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.retrievers import ParentDocumentRetriever
from langchain.storage import InMemoryStore

# Initialize the large language model.
DASHSCOPE_API_KEY = os.getenv("DASHSCOPE_API_KEY")
# SECURITY: never print the raw API key — it is a secret and would leak into
# logs. Report only whether it is configured.
print("DASHSCOPE_API_KEY set:", DASHSCOPE_API_KEY is not None)
llm = Tongyi(
    model_name="qwen-plus",
    dashscope_api_key=DASHSCOPE_API_KEY
)

# Create the embedding model (used both for indexing and for ragas below).
embeddings = DashScopeEmbeddings(
    model="text-embedding-v3",
    dashscope_api_key=DASHSCOPE_API_KEY
)

# Chunk sizes for the two-level split: parent chunks carry the context that
# is returned to the LLM; child chunks are what get embedded and searched.
PARENT_CHUNK_SIZE = 512
CHILD_CHUNK_SIZE = 256

parent_splitter = RecursiveCharacterTextSplitter(chunk_size=PARENT_CHUNK_SIZE)
child_splitter = RecursiveCharacterTextSplitter(chunk_size=CHILD_CHUNK_SIZE)

# Vector store indexes the child chunks; the in-memory docstore holds the
# parent chunks they came from.
vectorstore = Chroma(
    collection_name="split_parents", embedding_function=embeddings
)
store = InMemoryStore()

# Retriever: searches over children, returns their parent documents (top 2).
retriever = ParentDocumentRetriever(
    vectorstore=vectorstore,
    docstore=store,
    child_splitter=child_splitter,
    parent_splitter=parent_splitter,
    search_kwargs={"k": 2},
)

# Add documents in batches to stay under DashScope's per-request limit
# (max 10 texts per embedding call). ParentDocumentRetriever splits every
# document into many child chunks, so we add one source document at a time
# to be safely below the limit.
batch_size = 1
for i in range(0, len(docs), batch_size):
    batch_docs = docs[i:i + batch_size]
    print(f"正在处理第 {i//batch_size + 1} 批文档，共 {len(batch_docs)} 个文档...")
    retriever.add_documents(batch_docs)

print(f"总共处理了 {len(docs)} 个文档")

# Number of parent chunks produced. Bug fix: the original computed this value
# as a bare expression and silently discarded it — print it so it is visible.
print(f"父文档数量: {len(list(store.yield_keys()))}")

from langchain.prompts import ChatPromptTemplate
from langchain.schema.runnable import RunnableMap
from langchain.schema.output_parser import StrOutputParser

# Prompt template for concise, context-grounded answers.
template = """You are an assistant for question-answering tasks. 
Use the following pieces of retrieved context to answer the question. 
If you don't know the answer, just say that you don't know. 
Use two sentences maximum and keep the answer concise.
Question: {question}
Context: {context}
Answer:
"""

# Build the prompt from the template.
prompt = ChatPromptTemplate.from_template(template)


def _fetch_context(inputs):
    # Retrieve parent documents relevant to the incoming question.
    return retriever.invoke(inputs["question"])


def _extract_question(inputs):
    # Pass the question through unchanged.
    return inputs["question"]


# LCEL pipeline: map inputs -> fill prompt -> LLM -> plain string.
chain = (
    RunnableMap({"context": _fetch_context, "question": _extract_question})
    | prompt
    | llm
    | StrOutputParser()
)

query = "客户经理被投诉了，投诉一次扣多少分？"
response = chain.invoke({"question": query})
print(response)

print("==============*********************=============\n")

from datasets import Dataset

questions = [
    "客户经理被投诉了，投诉一次扣多少分？",
    "客户经理每年评聘申报时间是怎样的？",
    "客户经理在工作中有不廉洁自律情况的，发现一次扣多少分？",
    "客户经理不服从支行工作安排，每次扣多少分？",
    "客户经理需要什么学历和工作经验才能入职？",
    "个金客户经理职位设置有哪些？"
]

ground_truths = [
    "每投诉一次扣2分",
    "每年一月份为客户经理评聘的申报时间",
    "在工作中有不廉洁自律情况的每发现一次扣50分",
    "不服从支行工作安排，每次扣2分",
    "须具备大专以上学历，至少二年以上银行工作经验",
    "个金客户经理职位设置为：客户经理助理、客户经理、高级客户经理、资深客户经理"
]

answers = []
contexts = []

# Inference: answer each question and record the contexts retrieved for it.
# Fixes: use retriever.invoke (get_relevant_documents is deprecated and
# inconsistent with the chain above), and avoid naming the comprehension
# variable `docs`, which shadowed the module-level document list.
for question in questions:
    answers.append(chain.invoke({"question": question}))
    contexts.append([d.page_content for d in retriever.invoke(question)])

# Assemble into the column layout ragas expects.
data = {
    "user_input": questions,
    "response": answers,
    "retrieved_contexts": contexts,
    "reference": ground_truths
}

# Convert the dict into a HuggingFace Dataset.
dataset = Dataset.from_dict(data)
# 评测结果
from ragas import evaluate
from ragas.metrics import (
    faithfulness,
    answer_relevancy,
    context_recall,
    context_precision,
)
from ragas.llms import LangchainLLMWrapper
from ragas.embeddings import LangchainEmbeddingsWrapper

# Wrap the existing Tongyi LLM and DashScope embeddings so ragas can use them.
ragas_llm = LangchainLLMWrapper(llm)
ragas_embeddings = LangchainEmbeddingsWrapper(embeddings)

result = evaluate(
    dataset=dataset,
    metrics=[
        context_precision,
        context_recall,
        faithfulness,
        answer_relevancy,
    ],
    llm=ragas_llm,
    # Bug fix: pass the wrapped embeddings — the wrapper was created above
    # but the raw LangChain embeddings object was being passed instead.
    embeddings=ragas_embeddings,
)

df = result.to_pandas()
print(df)
# Persist the evaluation results in several formats.
df.to_csv("ragas_evaluation_results.csv", index=False)
df.to_json("ragas_evaluation_results.json", orient="records", force_ascii=False)
df.to_excel("ragas_evaluation_results.xlsx", index=False)
# Show the key columns for a quick look.
print(df[["user_input", "response", "reference"]])
# Export as an HTML table. Explicit UTF-8 is required: the table contains
# Chinese text and a platform default such as GBK/cp1252 could fail to
# encode it.
html_table = df.to_html()
with open("ragas_evaluation_results.html", "w", encoding="utf-8") as f:
    f.write(html_table)