import os

os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"
os.environ["LANGCHAIN_API_KEY"] = "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"
os.environ["LANGCHAIN_PROJECT"] = "default"

from langchain_community.vectorstores import Chroma
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.embeddings import DashScopeEmbeddings
from langchain.prompts import ChatPromptTemplate
from pydantic import BaseModel, Field
import uuid

from langchain import hub as langchain_hub
from langchain.schema import StrOutputParser
from langchain_openai import ChatOpenAI
from langchain.schema.runnable import RunnablePassthrough
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_openai import OpenAIEmbeddings
from langchain_community.vectorstores.chroma import Chroma
from openai import OpenAI
from langchain_core.documents import Document
from langchain_community.embeddings import DashScopeEmbeddings
from langchain.prompts import PromptTemplate
from string import Template

# Load API keys from the environment.
# SECURITY FIX: real secrets were previously embedded as getenv fallbacks.
# A missing key should fail loudly at the API call, not ship in source; any
# previously committed keys must be rotated.
DASHSCOPE_API_KEY = os.getenv('DASHSCOPE_API_KEY', '')
DEEPSEEK_API_KEY = os.getenv('DEEPSEEK_API_KEY', '')

# Knowledge-base markdown document used to build the vector store.
file_path = os.path.join('data', 'data.md')
with open(file_path, 'r', encoding="utf-8") as f:
    docs_string = f.read()

# Split the markdown corpus while preserving semantic structure: prefer
# breaking on section headers ("##", "###") and list items so each chunk
# stays a coherent unit; 50-char overlap keeps context across boundaries.
_MD_SEPARATORS = ["\n## ", "\n### ", "\n- ", "\n"]
text_splitter = RecursiveCharacterTextSplitter(
    separators=_MD_SEPARATORS,
    keep_separator=True,
    chunk_size=500,
    chunk_overlap=50,
)
splits = text_splitter.split_text(docs_string)

# Create the vector store from the chunks using DashScope embeddings.
embedding = DashScopeEmbeddings(
    model="text-embedding-v3",
    dashscope_api_key=DASHSCOPE_API_KEY
)

# NOTE(review): a fresh random UUID directory is created in the CWD on every
# run, so old stores accumulate on disk and are never reused -- confirm this
# is intentional (it does guarantee a clean index per run).
vectorstore = Chroma.from_texts(
    texts=splits,
    embedding=embedding,
    persist_directory="./" + str(uuid.uuid4())
)
# Flush the index to disk. (Deprecated in newer Chroma releases, where
# persistence is automatic when persist_directory is set.)
vectorstore.persist()

# Hybrid retrieval via Maximal Marginal Relevance: pull 5 candidates from
# the store, return the 3 that best balance relevance against diversity
# (lambda_mult=0.7 leans toward relevance).
_mmr_search_kwargs = {
    "k": 3,
    "fetch_k": 5,
    "lambda_mult": 0.7,
}
retriever = vectorstore.as_retriever(
    search_type="mmr",
    search_kwargs=_mmr_search_kwargs,
)


class GradeDocuments(BaseModel):
    """Binary relevance grade for a retrieved document vs. the user question."""

    # FIX: description previously read "'yes' or 'yes'" -- the second option
    # must be 'no' for the binary grade to make sense.
    binary_score: str = Field(description="Documents are relevant to the question, 'yes' or 'no'")


# Relevance-grading model: DeepSeek chat via its OpenAI-compatible endpoint.
llm = ChatOpenAI(
    model="deepseek-chat",
    openai_api_key=DEEPSEEK_API_KEY,
    openai_api_base="https://api.deepseek.com/v1"
)

# System prompt (Chinese): "You are a grader assessing the relevance of a
# retrieved document to a user question. The test need not be strict; the
# goal is to filter out erroneous retrievals. Answer only 'yes' or 'no'."
system = '''您是一名评分员，负责评估检索到的文档与用户问题的相关性。

测试不需要很严格。目标是过滤掉错误的检索。

如果文档包含与用户问题相关的关键字或语义含义，则将其评为相关。

只需回答"yes"或"no"，以指示文档是否与问题相关。'''

grade_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", system),
        ("human", "Retrieved document: {document} \n User question: {question}")
    ]
)

# NOTE(review): GradeDocuments is declared above but never bound with
# llm.with_structured_output(GradeDocuments), so grades come back as free
# text in .content rather than a validated binary_score -- confirm intended.
retrieval_grader = grade_prompt | llm
question = "payment_backend 服务是谁维护的"
# get_relevant_documents is deprecated in newer LangChain; retriever.invoke()
# is the modern equivalent.
docs = retriever.get_relevant_documents(question)

# # 观察每一个文档块的相关性判断结果
# for doc in docs:
#     print(f"doc: \n", doc.page_content, "\n")
#     response = retrieval_grader.invoke({"question": question, "document": doc.page_content})
#     print("相关性：", response.content)
#     print("\n")


# # 生成回复
from langchain import hub
from langchain_core.output_parsers import StrOutputParser

# Pull the canonical RAG prompt (context + question -> answer) from LangChain Hub.
prompt = hub.pull("rlm/rag-prompt")
# Answer-generation model; temperature=0 for deterministic replies.
llm = ChatOpenAI(
    model="deepseek-chat",
    openai_api_key=DEEPSEEK_API_KEY,
    openai_api_base="https://api.deepseek.com/v1",
    temperature=0
)

# def format_docs(docs):
#     return "\n".join(doc.page_content for doc in docs)

# Pipeline: {context, question} -> RAG prompt -> LLM -> plain answer string.
rag_chain = prompt | llm | StrOutputParser()


# generation = rag_chain.invoke({"context": format_docs(docs), "question": question})
# print("\n最终回答：")
# print(generation)


# Grade whether the LLM answer is grounded in the retrieved documents
# (hallucination check).
class GradeHallucinations(BaseModel):
    """Binary grounding grade for a generated answer."""

    # 'yes' = grounded in the supplied facts, 'no' = hallucinated.
    binary_score: str = Field(description="Answer is grounded in the facts, 'yes' or 'no'")


llm = ChatOpenAI(
    model="deepseek-chat",
    openai_api_key=DEEPSEEK_API_KEY,
    openai_api_base="https://api.deepseek.com/v1",
    temperature=0
)

# System prompt (Chinese): "You are a grader assessing whether an LLM
# generation is grounded in / supported by a set of retrieved facts.
# Give a binary score 'yes' or 'no'."
system = '''
    您是一名评分员，正在评估 LLM 生成是否基于一组检索到的事实/由一组检索到的事实支持。
    给出二进制分数"yes"或"no"。
    "yes"表示答案基于一组事实/由一组事实支持。
     '''
hallucination_prompt = ChatPromptTemplate.from_messages(
    [("system", system), ("human", "Set of facts: {documents} \n LLM generation: {generation}")])

# NOTE(review): GradeHallucinations is not attached via with_structured_output;
# downstream code reads the grade from the raw .content string.
hallucination_grader = hallucination_prompt | llm


# # print(hallucination_grader.invoke({"documents": docs, "generation": generation}))
# result = hallucination_grader.invoke({"documents": docs, "generation": generation})
# print(f"binary_score: {result.content}")


# Grade whether the LLM answer actually resolves the user's question.
# FIX: this block previously re-declared `GradeHallucinations`, silently
# shadowing the grounding-grader schema with a copy-pasted description.
# The intended schema is an answer grader; the original GradeHallucinations
# defined earlier in the file is left intact.
class GradeAnswer(BaseModel):
    """Binary grade: does the generated answer address the question?"""

    binary_score: str = Field(description="Answer addresses the question, 'yes' or 'no'")


answer_llm = ChatOpenAI(
    model="deepseek-chat",
    openai_api_key=DEEPSEEK_API_KEY,
    openai_api_base="https://api.deepseek.com/v1",
    temperature=0
)

# System prompt (Chinese): grader judging whether the answer contains the
# core information the question asked for; extra related info (e.g. contact
# details) does not hurt the grade. Binary 'yes'/'no'.
system = '''
    您是一名评分员，正在评估答案是否解决了用户的问题。
    评估标准：
    1. 如果回答包含了问题所需的核心信息，应该评估为"yes"
    2. 提供额外的相关信息（如联系方式）不会影响评估结果
    3. 只有当回答没有提供问题所需的核心信息时，才评估为"no"

    给出二进制分数"yes"或"no"。
    "yes"表示答案包含了问题所需的核心信息。
     '''
answer_prompt = ChatPromptTemplate.from_messages(
    [("system", system),
     ("human", "User question: \n\n {question} \n\n LLM generation: {generation}")]
)

answer_grader = answer_prompt | answer_llm
# print(hallucination_grader.invoke({"documents": docs, "generation": generation}))
# result = answer_grader.invoke({"question": question, "generation": generation})
# print(f"binary_score: {result.content}")

# Rewrite the user's question into a version better suited for retrieval.
llm = ChatOpenAI(
    model="deepseek-chat",
    openai_api_key=DEEPSEEK_API_KEY,
    openai_api_base="https://api.deepseek.com/v1",
    temperature=0)

# System prompt (Chinese): "You are a question re-writer that converts an
# input question into a better version optimized for vectorstore retrieval;
# infer the underlying semantic intent, reply in the user's language, and
# keep the original meaning."
system = """
    您是一个问题重写器，可将输入问题转换为针对 vectorstore 检索进行了优化的更好版本 。
    查看输入并尝试推断底层语义意图/含义，使用用户语言回复，不用刻意强调某个关键词，尽量保持原意。
    """

re_write_prompt = ChatPromptTemplate.from_messages(
    [("system", system), ("human", "Here is the initial question: {question} \n Formulate an improved question.")])

# Prompt -> LLM -> plain string (the rewritten question).
question_rewriter = re_write_prompt | llm | StrOutputParser()
# print("使用 StrOutputParser 的输出：")
# print(question_rewriter.invoke({"question": question}))

# 不使用 StrOutputParser 的版本
# question_rewriter_without_parser = re_write_prompt | llm
# print("\n不使用 StrOutputParser 的输出：")
# print(question_rewriter_without_parser.invoke({"question": question}))


# 使用 LangGraph 构造 Agent
from typing import List
from typing_extensions import TypedDict


class GraphState(TypedDict):
    """Shared state threaded through the LangGraph workflow nodes."""

    question: str    # current (possibly rewritten) user question
    generation: str  # latest LLM answer; only present after generate() runs
    # Annotation corrected from List[str]: retrieve() stores Document objects
    # and grade_documents() reads d.page_content. Quoted to stay a forward ref.
    documents: List["Document"]


# Nodes 节点
def retrieve(state):
    """Node: fetch documents relevant to the current question from the store."""
    print("---RETRIEVE---")
    query = state["question"]
    hits = retriever.get_relevant_documents(query)
    return {"documents": hits, "question": query}


def generate(state):
    """Node: produce an answer from the question plus the graded documents."""
    print("---GENERATE---")
    query = state["question"]
    context_docs = state["documents"]
    answer = rag_chain.invoke({"context": context_docs, "question": query})
    return {"documents": context_docs, "question": query, "generation": answer}


def grade_documents(state):
    """Node: keep only the retrieved documents the LLM grades as relevant.

    Robustness fix: the grader is a plain chat model, so its reply may
    differ in case or carry surrounding whitespace; normalize with
    strip()/lower() before comparing instead of an exact-case match.
    """
    print("----检查文档是否和问题相关----")
    question = state["question"]
    documents = state["documents"]
    filtered_docs = []
    for d in documents:
        score = retrieval_grader.invoke({"question": question, "document": d.page_content})
        grade = score.content.strip().lower()
        if grade == "yes":
            print("文档和用户问题相关")
            filtered_docs.append(d)
        else:
            print("文档和用户问题不相关")
    return {"documents": filtered_docs, "question": question}


def transform_query(state):
    """Node: rewrite the question into a retrieval-friendlier form."""
    print("改写问题")
    carried_docs = state["documents"]
    better_question = question_rewriter.invoke({"question": state["question"]})
    print(f"LLM 改写优化后更好的提问： {better_question}")
    return {"documents": carried_docs, "question": better_question}


# Edges
def decide_to_generate(state):
    """Edge: route to generation if any graded document survived, else rewrite."""
    print("访问检索到的相关知识库")
    if state["documents"]:
        print("文档和问题相关，生成回答")
        return "generate"
    print("所有的文档都不相关，重新生成问题")
    return "transform_query"


def grade_generation_v_documents_and_question(state):
    """Edge: check the answer is grounded in the docs, then that it is useful.

    Returns "useful" (grounded and answers the question), "not useful"
    (grounded but off-target -> rewrite the question), or "not supported"
    (hallucinated -> regenerate).

    Robustness fix: grader replies are free chat text, so normalize with
    strip()/lower() before comparing instead of requiring an exact-case "yes".
    """
    print("评估生成的回复是否基于知识库事实（是否产生了幻觉）")
    question = state["question"]
    documents = state["documents"]
    generation = state["generation"]
    score = hallucination_grader.invoke({"documents": documents, "generation": generation})
    grade = score.content.strip().lower()
    if grade == "yes":
        print("生成的回复是基于知识库，没有幻觉")
        score = answer_grader.invoke({"question": question, "generation": generation})
        grade = score.content.strip().lower()
        if grade == "yes":
            print("问题得到解决")
            return "useful"
        else:
            print("问题没有得到解决")
            return "not useful"
    else:
        print("生成的回复不是基于知识库，继续重试……")
        return "not supported"


from langgraph.graph import END, StateGraph, START
from IPython.display import Image, display

# Graph wiring: retrieve -> grade_documents -> (generate | transform_query);
# transform_query loops back to retrieve, and generate is re-entered until
# its output is graded as grounded AND useful.
workflow = StateGraph(GraphState)
workflow.add_node("retrieve", retrieve)
workflow.add_node("grade_documents", grade_documents)
workflow.add_node("generate", generate)
workflow.add_node("transform_query", transform_query)

workflow.add_edge(START, "retrieve")
workflow.add_edge("retrieve", "grade_documents")
workflow.add_conditional_edges(
    "grade_documents", decide_to_generate,
    {"transform_query": "transform_query", "generate": "generate"}
)
workflow.add_edge("transform_query", "retrieve")
workflow.add_conditional_edges("generate", grade_generation_v_documents_and_question,
                               {"not supported": "generate", "useful": END, "not useful": "transform_query"})
app = workflow.compile()
# NOTE(review): draw_mermaid_png renders through an external mermaid service
# and display() assumes an IPython environment -- this line fails in a plain
# script run without network/IPython.
display(Image(app.get_graph().draw_mermaid_png()))

from pprint import pprint

# Example run; alternative questions kept for manual testing.
inputs = {"question": "在业务负责人中，谁负责的服务数量最多？"}
# inputs = {"question": "payment_gateway 服务遇到问题，负责人是谁，该怎样联系他？"}
# inputs = {"question": "payment_gateway 系统出现安全漏洞怎么办"}

# FIX: the final print previously relied on `value` leaking out of the for
# loop, raising NameError if the stream yielded nothing and KeyError if the
# last node's state had no "generation" key. Track the last state explicitly.
final_state = None
for output in app.stream(inputs):
    for key, value in output.items():
        pprint(f"Node '{key}':")
        final_state = value
    pprint("\n---\n")

if final_state is not None and "generation" in final_state:
    pprint(final_state["generation"])
else:
    pprint("No generation produced.")