import os
from typing import Literal

import streamlit as st
from langchain.chat_models import init_chat_model
from langchain.tools.retriever import create_retriever_tool
from langchain_community.document_loaders import WebBaseLoader
from langchain_community.embeddings import DashScopeEmbeddings
from langchain_core.messages import convert_to_messages
from langchain_core.vectorstores import InMemoryVectorStore
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langgraph.graph import END, START, MessagesState, StateGraph
from langgraph.prebuilt import ToolNode, tools_condition
from pydantic import BaseModel, Field

# Step 1: fetch the source documents used by the RAG system.
with st.spinner("加载文件...", show_time=True):
    try:
        urls = [
            "https://lilianweng.github.io/posts/2024-11-28-reward-hacking/",
            "https://lilianweng.github.io/posts/2024-07-07-hallucination/",
            "https://lilianweng.github.io/posts/2024-04-12-diffusion-video/",
        ]
        # Each WebBaseLoader.load() returns a list of Documents, so `docs`
        # is a list of lists (flattened later during splitting).
        docs = [WebBaseLoader(url).load() for url in urls]
        st.success('文件加载成功', icon="✅")
        with st.expander("文件加载预览"):
            # Streamlit "magic": a bare expression is rendered in the app.
            docs[0][0].page_content.strip()[:1000]
    except Exception as exc:
        # Show the actual failure reason instead of silently swallowing it.
        st.error(f'文件加载失败: {exc}', icon="🚨")
        st.stop()

# Step 2: split the fetched documents into smaller chunks for vector indexing.
with st.spinner("文件分割...", show_time=True):
    try:
        # Flatten the list-of-lists produced by the per-URL loaders.
        docs_list = [doc for per_url_docs in docs for doc in per_url_docs]
        text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
            chunk_size=100, chunk_overlap=50
        )
        doc_splits = text_splitter.split_documents(docs_list)
        st.success('文件分割成功', icon="✅")
        # Fix copy-paste label: this expander previews the split, not the load.
        with st.expander("文件分割预览"):
            doc_splits[0].page_content.strip()
    except Exception as exc:
        # Show the actual failure reason instead of silently swallowing it.
        st.error(f'文件分割失败: {exc}', icon="🚨")
        st.stop()


# Step 3: connect to the DashScope embedding model.
with st.spinner("接入向量模型...", show_time=True):
    try:
        # SECURITY: prefer an externally supplied DASHSCOPE_API_KEY. The
        # hard-coded fallback is kept only for backward compatibility — this
        # key is exposed in source control and should be revoked and removed.
        embeddings = DashScopeEmbeddings(
            model="text-embedding-v3",
            dashscope_api_key=os.environ.get(
                "DASHSCOPE_API_KEY", "sk-8d7ac86d58cd49c8966a0aeea75f1c28"
            ),
        )
        st.success('接入向量模型成功', icon="✅")
        with st.expander("接入向量模型"):
            # Streamlit magic renders the embeddings object.
            embeddings
    except Exception as exc:
        st.error(f'接入向量模型失败: {exc}', icon="🚨")
        st.stop()

# Step 4: index the chunks in an in-memory vector store and expose
# similarity search as a LangChain tool the agent can call.
with st.spinner("创建向量检索工具...", show_time=True):
    try:
        vectorstore = InMemoryVectorStore.from_documents(
            documents=doc_splits, embedding=embeddings
        )
        retriever = vectorstore.as_retriever()
        retriever_tool = create_retriever_tool(
            retriever,
            "retrieve_blog_posts",
            "Search and return information about Lilian Weng blog posts.",
        )
        st.success('创建向量检索工具成功', icon="✅")
        with st.expander("测试向量检索工具"):
            # Streamlit magic renders the retrieved text (smoke test).
            retriever_tool.invoke({"query": "types of reward hacking"})
    except Exception as exc:
        st.error(f'创建向量检索工具失败: {exc}', icon="🚨")
        st.stop()



# Step 5: connect to DeepSeek chat models (comment previously mislabeled
# this section as the retriever-tool step).
with st.spinner("接入deepseek...", show_time=True):
    try:
        # SECURITY: respect an externally supplied DEEPSEEK_API_KEY instead of
        # clobbering it; the hard-coded fallback is exposed in source control
        # and should be revoked and removed.
        os.environ.setdefault(
            "DEEPSEEK_API_KEY", "sk-9cc450ae42c04f3a8b4c46fc56f6d295"
        )
        # One instance answers questions, the other grades retrieval relevance.
        response_model = init_chat_model("deepseek-chat", model_provider="deepseek")
        grader_model = init_chat_model("deepseek-chat", model_provider="deepseek")
        st.success('接入deepseek成功', icon="✅")
    except Exception as exc:
        st.error(f'接入deepseek失败: {exc}', icon="🚨")
        st.stop()



def generate_query_or_respond(state: MessagesState):
    """Let the model decide between retrieval and a direct answer.

    The response model is given the retriever tool; if it emits a tool call
    the graph routes to retrieval, otherwise its reply is the final answer.
    """
    tool_enabled_model = response_model.bind_tools([retriever_tool])
    reply = tool_enabled_model.invoke(state["messages"])
    return {"messages": [reply]}

# Demo: a plain greeting needs no retrieval, so the model should answer directly.
if st.button("Try it on a random input"):
    demo_state = {"messages": [{"role": "user", "content": "hello!"}]}
    reply = generate_query_or_respond(demo_state)["messages"][-1]
    # BUG FIX: pretty_print() prints to stdout and returns None, which crashes
    # st.write_stream; render the message content in the app instead.
    st.write(reply.content)

# Demo: a question about the indexed blog posts should trigger a tool call.
if st.button("Ask a question that requires semantic search:"):
    demo_state = {
        "messages": [
            {
                "role": "user",
                "content": "What does Lilian Weng say about types of reward hacking?",
            }
        ]
    }
    reply = generate_query_or_respond(demo_state)["messages"][-1]
    # BUG FIX: pretty_print() prints to stdout and returns None, which crashes
    # st.write_stream; render the message (tool call or text) in the app instead.
    st.write(reply.content if reply.content else reply.tool_calls)


# Prompt template for the relevance grader; expects {context} (retrieved
# document text) and {question} (the user's question) placeholders.
GRADE_PROMPT = (
    "You are a grader assessing relevance of a retrieved document to a user question. \n "
    "Here is the retrieved document: \n\n {context} \n\n"
    "Here is the user question: {question} \n"
    "If the document contains keyword(s) or semantic meaning related to the user question, grade it as relevant. \n"
    "Give a binary score 'yes' or 'no' score to indicate whether the document is relevant to the question."
)
class GradeDocuments(BaseModel):
    """Structured-output schema for the relevance grader.

    The grader model is expected to fill ``binary_score`` with 'yes' or 'no';
    ``grade_documents`` compares the value to "yes" to route the graph.
    """
    # Kept as a plain str (not Literal) so a slightly off model reply does
    # not raise a pydantic validation error.
    binary_score: str = Field(
        description="Relevance score: 'yes' if relevant, or 'no' if not relevant"
    )

def grade_documents(
    state: MessagesState,
) -> Literal["generate_answer", "rewrite_question"]:
    """Route on whether the retrieved context is relevant to the question.

    Assumes the first message in the state is the user's question and the
    last message is the retrieval (tool) output — TODO confirm this holds
    for every path into this node.

    Returns the name of the next graph node: "generate_answer" when the
    grader says the context is relevant, otherwise "rewrite_question".
    """
    question = state["messages"][0].content
    context = state["messages"][-1].content
    prompt = GRADE_PROMPT.format(question=question, context=context)
    response = grader_model.with_structured_output(GradeDocuments).invoke(
        [{"role": "user", "content": prompt}]
    )
    # Robustness: normalize so replies like "Yes" or "yes " still match.
    if response.binary_score.strip().lower() == "yes":
        return "generate_answer"
    return "rewrite_question"


# Demo: the tool response ("meow") is irrelevant, so the grader should
# route to "rewrite_question".
if st.button("Run this with irrelevant documents in the tool response:"):
    demo_state = {
        "messages": convert_to_messages(
            [
                {
                    "role": "user",
                    "content": "What does Lilian Weng say about types of reward hacking?",
                },
                {
                    "role": "assistant",
                    "content": "",
                    "tool_calls": [
                        {
                            "id": "1",
                            "name": "retrieve_blog_posts",
                            "args": {"query": "types of reward hacking"},
                        }
                    ],
                },
                {"role": "tool", "content": "meow", "tool_call_id": "1"},
            ]
        )
    }
    # BUG FIX: grade_documents returns a plain string; st.write_stream expects
    # a generator/stream, so display the routing decision with st.write.
    st.write(grade_documents(demo_state))


# Demo: the tool response genuinely answers the question, so the grader
# should route to "generate_answer".
if st.button("Confirm that the relevant documents are classified as such:"):
    demo_state = {
        "messages": convert_to_messages(
            [
                {
                    "role": "user",
                    "content": "What does Lilian Weng say about types of reward hacking?",
                },
                {
                    "role": "assistant",
                    "content": "",
                    "tool_calls": [
                        {
                            "id": "1",
                            "name": "retrieve_blog_posts",
                            "args": {"query": "types of reward hacking"},
                        }
                    ],
                },
                {
                    "role": "tool",
                    "content": "reward hacking can be categorized into two types: environment or goal misspecification, and reward tampering",
                    "tool_call_id": "1",
                },
            ]
        )
    }
    # BUG FIX: grade_documents returns a plain string; st.write_stream expects
    # a generator/stream, so display the routing decision with st.write.
    st.write(grade_documents(demo_state))



# Prompt template asking the model to reformulate the user's question;
# expects a single {question} placeholder.
REWRITE_PROMPT = (
    "Look at the input and try to reason about the underlying semantic intent / meaning.\n"
    "Here is the initial question:"
    "\n ------- \n"
    "{question}"
    "\n ------- \n"
    "Formulate an improved question:"
)


def rewrite_question(state: MessagesState):
    """Reformulate the original user question and return it as a new user turn.

    Reads the first message in the state as the question to improve.
    """
    original_question = state["messages"][0].content
    rewrite_request = [
        {"role": "user", "content": REWRITE_PROMPT.format(question=original_question)}
    ]
    model_reply = response_model.invoke(rewrite_request)
    return {"messages": [{"role": "user", "content": model_reply.content}]}


# Demo: retrieval returned junk ("meow"), so show how the question is rewritten.
if st.button("rewrite_question"):
    demo_state = {
        "messages": convert_to_messages(
            [
                {
                    "role": "user",
                    "content": "What does Lilian Weng say about types of reward hacking?",
                },
                {
                    "role": "assistant",
                    "content": "",
                    "tool_calls": [
                        {
                            "id": "1",
                            "name": "retrieve_blog_posts",
                            "args": {"query": "types of reward hacking"},
                        }
                    ],
                },
                {"role": "tool", "content": "meow", "tool_call_id": "1"},
            ]
        )
    }
    response = rewrite_question(demo_state)
    # BUG FIX: the original passed the rewritten question *string* into
    # grade_documents, which expects a full state dict and would crash on
    # state["messages"]; just display the rewritten question instead.
    st.write(response["messages"][-1]["content"])


# Prompt template for final answer generation; expects {question} and
# {context} (retrieved document text) placeholders.
GENERATE_PROMPT = (
    "You are an assistant for question-answering tasks. "
    "Use the following pieces of retrieved context to answer the question. "
    "If you don't know the answer, just say that you don't know. "
    "Use three sentences maximum and keep the answer concise.\n"
    "Question: {question} \n"
    "Context: {context}"
)


def generate_answer(state: MessagesState):
    """Answer the user's question from the retrieved context.

    Reads the first message as the question and the last message (the tool
    output) as the context, then asks the response model for a concise answer.
    """
    user_question = state["messages"][0].content
    retrieved_context = state["messages"][-1].content
    filled_prompt = GENERATE_PROMPT.format(
        question=user_question, context=retrieved_context
    )
    answer = response_model.invoke([{"role": "user", "content": filled_prompt}])
    return {"messages": [answer]}


# Demo: run answer generation directly on a hand-built state with a
# relevant retrieval result.
if st.button("generate_answer"):
    demo_state = {
        "messages": convert_to_messages(
            [
                {
                    "role": "user",
                    "content": "What does Lilian Weng say about types of reward hacking?",
                },
                {
                    "role": "assistant",
                    "content": "",
                    "tool_calls": [
                        {
                            "id": "1",
                            "name": "retrieve_blog_posts",
                            "args": {"query": "types of reward hacking"},
                        }
                    ],
                },
                {
                    "role": "tool",
                    "content": "reward hacking can be categorized into two types: environment or goal misspecification, and reward tampering",
                    "tool_call_id": "1",
                },
            ]
        )
    }
    response = generate_answer(demo_state)
    # BUG FIX: pretty_print() prints to stdout and returns None, which crashes
    # st.write_stream; render the message content in the app instead.
    st.write(response["messages"][-1].content)


# Assemble the agentic-RAG state machine.
workflow = StateGraph(MessagesState)

# Register the nodes under explicit names; add_node(fn) would default to
# fn.__name__, so these names are identical to the original graph's.
workflow.add_node("generate_query_or_respond", generate_query_or_respond)
workflow.add_node("retrieve", ToolNode([retriever_tool]))
workflow.add_node("rewrite_question", rewrite_question)
workflow.add_node("generate_answer", generate_answer)

# Entry point: always ask the model first.
workflow.add_edge(START, "generate_query_or_respond")

# Route on the model's decision: a tool call goes to retrieval, otherwise
# the reply is final and the run ends.
workflow.add_conditional_edges(
    "generate_query_or_respond",
    tools_condition,
    {"tools": "retrieve", END: END},
)

# After retrieval, grade_documents returns the name of the next node
# ("generate_answer" or "rewrite_question").
workflow.add_conditional_edges("retrieve", grade_documents)

# A generated answer ends the run; a rewritten question loops back.
workflow.add_edge("generate_answer", END)
workflow.add_edge("rewrite_question", "generate_query_or_respond")

# Compile the graph into a runnable.
graph = workflow.compile()


# Chat entry point: run the user's question through the compiled graph.
prompt = st.chat_input("Say something")
if prompt:
    st.write(f"User has sent the following prompt: {prompt}")
    # BUG FIX: the compiled graph was never invoked — the handler only echoed
    # the prompt. Run the agentic RAG pipeline and show the final answer.
    final_state = graph.invoke({"messages": [{"role": "user", "content": prompt}]})
    st.write(final_state["messages"][-1].content)