'''
* This is the project for Brtc LlmOps Platform
* @Author Leon-liao <liaosiliang@alltman.com>
* @Description //TODO 
* @File: 10_final_crag.py
* @Time: 2025/11/4
* @All Rights Reserved By Brtc
'''
import dotenv
import weaviate
from langchain_community.tools import GoogleSerperRun
from langchain_community.utilities import GoogleSerperAPIWrapper
from langchain_core.documents import Document
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import OpenAIEmbeddings, ChatOpenAI
from langchain_weaviate import WeaviateVectorStore
from langgraph.graph import StateGraph
from pydantic import BaseModel, Field
from typing_extensions import TypedDict, Any


# Load API keys and endpoints from the local .env file.
dotenv.load_dotenv()

# Connect to the local Weaviate instance and expose the "TestDemo"
# collection as an MMR (maximal marginal relevance) retriever.
client = weaviate.connect_to_local(host="192.168.106.129", port=8080)
embeddings = OpenAIEmbeddings(model="text-embedding-3-small")
db = WeaviateVectorStore(
    client=client,
    index_name="TestDemo",
    text_key="text",
    embedding=embeddings,
)
retriever = db.as_retriever(search_type="mmr")
class GoogleSerperSchema(BaseModel):
    """Argument schema for the google_serper tool (a single search query string)."""
    query:str = Field(description="执行谷歌搜索的查询语句")

# Google web-search tool backed by the Serper API.
google_serper = GoogleSerperRun(
    name="google_serper",
    description="一个低成本的谷歌搜索API工具！",
    args_schema=GoogleSerperSchema,
    api_wrapper=GoogleSerperAPIWrapper(),
)


class GradeDocument(BaseModel):
    """Structured output for the relevance grader.

    binary_score is expected to be "yes" or "no" (compared case-insensitively
    by grade_documents).
    """
    binary_score:str = Field(description="文档与问题是否关联, 请回答Yes Or No")

class GraphState(TypedDict):
    """State passed between the nodes of the CRAG graph."""
    question:str # user question (may be rewritten before web search)
    generation:str # answer text produced by the LLM
    web_search:str # "yes"/"no" flag: whether a web search is required
    documents:list[Document] # retrieved (and later filtered/augmented) documents

def format_docs(docs:list[Document])->str:
    """Concatenate the page contents of *docs* into one string, CRLF-separated."""
    contents = (doc.page_content for doc in docs)
    return "\r\n".join(contents)

llm = ChatOpenAI(model="gpt-4o-mini")
# Retrieval grader: judges whether a retrieved document is relevant to the
# question, returning a structured GradeDocument ("yes"/"no").
system = """  
你是一名评估检索到的文档与用户问题相关性的评估员。
如果文档包含与问题相关的关键字或语义，请将其评级为相关。
给出一个是否相关得分为yes或者no，以表明文档是否与问题相关。
"""
grade_prompt = ChatPromptTemplate.from_messages([
    ("system", system),
    ("human", "检索文档:\n\n{document}\n\n用户问题:{question}")
])
retriever_grader = grade_prompt|llm.with_structured_output(GradeDocument)
# RAG chain: answers the question from the retrieved context
# (temperature 0 for more deterministic output).
template = """ 
你是一个问答任务的助理。使用以下检索到的上下文来回答问题。如果不知道就说不知道，不要胡编乱造，并保持答案简洁。
问题: {question}
上下文: {context}
答案:
"""
prompt = ChatPromptTemplate.from_template(template)
rag_chain = prompt|llm.bind(temperature=0)|StrOutputParser()
# Question rewriter: rephrases the original question into a version better
# suited for web search.
rewrite_prompt = ChatPromptTemplate.from_messages([
    ("system","你是一个将输入问题转换为优化的更好版本的问题重写器并用于网络搜索。请查看输入并尝试推理潜在的语义意图/含义。"),
    ("human", "这里是初始化问题:\n\n{question}\n\n请尝试提出一个改进问题。")
])
question_rewrite = rewrite_prompt|llm.bind(temperature=0)|StrOutputParser()
# 构建图的 要素
def retriever_data(state:GraphState)->Any:
    """Retrieval node: query the vector store with the user question."""
    print("-------检索节点-----------")
    query = state["question"]
    docs = retriever.invoke(query)
    return {"question": query, "documents": docs}

def generate(state:GraphState)->Any:
    """Generation node: answer the question from the retrieved context via the RAG chain."""
    print("------生成节点-----------")
    query = state["question"]
    docs = state["documents"]
    answer = rag_chain.invoke({
        "context": format_docs(docs),
        "question": query,
    })
    return {
        "question": query,
        "documents": docs,
        "generation": answer,
    }

def grade_documents(state:GraphState)->Any:
    """Grading node: keep only documents relevant to the question.

    Flags web_search as "yes" when at least one retrieved document is
    judged irrelevant by the retrieval grader.
    """
    print("---------检测文档预问题是否存在关联-----------")
    query = state["question"]
    relevant_docs = []
    needs_search = "no"
    for doc in state["documents"]:
        verdict: GradeDocument = retriever_grader.invoke({
            "question": query,
            "document": doc.page_content,
        })
        if verdict.binary_score.lower() == "yes":
            print("--------文档存在关联------------")
            relevant_docs.append(doc)
        else:
            print("--------文档不存在关联------------")
            needs_search = "yes"
    return {**state, "documents": relevant_docs, "web_search": needs_search}

def web_search(state:GraphState)->Any:
    """Web-search node: augment the documents with a Google Serper result.

    Invokes the google_serper tool with the (possibly rewritten) question and
    appends the raw search text as an extra Document. Builds a NEW list
    instead of appending to state["documents"] in place, so the shared graph
    state held by other consumers is never mutated as a side effect.
    """
    print("-----------网络检索节点-----------")
    question = state["question"]
    search_content = google_serper.invoke({"query": question})
    # Copy before appending: avoid in-place mutation of the shared state list.
    documents = list(state["documents"])
    documents.append(Document(page_content=search_content))
    return {**state, "documents": documents}

def transformer_query(state:GraphState)->Any:
    """Rewrite node: reformulate the question for better web-search results."""
    print("----------重写转换节点-----------")
    rewritten = question_rewrite.invoke({"question": state["question"]})
    return {**state, "question": rewritten}


def decide_to_generate(state:GraphState)->Any:
    """Conditional edge: pick the next node name based on the web_search flag.

    Returns "transformer_query" when a web search is needed, otherwise
    "generate".
    """
    print("----------决定生成还是搜索节点-----------")
    needs_search = state["web_search"].lower() == "yes"
    if not needs_search:
        return "generate"
    print("-------执行搜索-----------")
    return "transformer_query"
""" 
                                                                      （不相关）问题转化 ------- 网络检索 --------    
                                                                      |                                   |
开始  ------>  retriever（向量数据库检索） ----->  打分(GradeDoc)  -----> 判断（是否相关） ---->（相关） llm生成 --- > 结束 
 
"""

# Assemble the CRAG workflow graph.
work_flow = StateGraph(GraphState)
# Register the nodes.
work_flow.add_node("retriever", retriever_data)
work_flow.add_node("generate", generate)
work_flow.add_node("grade_documents", grade_documents)
work_flow.add_node("web_search_node", web_search)
work_flow.add_node("transformer_query", transformer_query)

# Wire the edges: retrieve -> grade -> either rewrite+web-search -> generate,
# or straight to generate when all documents are relevant.
work_flow.set_entry_point("retriever")
work_flow.add_edge("retriever", "grade_documents")
# decide_to_generate returns the next node name
# ("transformer_query" or "generate").
work_flow.add_conditional_edges("grade_documents", decide_to_generate)
work_flow.add_edge("transformer_query", "web_search_node")
work_flow.add_edge("web_search_node", "generate")
work_flow.set_finish_point("generate")

# Compile and run once, then release the Weaviate connection.
app = work_flow.compile()
print(app.invoke({"question":"什么是 LLMOPS？"}))
client.close()








