import json
from typing import Dict, Iterator, List, TypedDict, Union
from common.rpc.assistant import assistant_pb2
from common.rpc.chat import chat_pb2
from common.rpc.paperfile import paperfile_pb2
from common.utils import decorator, logger
from langchain_core.runnables import RunnableConfig
from langgraph.graph import START, StateGraph
from langchain_openai import ChatOpenAI
from langgraph.config import get_stream_writer
from langchain_core.runnables.config import RunnableConfig
from langchain_core.messages.ai import AIMessage

from app.rag.internal.svc import svc
from app.rag.internal.logic.doc_combine import doc_combiner_factory
from .rag_graph_utils import *

# TODO(shenll): debug-only import — remove together with the stderr debug prints.
import sys

class State(TypedDict):
    # LangGraph state shared by the _think -> _retrieve -> _generate nodes.
    question: str        # raw user question as received from the caller
    sys_msg: str         # assistant-configured system message (currently unused by the nodes)
    chat_context: List[Union[Dict, AIMessage]] | None  # recent history: user turns as dicts, AI turns as AIMessage
    doc_context: str     # retrieved paper fragments combined into one text (set by _retrieve)
    
    paper_basic_info: str  # paper metadata summary loaded from paper_segment_model
    abstract_summary: str  # abstract summary loaded from paper_segment_model
    
    thinker_question: str  # question as reformulated by the thinker LLM
    thinker_tip: str       # thinker's reasoning hint passed to the answer prompt
    need_rag: str          # "NO" skips retrieval; any other value triggers it — TODO confirm expected values
    rag_sentence: str      # query sentence the thinker proposes for vector search

class RagGraph():
    """Streaming RAG pipeline over a single paper file.

    Wires a three-node LangGraph sequence -- _think -> _retrieve -> _generate --
    and exposes it through ``chat_stream`` / ``stream``, which yield answer text
    chunks emitted via LangGraph's custom stream writer.
    """

    def __init__(self, svcCtx: svc.ServiceContext):
        """Create the JSON-only "thinker" LLM and compile the graph.

        Args:
            svcCtx: service context providing config, gRPC stubs, embeddings
                and the paper segment model.
        """
        self._svcCtx = svcCtx
        # Dedicated thinker model: model-side thinking disabled, output forced
        # to a JSON object so _think can json.loads the reply.
        self._thinker_llm = ChatOpenAI(
            api_key=self._svcCtx.config.ali_llm_conf.api_key,
            base_url=self._svcCtx.config.ali_llm_conf.base_url,
            model=self._svcCtx.config.ali_llm_conf.model,
            # other params...
            extra_body={"enable_thinking": False}
        ).bind(response_format={"type": "json_object"})

        self._build_graph()

    def chat_stream(self, question: str, assistant_id: str, dialog_id: str, user_id: int, metadata: dict) -> Iterator[str]:
        """Validate the assistant/dialog/file, gather context, and stream a reply.

        Args:
            question: the user's question.
            assistant_id: assistant whose settings (system msg, paper file,
                temperature) drive the answer.
            dialog_id: dialog the question belongs to (existence is verified).
            user_id: owner of the assistant/dialog/file.
            metadata: gRPC metadata forwarded to every stub call.

        Yields:
            Answer text chunks.

        Raises:
            Whatever the underlying gRPC stubs raise when the assistant,
            dialog, or paper file does not exist or is not owned by `user_id`.
        """
        assistant = self._svcCtx.assistant_stub.GetAssistant(
            assistant_pb2.GetAssistantReq(id=assistant_id, userId=user_id),
            metadata=metadata.items()
            ).assistantInfo
        # Existence check only; the dialog itself is not used further.
        self._svcCtx.chat_dia_stub.Get(
            chat_pb2.ChatDialogGetReq(Id=dialog_id, UserId=user_id),
            metadata=metadata.items()
            ).dialog
        paper_file_id = assistant.assistant_setting.paperFileId
        # Existence check only; the file itself is not used further.
        self._svcCtx.paperfile_stub.Get(
            paperfile_pb2.GetReq(Id=paper_file_id, UserId=user_id),
            metadata=metadata.items())

        # Fetch the last two exchanges (Limit=4 messages) as chat history.
        msgs = self._svcCtx.chat_msg_stub.List(
            chat_pb2.ChatMsgListReq(DialogId=dialog_id, UserId=user_id, Limit=4),
            metadata=metadata.items()
            ).msgs
        # AI turns become AIMessage objects, user turns stay plain role/content
        # dicts; anything else is logged and dropped.
        chat_context: List[Union[Dict, AIMessage]] = []
        for msg in msgs:
            if msg.Role == "ai":
                chat_context.append(AIMessage(content=msg.Content))
            elif msg.Role == "user":
                chat_context.append({
                    "role": msg.Role,
                    "content": msg.Content
                })
            else:
                logger.my_logger().error("invalid msg Id: %s Role %s", msg.Id, msg.Role)

        sys_msg = assistant.assistant_setting.systemMsg

        paper_meta = self._svcCtx.paper_segment_model.find_meta(user_id, paper_file_id)
        logger.my_logger().debug("paper_meta: %s", paper_meta)
        paper_basic_info = paper_meta["paper_basic_info"]
        abstract_summary = paper_meta["abstract_summary"]

        # Fall back to 0.7 when the assistant's temperature is unset/invalid.
        temperature = assistant.model_setting.llm_setting.temperature
        temperature = 0.7 if temperature <= 0 else temperature
        llm = ChatOpenAI(
            api_key=self._svcCtx.config.ali_llm_conf.api_key,
            base_url=self._svcCtx.config.ali_llm_conf.base_url,
            model=self._svcCtx.config.ali_llm_conf.model,
            # other params...
            temperature=temperature
        )
        # Per-request values the graph nodes read via config["configurable"].
        configurable = {"configurable": {"user_id": user_id, "file_id": paper_file_id, "llm": llm}}

        yield from self.stream(question, sys_msg, paper_basic_info, abstract_summary, configurable, chat_context)

    def stream(self, question: str, sys_msg: str, paper_basic_info: str, abstract_summary: str,
               configurable: RunnableConfig, chat_context: List[Union[Dict, AIMessage]] | None = None) -> Iterator[str]:
        """Run the compiled graph and yield the text of each reply chunk.

        Uses stream_mode="custom", so only payloads emitted through
        get_stream_writer() in _generate are received here.
        """
        for chunk in self._graph.stream({"question": question, "sys_msg": sys_msg, "chat_context": chat_context,
                                         "paper_basic_info": paper_basic_info, "abstract_summary": abstract_summary},
                                        configurable, stream_mode="custom"):
            yield chunk["reply_chunk"].content

    @decorator.log_time_handler()
    def _think(self, state: State, config: RunnableConfig):
        """Pre-RAG thinking step.

        Asks the thinker LLM to reformulate the question and decide whether
        retrieval is needed; the model must return a JSON object with keys
        "question", "thinking", "need_rag" and "rag_sentence".
        """
        paper_basic_info = state["paper_basic_info"]
        abstract_summary = state["abstract_summary"]
        user_question = state["question"]

        user_content = thinker_prompt.invoke({
            "paper_basic_info": paper_basic_info,
            "abstract_summary": abstract_summary,
            "user_question": user_question,
        }).messages[0].content

        resp = self._thinker_llm.invoke([
            {
                "role": "system",
                "content": thinker_system_msg,
            },{
                "role": "user",
                "content": user_content,
            }
        ])
        json_res = json.loads(resp.content)

        logger.my_logger().debug(
            "_think: thinker_question=%s thinker_tip=%s need_rag=%s rag_sentence=%s",
            json_res["question"], json_res["thinking"], json_res["need_rag"], json_res["rag_sentence"])
        return {
            "thinker_question": json_res["question"],
            "thinker_tip": json_res["thinking"],
            "need_rag": json_res["need_rag"],
            "rag_sentence": json_res["rag_sentence"]
            }

    @decorator.log_time_handler()
    def _retrieve(self, state: State, config: RunnableConfig):
        """RAG retrieval step.

        Skips retrieval entirely when the thinker answered need_rag == "NO";
        otherwise embeds rag_sentence and searches the paper's segments.
        """
        if state["need_rag"] == "NO":
            logger.my_logger().debug("_retrieve: skipped, need_rag == NO")
            return {"doc_context": "未检索"}

        vec = self._svcCtx.embeddings.embed_query(state["rag_sentence"])
        user_id = config["configurable"]["user_id"]
        file_id = config["configurable"]["file_id"]
        res = self._svcCtx.paper_segment_model.search_by(user_id, file_id, vec)

        retrieved_docs = res[0]
        if len(retrieved_docs) == 0:
            logger.my_logger().debug("_retrieve: no matching segments found")
            return {"doc_context": "未找到相关内容"}

        # Combine segments with the combiner matching the splitter that
        # produced them.
        doc_context = doc_combiner_factory(retrieved_docs[0]["splitter_id"]).combine(retrieved_docs)
        logger.my_logger().debug("_retrieve: doc_context=%s", doc_context)

        return {"doc_context": doc_context}

    @decorator.log_time_handler()
    def _generate(self, state: State, config: RunnableConfig):
        """RAG generation step: stream the answer through the custom writer.

        Returns an empty state update; the reply reaches the caller only via
        get_stream_writer() payloads consumed by `stream`.
        """
        user_content = answer_prompt.invoke({
            "user_question": state["thinker_question"],
            "paper_basic_info": state["paper_basic_info"],
            "abstract_summary": state["abstract_summary"],
            "doc_context": state["doc_context"],
            "thinker_tip": state["thinker_tip"]
            }).messages[0].content

        messages = [
            {
                "role": "system",
                "content": answer_system_msg
            },
        ]
        if (state["chat_context"] is not None and len(state["chat_context"]) > 0):
            messages.extend(state["chat_context"])
        messages.append({
                "role": "user",
                "content": user_content
            })
        # Per-request LLM built in chat_stream with the assistant's temperature.
        llm: ChatOpenAI = config["configurable"]["llm"]

        resp = llm.stream(messages)
        writer = get_stream_writer()
        for chunk in resp:
            writer({"reply_chunk": chunk})
        return {}

    def _build_graph(self) -> None:
        """Compile the linear think -> retrieve -> generate graph."""
        self._graph = (
            StateGraph(State)
            .add_sequence([self._think, self._retrieve, self._generate])
            .add_edge(START, "_think")
            .compile()
        )