import json
from typing import Dict, List, TypedDict

from langchain_openai import ChatOpenAI
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langgraph.graph import StateGraph, START, END

from .paper_abstract_extractor_utils import *

import sys
def cout(s: str) -> None:
    """Write *s* followed by a newline to stderr (keeps trace output off stdout)."""
    sys.stderr.write(f"{s}\n")

# Shared state threaded through the abstract-extraction workflow graph.
AbstractState = TypedDict(
    "AbstractState",
    {
        # Every markdown chunk of the paper, in reading order.
        "paper_md_list": List[str],
        # Cursor: index of the next chunk to hand out.
        "index": int,
        # The chunk currently being processed.
        "text": str,
        # Accumulated JSON summary produced by the reader node.
        "my_summary": Dict,
        # Routing flag consumed by the conditional edges: "NEXT" or "END".
        "next_or_end": str,
        # Reading hint forwarded into the next reader prompt.
        "read_tip": str,
    },
)

class PaperAbstractExtractor():
    """
    Paper-abstract extractor.

    Reads the opening part of a paper (where the title and abstract live)
    chunk by chunk and extracts structural information plus the key points of
    the abstract, using a JSON-mode LLM inside a small langgraph loop:
    text_generator -> abstract_reader -> abstract_evaluator -> (next chunk | END).
    """
    
    # Model-facing hint used only for the very first chunk: it tells the LLM
    # that the title/authors most likely appear in this first (7200-char)
    # markdown segment.  This is a runtime string consumed by the prompt
    # templates, so it is deliberately left in Chinese.
    first_read_tip = "这是分割后论文片段的第一段（前7200字符），论文的标题和作者极有可能出现在这个地方，输入论文片段文本为markdown格式"
    
    def __init__(self, llm: ChatOpenAI):
        """Bind *llm* to JSON-object responses and compile the workflow graph."""
        # Every completion is forced to be a JSON object so json.loads below
        # can parse it directly.
        self._json_llm = llm.bind(response_format={"type": "json_object"})
        # Chunk size matches the "first 7200 chars" claim in first_read_tip;
        # the 480-char overlap keeps sentences cut at a chunk boundary visible
        # in both neighbouring chunks.
        self._text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=7200,
            chunk_overlap=480,
            length_function=len,
            separators=["\n\n",
                        ".", "。", "\u3002",
                        ",", "，", "\uff0c",
                        "\n",
                        " ", "\u3000", "\u200b",    # space / full-width space / zero-width space
                        ''],
            is_separator_regex=False,
        )
        
        self._build_graph()
        
    def extract(self, paper_md_text: str) -> Dict:
        """Split *paper_md_text* into chunks and run the extraction workflow.

        Returns the final graph state (the whole AbstractState dict); the
        extracted information sits under its "my_summary" key.
        """
        split_text_list = self._text_splitter.split_text(paper_md_text)
        return self._workflow.invoke({"paper_md_list": split_text_list, "index": 0, "read_tip": PaperAbstractExtractor.first_read_tip})
        
    def _text_generator(self, state: AbstractState):
        """Graph node: emit the next chunk, or route to END when exhausted."""
        text_index = state["index"]
        paper_md_list = state["paper_md_list"]
        if text_index >= len(paper_md_list):
            return {"next_or_end": "END"}
        
        # Advance the cursor and hand the current chunk to the reader.
        return {"index": text_index+1, "text": paper_md_list[text_index], "next_or_end": "NEXT"}
        
    def _abstract_reader(self, state: AbstractState):
        """Graph node: LLM-parse the current chunk and replace my_summary.

        The prompt templates and merge_* helpers come from
        paper_abstract_extractor_utils (wildcard import at top of file).
        """
        text_index = state["index"]
        user_content = None
        # index was already incremented by _text_generator, so 1 means this is
        # the first chunk: use the first-read prompt (no accumulated summary yet).
        if (text_index == 1):
            user_content = first_read_prompt.invoke({"context": state["text"], "read_tip": state["read_tip"]}).messages[0].content
        else:
            my_summary = state["my_summary"]
            # Render the paper's basic-info text from the summary so far.
            paper_info = merge_paper_basic_info(my_summary)
            # Render the merged text of the other research fields.
            summary = merge_other_research_fields(my_summary)
            user_content = next_read_prompt.invoke({"context": state["text"], "read_tip": state["read_tip"],
                                                    "paper_info": paper_info, "summary": summary}).messages[0].content
        resp = self._json_llm.invoke([{
                "role": "system",
                "content": abstract_reader_system_msg,
            },{
                "role": "user",
                "content": user_content,
            }]
        )
        # Safe to parse: the LLM was bound to response_format=json_object.
        json_res = json.loads(resp.content)
        
        cout(f"\n_abstract_reader:\nindex: {text_index}\nmy_summary: {json_res}")
        return {"my_summary": json_res}
    
    def _abstract_evaluator(self, state: AbstractState):
        """Graph node: ask the LLM whether the abstract is fully read.

        The model's JSON is expected to carry "judgment" ("NEXT"/"END", feeds
        the conditional edge) and "reading_suggestion" (becomes the read_tip
        for the next reader pass); a missing key raises KeyError here.
        """
        my_summary = state["my_summary"]
        # Render the paper's basic-info text from the summary so far.
        paper_info = merge_paper_basic_info(my_summary)
        # Render the merged text of the other research fields.
        summary = merge_other_research_fields(my_summary)
        user_content = judge_next_prompt.invoke({"context": state["text"],
                                                    "paper_info": paper_info, "my_summary": summary}).messages[0].content
        resp = self._json_llm.invoke([{
                "role": "system",
                "content": abstract_evaluator_system_msg,
            },{
                "role": "user",
                "content": user_content,
            }]
        )
        json_res = json.loads(resp.content)
        
        cout(f'\nabstract_evaluator:\nindex: {state["index"]}\nnext_or_end: {json_res["judgment"]}\nread_tip: {json_res["reading_suggestion"]}')
        return {"next_or_end": json_res["judgment"], "read_tip": json_res["reading_suggestion"]}
    
    def _next_route(self, state: AbstractState):
        """Conditional-edge selector: returns "NEXT" or "END" from state."""
        return state["next_or_end"]
    
    def _build_graph(self):
        """Wire the three nodes into a read/evaluate loop and compile it.

        START -> text_generator -(NEXT)-> abstract_reader -> abstract_evaluator
        -(NEXT)-> text_generator ... ; either conditional edge routes "END" to END.
        """
        self._graph = StateGraph(AbstractState)

        # Add the nodes
        self._graph.add_node("text_generator", self._text_generator)
        self._graph.add_node("abstract_reader", self._abstract_reader)
        self._graph.add_node("abstract_evaluator", self._abstract_evaluator)

        # Add edges to connect nodes
        self._graph.add_edge(START, "text_generator")
        self._graph.add_conditional_edges(
            "text_generator",
            self._next_route,
            {
                "END": END,
                "NEXT": "abstract_reader",
            },
        )
        self._graph.add_edge("abstract_reader", "abstract_evaluator")
        self._graph.add_conditional_edges(
            "abstract_evaluator",
            self._next_route,
            {
                "END": END,
                "NEXT": "text_generator",
            },
        )

        # Compile the workflow
        self._workflow = self._graph.compile()
    