from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from prompt.extract_graph import EXTRACT_GRAPH_PROMPT
import re
import json

class ExtractGraph:
    """Extract a structured knowledge graph from text via a chat model.

    Sends the input text to the model together with a graph-extraction
    prompt and parses the JSON payload out of the model's reply.
    """

    def __init__(self, model: "ChatOpenAI"):
        # Chat model used for extraction; must support .invoke(prompt).
        self.model = model

    def extract_json_from_response(self, response_text: str) -> dict:
        """Extract and parse a JSON object from a model response.

        Looks for a fenced ```json ... ``` block first; if none is found,
        falls back to parsing the whole response text as bare JSON.

        Args:
            response_text: Raw text returned by the model.

        Returns:
            The parsed JSON object.

        Raises:
            ValueError: If no valid JSON content can be parsed.
        """
        json_pattern = r"```json\s*(.*?)\s*```"
        match = re.search(json_pattern, response_text, re.DOTALL)

        if not match:
            # No fenced block: the whole response may itself be bare JSON.
            try:
                return json.loads(response_text.strip())
            except json.JSONDecodeError:
                raise ValueError("未找到有效JSON内容")

        json_str = match.group(1).strip()
        try:
            return json.loads(json_str)
        except json.JSONDecodeError as e:
            # Chain the decode error so the original cause stays visible.
            raise ValueError(f"JSON解析失败：{e}") from e

    def extract_graph(self, word: str):
        """Run the extraction prompt on `word` and return the parsed JSON.

        Retries the model call up to 3 times when invocation or JSON
        parsing fails; returns None if every attempt fails.
        """
        prompt_template = ChatPromptTemplate.from_messages([
            ("system", "你是一位资深专业知识的提取专家，核心职责是从文本中高效提取结构化、高价值的核心知识，并以清晰、严谨的形式呈现。\\no_think"),
            ("human", EXTRACT_GRAPH_PROMPT)
        ])

        prompt = prompt_template.invoke({
            # Key must match the variable name inside EXTRACT_GRAPH_PROMPT.
            "extrct_graph_prompt": word
        })

        try_times = 3
        json_ans = None
        for i in range(try_times):
            try:
                answer = self.model.invoke(prompt)
                json_ans = self.extract_json_from_response(answer.content)
                break
            # Was a bare `except:` — narrowed so KeyboardInterrupt/SystemExit
            # are not swallowed by the retry loop.
            except Exception:
                print(f"json读取失败第{i}次")

        print(json_ans)
        return json_ans

