import json
import logging
import re
from typing import Dict, List

from langchain_core.prompts import ChatPromptTemplate
from pydantic.v1 import BaseModel, Field

from src.llm import get_llm
from src.shared.constants import MODEL_VERSIONS

class Schema(BaseModel):
    """Knowledge Graph Schema.

    Pydantic v1 model describing the expected shape of the LLM's JSON
    response: a flat list of node labels and a flat list of relationship
    types. Used to validate the parsed dict before it is returned.
    """
    # Node label / type strings (e.g. "ProcessStage", "Parameter").
    labels: List[str] = Field(description="list of node labels or types in a graph schema")
    # Relationship type strings (e.g. "HAS_PARAMETER", "INFLUENCES").
    relationshipTypes: List[str] = Field(description="list of relationship types in a graph schema")

# System prompt used when the "schema description" option is enabled.
# Steers the model toward a tobacco-manufacturing vocabulary of entities
# (process stages, parameters, conditions, effects) and relationships.
# NOTE(review): the doubled braces {{ }} are ChatPromptTemplate-style
# escapes, but this string is sent to the LLM verbatim (no .format is
# applied), so the model receives literal "{{" — confirm this is intended.
PROMPT_TEMPLATE_WITH_SCHEMA = """
You are an expert in schema extraction, especially for extracting graph schema information from various formats.
Generate the generalized graph schema based on input text. Identify key entities and their relationships.

Please output your response in the following JSON format:
{{
    "labels": ["label1", "label2", ...],
    "relationshipTypes": ["relationType1", "relationType2", ...]
}}

For the tobacco manufacturing domain text, extract entities like:
- ProcessStage (e.g. 烟片加料, 贮叶, 叶丝加料, 掺配)
- Parameter (e.g. 温度, 湿度, 含水率, 开度)
- Condition (e.g. 正常范围, 异常状态)
- Effect (e.g. 品质影响, 风险)

And relationships like:
- HAS_PARAMETER
- INFLUENCES
- LEADS_TO
- PRECEDES

Only return the JSON response, no other text.
"""

# Generic, domain-agnostic system prompt used when the "schema description"
# option is disabled: the model derives labels and relationship types from
# the example text alone.
# NOTE(review): as with the domain prompt, the {{ }} escapes are sent to
# the LLM verbatim since no templating/.format step is applied.
PROMPT_TEMPLATE_WITHOUT_SCHEMA = """
You are an expert in schema extraction, especially for deriving graph schema information from example texts.
Analyze the following text and extract only the types of entities and relationships.

Please output your response in the following JSON format:
{{
    "labels": ["label1", "label2", ...],
    "relationshipTypes": ["relationType1", "relationType2", ...]
}}

Only return the JSON response, no other text.
"""

def _parse_schema_json(content: str) -> Dict:
    """Parse a JSON object out of raw LLM output text.

    Tries a direct ``json.loads`` first; if the model wrapped the JSON in
    extra text (e.g. markdown fences or commentary), falls back to grabbing
    the span from the first '{' to the last '}' and parsing that.

    Args:
        content: Raw text returned by the LLM.

    Returns:
        The parsed JSON object as a dict.

    Raises:
        ValueError: If no parseable JSON object is found in ``content``.
    """
    try:
        return json.loads(content)
    except json.JSONDecodeError:
        # Greedy match from the first '{' to the last '}' — tolerates
        # leading/trailing prose around the JSON payload.
        json_match = re.search(r'\{[\s\S]*\}', content)
        if json_match:
            return json.loads(json_match.group())
        raise ValueError("无法从 LLM 响应中提取有效的 JSON")


def schema_extraction_from_text(input_text: str, model: str, is_schema_description_cheked: bool) -> Dict:
    """Extract a graph schema (node labels + relationship types) from text via an LLM.

    Args:
        input_text: Source text to derive the schema from.
        model: Model identifier forwarded to ``get_llm``.
        is_schema_description_cheked: When True, use the domain-specific
            prompt (PROMPT_TEMPLATE_WITH_SCHEMA); otherwise the generic one.
            NOTE(review): name contains a typo ("cheked") but is kept
            unchanged for backward compatibility with keyword callers.

    Returns:
        Dict with "labels" and "relationshipTypes" lists, validated via
        the ``Schema`` pydantic model.

    Raises:
        ValueError: If the LLM response yields no valid JSON or any other
            step of the extraction fails (original exception is chained).
    """
    try:
        llm, model_name = get_llm(model)

        # Log LLM details to aid debugging of model selection issues.
        logging.info("使用的 LLM 模型信息:")
        logging.info(f"- 模型名称: {model_name}")
        logging.info(f"- 模型类型: {type(llm).__name__}")
        # NOTE(review): __dict__ may contain API keys/credentials —
        # consider redacting before logging in production.
        logging.info(f"- 模型配置: {llm.__dict__}")

        schema_prompt = (
            PROMPT_TEMPLATE_WITH_SCHEMA
            if is_schema_description_cheked
            else PROMPT_TEMPLATE_WITHOUT_SCHEMA
        )

        # Plain string formatting is used instead of ChatPromptTemplate.
        system_message = schema_prompt
        user_message = input_text

        logging.info("开始调用 LLM 生成响应...")
        logging.info(f"System prompt: {system_message}")
        logging.info(f"User message: {user_message[:200]}...")

        # Invoke the LLM with a single combined prompt string.
        response = llm.invoke(f"{system_message}\n\nUser: {user_message}")
        logging.info(f"LLM 原始响应: {response}")

        # Chat models return a message object exposing .content; other
        # backends may return a plain string.
        if hasattr(response, 'content'):
            content = response.content
        else:
            content = str(response)

        logging.info(f"解析后的响应内容: {content}")

        schema_dict = _parse_schema_json(content)
        logging.info(f"最终解析的 schema: {schema_dict}")

        # Validate the parsed structure against the Schema model.
        schema = Schema(**schema_dict)
        return schema.dict()

    except Exception as e:
        logging.error(f"Schema extraction failed: {str(e)}")
        logging.error(f"Exception type: {type(e)}")
        logging.error("Exception details:", exc_info=True)
        # Chain the original exception so callers see the root cause.
        raise ValueError(f"Schema 提取失败: {str(e)}") from e