"""
智能查询重写模块
基于对话历史进行上下文引用解析和代词消解

架构说明：
- 统一在LangGraph Supervisor中调用，避免重复实现
- 提供异步接口，支持高并发处理
- 系统唯一的查询重写实现，确保一致性

功能特性：
- 代词消解：处理"它"、"这个"、"那个"等代词引用
- 省略补全：处理"那第二多的呢？"等省略查询
- 时间引用：处理"刚才"、"之前"等时间表达
- 上下文理解：基于对话历史理解查询意图
"""

import re
import json
from typing import Dict, Any, List, Optional, Tuple
from dataclasses import dataclass
from datetime import datetime
import logging

from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
from langchain_core.output_parsers import PydanticOutputParser
from pydantic import BaseModel, Field

logger = logging.getLogger(__name__)

class QueryRewriteResult(BaseModel):
    """Result of a query-rewrite operation.

    Carries the original and rewritten query together with metadata
    describing how the rewrite was derived from conversation context.
    """
    original_query: str = Field(description="原始查询")
    rewritten_query: str = Field(description="重写后的查询")
    # Validated by pydantic to stay within [0.0, 1.0].
    confidence: float = Field(description="重写置信度", ge=0.0, le=1.0)
    # Values produced elsewhere in this module include "none", "pronoun",
    # "temporal", "ellipsis", "context", "unknown" and "error".
    reference_type: str = Field(description="引用类型")
    resolved_entities: List[str] = Field(description="解析的实体列表")
    context_used: List[str] = Field(description="使用的上下文信息")

@dataclass
class ContextEntity:
    """An entity extracted from conversation history for reference resolution."""
    entity: str  # surface form of the entity
    entity_type: str  # person, location, time, object, etc.
    mention_count: int  # number of times the entity was mentioned
    last_mentioned: datetime  # timestamp of the most recent mention
    confidence: float  # extraction confidence in [0, 1]

class QueryRewriter:
    """智能查询重写器"""
    
    def __init__(self, model_name: Optional[str] = None):
        """Initialize the query rewriter.

        Args:
            model_name: Chat model name; defaults to the BASE_MODEL
                environment variable, falling back to "gpt-4".
        """
        import os

        self.model = ChatOpenAI(
            model=model_name or os.getenv("BASE_MODEL", "gpt-4"),
            api_key=os.getenv("OPENAI_API_KEY"),
            base_url=os.getenv("OPENAI_BASE_URL"),
            temperature=0.1  # low temperature for consistent rewrites
        )

        # Pronouns / deictic words that usually point back into the dialogue
        self.pronouns = {
            "它", "他", "她", "这", "那", "这个", "那个", "这些", "那些",
            "这里", "那里", "今天", "明天", "昨天", "现在", "刚才", "之前"
        }

        # Time expressions that need contextual grounding
        self.time_expressions = {
            "今天", "明天", "昨天", "后天", "前天", "现在", "刚才", "之前", "以后",
            "上午", "下午", "晚上", "早上", "中午", "深夜", "这周", "下周", "上周",
            "这个月", "下个月", "上个月", "今年", "明年", "去年"
        }

        # Markers of elliptical queries such as "那第二多的呢？"
        self.ellipsis_indicators = {
            "的呢", "呢", "的话", "怎么样", "如何", "多少", "什么时候", "在哪里"
        }

        # Confirmation-style replies (added later) that refer back to a
        # previously proposed action, e.g. "不用，你继续添加"
        self.confirmation_indicators = {
            "不用", "不需要", "不要", "别", "好的", "好", "行", "可以", "确认",
            "继续", "开始", "执行", "进行", "是的", "对的", "没错", "同意", "开始吧"
        }

        self._init_prompts()
        
    def _init_prompts(self):
        """Build the three prompt templates used by the rewriter.

        The template texts themselves are part of the runtime behaviour
        (they are sent to the LLM) and are deliberately kept in Chinese.
        """

        # Context-analysis prompt: extracts entities from the dialogue history
        self.context_analysis_prompt = ChatPromptTemplate.from_template("""
你是一个专业的对话上下文分析专家。请分析以下对话历史，提取关键实体和上下文信息。

对话历史：
{conversation_history}

请提取以下信息：
1. 人物实体（人名、称谓）
2. 地点实体（地名、位置）
3. 时间实体（具体时间、时间表达）
4. 对象实体（物品、概念、主题）
5. 动作实体（动词、行为）

对于每个实体，请提供：
- 实体名称
- 实体类型
- 在对话中的重要程度（1-10）
- 最后提及的位置

请以JSON格式返回结果。
""")

        # Query-rewrite prompt: resolves pronouns/ellipsis/time references
        self.rewrite_prompt = ChatPromptTemplate.from_template("""
你是一个专业的查询重写专家。请根据对话上下文，将用户的查询重写为完整、明确的查询。

对话历史：
{conversation_history}

当前用户查询：{current_query}

上下文实体：
{context_entities}

请分析用户查询中的：
1. 代词引用（如"它"、"这个"、"那个"等）
2. 省略成分（如"明天的呢？"中省略的主语）
3. 时间引用（如"明天"、"刚才"等）
4. 隐含引用（基于上下文的隐含信息）
5. 确认性回复（如"不用，你继续添加"、"好的，开始吧"等）

重写规则：
1. 保持原意不变
2. 补全省略的信息
3. 解析代词和引用
4. 明确时间和地点
5. 对于确认性回复，结合上下文明确用户要确认的具体操作
6. 保持自然语言风格

特别注意：
- 如果用户说"不用，你继续添加"，应该理解为确认执行之前讨论的添加操作
- 如果用户说"好的，开始吧"，应该理解为确认开始之前提到的任务
- 确认性回复需要结合对话历史中的具体操作内容进行重写

请返回重写后的完整查询，并说明重写的原因。

格式要求：
- 重写后查询：[完整的查询语句]
- 重写原因：[简要说明]
- 置信度：[0-1之间的数值]
""")

        # Entity-resolution prompt: maps references to concrete entities
        self.entity_resolution_prompt = ChatPromptTemplate.from_template("""
请解析以下查询中的实体引用：

查询：{query}
上下文：{context}

需要解析的引用类型：
1. 代词引用：它、他、她、这、那等
2. 时间引用：明天、昨天、刚才等
3. 地点引用：这里、那里等
4. 对象引用：这个、那个等

请为每个引用找到对应的具体实体，并给出置信度。

返回JSON格式：
{{
    "resolutions": [
        {{
            "reference": "引用词",
            "resolved_entity": "解析后的实体",
            "confidence": 0.95,
            "type": "引用类型"
        }}
    ]
}}
""")

# Synchronous variant removed - the async version below is called from the LangGraph Supervisor

    async def rewrite_query(
        self,
        current_query: str,
        conversation_history: List[Dict[str, str]],
        user_id: str = None
    ) -> QueryRewriteResult:
        """重写查询 - 系统唯一的查询重写实现

        统一在LangGraph Supervisor中调用，避免重复实现和计算成本
        """
        try:
            # 1. 检查是否需要重写
            if not self._needs_rewriting(current_query):
                return QueryRewriteResult(
                    original_query=current_query,
                    rewritten_query=current_query,
                    confidence=1.0,
                    reference_type="none",
                    resolved_entities=[],
                    context_used=[]
                )

            # 2. 分析对话上下文
            context_entities = await self._analyze_context(conversation_history)

            # 3. 执行查询重写
            rewrite_result = await self._perform_rewrite(
                current_query,
                conversation_history,
                context_entities
            )

            # 4. 验证重写结果
            validated_result = await self._validate_rewrite(rewrite_result)

            logger.info(f"Query rewritten (async): '{current_query}' -> '{validated_result.rewritten_query}'")

            return validated_result

        except Exception as e:
            logger.error(f"Failed to rewrite query (async): {e}")
            # 返回原查询作为降级方案
            return QueryRewriteResult(
                original_query=current_query,
                rewritten_query=current_query,
                confidence=0.0,
                reference_type="error",
                resolved_entities=[],
                context_used=[]
            )
    
    def _needs_rewriting(self, query: str) -> bool:
        """判断查询是否需要重写"""
        if not query:
            return False

        try:
            # 检查代词
            has_pronouns = any(pronoun in query for pronoun in self.pronouns)

            # 检查省略指示词
            has_ellipsis = any(indicator in query for indicator in self.ellipsis_indicators)

            # 检查不完整的查询（长度过短且包含疑问词）
            is_incomplete = len(query) < 10 and any(word in query for word in ["呢", "吗", "的", "怎么"])

            # 检查时间引用
            has_time_ref = any(time_expr in query for time_expr in self.time_expressions)

            # 检查确认性回复（新增）
            has_confirmation = any(indicator in query for indicator in self.confirmation_indicators)

            # 特殊检查：包含"继续"的确认性回复
            has_continue_confirmation = "继续" in query and any(word in query for word in ["不用", "好的", "行", "可以", "开始"])

            return has_pronouns or has_ellipsis or is_incomplete or has_time_ref or has_confirmation or has_continue_confirmation

        except Exception as e:
            logger.error(f"Error in _needs_rewriting: {e}")
            return False
    
# Synchronous context analysis removed - the async version below is the only implementation

    async def _analyze_context(self, conversation_history: List[Dict[str, str]]) -> List[ContextEntity]:
        """分析对话上下文（异步版本，保留用于LangGraph Supervisor）"""
        try:
            if not conversation_history:
                return []

            # 格式化对话历史
            formatted_history = self._format_conversation_history(conversation_history)

            # 调用LLM分析上下文
            response = await self.model.ainvoke(
                self.context_analysis_prompt.format(
                    conversation_history=formatted_history
                )
            )

            # 解析响应
            entities = self._parse_context_entities(response.content)

            return entities

        except Exception as e:
            logger.error(f"Failed to analyze context (async): {e}")
            return []
    
# Synchronous rewrite execution removed - the async version below is the only implementation

    async def _perform_rewrite(
        self,
        query: str,
        conversation_history: List[Dict[str, str]],
        context_entities: List[ContextEntity]
    ) -> QueryRewriteResult:
        """执行查询重写（异步版本，保留用于LangGraph Supervisor）"""
        try:
            # 格式化输入
            formatted_history = self._format_conversation_history(conversation_history)
            formatted_entities = self._format_context_entities(context_entities)

            # 调用LLM重写查询
            response = await self.model.ainvoke(
                self.rewrite_prompt.format(
                    conversation_history=formatted_history,
                    current_query=query,
                    context_entities=formatted_entities
                )
            )

            # 解析重写结果
            result = self._parse_rewrite_response(query, response.content, context_entities)

            return result

        except Exception as e:
            logger.error(f"Failed to perform rewrite (async): {e}")
            raise
    
# Synchronous rewrite validation removed - the async version below is the only implementation

    async def _validate_rewrite(self, result: QueryRewriteResult) -> QueryRewriteResult:
        """验证重写结果（异步版本，保留用于LangGraph Supervisor）"""
        try:
            # 基本验证
            if not result.rewritten_query or len(result.rewritten_query.strip()) == 0:
                result.rewritten_query = result.original_query
                result.confidence = 0.0

            # 长度验证
            if len(result.rewritten_query) > 500:  # 避免过长的查询
                result.rewritten_query = result.original_query
                result.confidence = 0.0

            # 相似度验证（避免过度重写）
            similarity = self._calculate_text_similarity(
                result.original_query,
                result.rewritten_query
            )

            if similarity < 0.3:  # 如果差异过大，降低置信度
                result.confidence *= 0.5

            return result

        except Exception as e:
            logger.error(f"Failed to validate rewrite (async): {e}")
            return result
    
    def _format_conversation_history(self, history: List[Dict[str, str]]) -> str:
        """格式化对话历史"""
        formatted = []
        for turn in history[-10:]:  # 只取最近10轮对话
            role = turn.get("role", "user")
            content = turn.get("content", "")
            formatted.append(f"{role}: {content}")
        
        return "\n".join(formatted)
    
    def _format_context_entities(self, entities: List[ContextEntity]) -> str:
        """格式化上下文实体"""
        if not entities:
            return "无相关实体"
        
        formatted = []
        for entity in entities[:10]:  # 限制实体数量
            formatted.append(
                f"- {entity.entity} ({entity.entity_type}, 重要度: {entity.confidence:.2f})"
            )
        
        return "\n".join(formatted)
    
    def _parse_context_entities(self, response_content: str) -> List[ContextEntity]:
        """解析上下文实体"""
        entities = []
        try:
            # 尝试解析JSON
            if "{" in response_content and "}" in response_content:
                json_start = response_content.find("{")
                json_end = response_content.rfind("}") + 1
                json_str = response_content[json_start:json_end]
                data = json.loads(json_str)
                
                # 处理不同的JSON结构
                if isinstance(data, dict):
                    for entity_type, entity_list in data.items():
                        if isinstance(entity_list, list):
                            for entity_info in entity_list:
                                if isinstance(entity_info, dict):
                                    entity = ContextEntity(
                                        entity=entity_info.get("name", ""),
                                        entity_type=entity_type,
                                        mention_count=1,
                                        last_mentioned=datetime.now(),
                                        confidence=entity_info.get("importance", 5) / 10.0
                                    )
                                    entities.append(entity)
            
        except Exception as e:
            logger.warning(f"Failed to parse context entities: {e}")
            # 降级方案：使用简单的关键词提取
            entities = self._extract_keywords_as_entities(response_content)
        
        return entities
    
    def _extract_keywords_as_entities(self, text: str) -> List[ContextEntity]:
        """从文本中提取关键词作为实体（降级方案）"""
        entities = []
        
        # 简单的关键词提取
        words = re.findall(r'\b\w+\b', text)
        word_freq = {}
        
        for word in words:
            if len(word) > 1 and word not in ["的", "是", "在", "有", "和", "与"]:
                word_freq[word] = word_freq.get(word, 0) + 1
        
        # 取频率最高的词作为实体
        for word, freq in sorted(word_freq.items(), key=lambda x: x[1], reverse=True)[:5]:
            entity = ContextEntity(
                entity=word,
                entity_type="keyword",
                mention_count=freq,
                last_mentioned=datetime.now(),
                confidence=min(freq / 3.0, 1.0)
            )
            entities.append(entity)
        
        return entities
    
    def _parse_rewrite_response(
        self,
        original_query: str,
        response_content: str,
        context_entities: List[ContextEntity]
    ) -> QueryRewriteResult:
        """解析重写响应（优化版本，更加灵活）"""
        try:
            lines = response_content.strip().split('\n')

            rewritten_query = original_query
            confidence = 0.5
            reference_type = "unknown"
            resolved_entities = []
            context_used = []

            # 更灵活的解析逻辑
            for line in lines:
                line = line.strip()

                # 查找重写后的查询（支持多种格式）
                if any(keyword in line for keyword in ["重写后查询：", "重写查询：", "重写后：", "查询："]):
                    parts = re.split(r'[：:]', line, 1)
                    if len(parts) > 1:
                        rewritten_query = parts[1].strip()
                        # 移除可能的方括号
                        if rewritten_query.startswith("[") and rewritten_query.endswith("]"):
                            rewritten_query = rewritten_query[1:-1]
                        # 移除引号
                        if rewritten_query.startswith('"') and rewritten_query.endswith('"'):
                            rewritten_query = rewritten_query[1:-1]

                # 查找置信度
                elif any(keyword in line for keyword in ["置信度：", "置信度:", "confidence"]):
                    try:
                        # 提取数字
                        confidence_match = re.search(r'(\d+\.?\d*)', line)
                        if confidence_match:
                            confidence_val = float(confidence_match.group(1))
                            # 如果是百分比形式，转换为小数
                            if confidence_val > 1:
                                confidence_val = confidence_val / 100
                            confidence = min(max(confidence_val, 0.0), 1.0)
                    except:
                        confidence = 0.5

                # 查找重写原因
                elif any(keyword in line for keyword in ["重写原因：", "原因：", "reason"]):
                    reason = line.lower()
                    if "代词" in reason or "pronoun" in reason:
                        reference_type = "pronoun"
                    elif "时间" in reason or "temporal" in reason or "time" in reason:
                        reference_type = "temporal"
                    elif "省略" in reason or "ellipsis" in reason:
                        reference_type = "ellipsis"
                    else:
                        reference_type = "context"

            # 如果没有找到明确的重写查询，尝试从整个响应中提取
            if rewritten_query == original_query:
                # 查找可能的重写结果
                potential_rewrites = []
                for line in lines:
                    line = line.strip()
                    # 跳过明显的标签行
                    if any(keyword in line for keyword in ["重写", "原因", "置信度", "分析"]):
                        continue
                    # 如果行包含查询相关的词汇且不是原查询，可能是重写结果
                    if len(line) > 5 and line != original_query and any(char in line for char in "？?"):
                        potential_rewrites.append(line)

                if potential_rewrites:
                    rewritten_query = potential_rewrites[0]

            # 智能判断是否真的进行了重写
            if rewritten_query != original_query:
                # 计算相似度，如果太相似可能没有实际重写
                similarity = self._calculate_text_similarity(original_query, rewritten_query)
                if similarity > 0.9:  # 如果相似度太高，认为没有有效重写
                    confidence *= 0.5
                else:
                    # 如果确实进行了重写，提高置信度
                    confidence = max(confidence, 0.7)

            # 提取使用的上下文
            for entity in context_entities:
                if entity.entity in rewritten_query and entity.entity not in original_query:
                    resolved_entities.append(entity.entity)
                    context_used.append(f"{entity.entity} ({entity.entity_type})")

            # 如果解析出了实体，提高置信度
            if resolved_entities:
                confidence = max(confidence, 0.8)

            return QueryRewriteResult(
                original_query=original_query,
                rewritten_query=rewritten_query,
                confidence=confidence,
                reference_type=reference_type,
                resolved_entities=resolved_entities,
                context_used=context_used
            )

        except Exception as e:
            logger.error(f"Failed to parse rewrite response: {e}")
            return QueryRewriteResult(
                original_query=original_query,
                rewritten_query=original_query,
                confidence=0.0,
                reference_type="error",
                resolved_entities=[],
                context_used=[]
            )
    
    def _calculate_text_similarity(self, text1: str, text2: str) -> float:
        """计算文本相似度（简单实现）"""
        words1 = set(text1.split())
        words2 = set(text2.split())
        
        intersection = words1.intersection(words2)
        union = words1.union(words2)
        
        if len(union) == 0:
            return 0.0
        
        return len(intersection) / len(union)
