#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
上下文管理服务
负责主题识别、聚类匹配和聚类更新
"""

import re
import time
import uuid
# import json  # unused
from typing import Any, Dict, List, Optional

from utils.logger import emobot_logger
from cache.redis import cache_service
from services.llm import LLMService

logger = emobot_logger.get_logger()


class TopicIdentifier:
    """Identifies the topic of a user message.

    Blends three signals into a weighted per-topic score:
      - keyword matching       (weight 0.4)
      - LLM semantic analysis  (weight 0.3)
      - context continuity     (weight 0.3)
    and picks the highest-scoring topic.
    """

    def __init__(self):
        self.llm_service = LLMService()
        # Topic -> representative keywords; matched case-insensitively
        # against the lowercased message.
        self.topic_keywords = {
            "工作": ["工作", "项目", "会议", "任务", "计划", "deadline", "同事", "老板", "公司", "业务"],
            "生活": ["生活", "家庭", "朋友", "娱乐", "休息", "假期", "购物", "美食", "旅游", "休闲"],
            "健康": ["健康", "运动", "医疗", "睡眠", "饮食", "锻炼", "医生", "医院", "身体", "保健"],
            "学习": ["学习", "教育", "知识", "技能", "课程", "培训", "考试", "读书", "学校", "老师"],
            "情感": ["情感", "心情", "感受", "情绪", "压力", "快乐", "难过", "焦虑", "担心", "开心"],
            "技术": ["技术", "编程", "软件", "AI", "科技", "开发", "代码", "算法", "系统", "程序"]
        }

    def identify_topic(self, user_message: str, conversation_history: Optional[List[Dict]] = None) -> Dict[str, Any]:
        """Identify the topic of *user_message*.

        Args:
            user_message: The incoming user message.
            conversation_history: Optional recent turns; dicts carrying a
                'topic' key contribute a continuity bonus.

        Returns:
            Dict with 'topic', 'confidence', 'all_scores', 'timestamp'.
            Falls back to topic "其他" with confidence 0.0 on any error.
        """
        try:
            # 1. Keyword matching
            keyword_scores = self._analyze_keywords(user_message)

            # 2. LLM semantic analysis
            semantic_scores = self._analyze_semantics(user_message)

            # 3. Context continuity
            context_scores = self._analyze_context(user_message, conversation_history or [])

            # 4. Weighted combination of all three signals
            final_scores = self._combine_scores(keyword_scores, semantic_scores, context_scores)

            # 5. Highest-scoring topic wins (keyword scoring guarantees
            #    final_scores is never empty)
            best_topic, best_score = max(final_scores.items(), key=lambda item: item[1])

            return {
                "topic": best_topic,
                "confidence": best_score,
                "all_scores": final_scores,
                "timestamp": time.time()
            }

        except Exception as e:
            logger.error(f"主题识别失败: {e}")
            return {
                "topic": "其他",
                "confidence": 0.0,
                "all_scores": {},
                "timestamp": time.time()
            }

    def _analyze_keywords(self, message: str) -> Dict[str, float]:
        """Score each topic as the fraction of its keywords present in *message*.

        Comparison is case-insensitive on both sides: the previous code
        lowercased only the message, so uppercase keywords such as "AI"
        could never match.
        """
        message_lower = message.lower()
        return {
            topic: sum(1 for kw in keywords if kw.lower() in message_lower) / len(keywords)
            for topic, keywords in self.topic_keywords.items()
        }

    def _analyze_semantics(self, message: str) -> Dict[str, float]:
        """Ask the LLM to score topic affinity; returns {} on failure."""
        try:
            prompt = f"""
            分析以下消息的主题倾向：
            消息：{message}
            
            请从以下主题中选择最相关的（可多选）：
            - 工作：工作相关话题
            - 生活：日常生活话题
            - 健康：健康医疗话题
            - 学习：教育学习话题
            - 情感：情感心理话题
            - 技术：技术科技话题
            - 其他：其他话题
            
            返回格式：主题:分数（0-1），多个主题用逗号分隔
            """

            result = self.llm_service.chat_completion([{"role": "user", "content": prompt}])
            return self._parse_semantic_result(result)

        except Exception as e:
            logger.error(f"语义分析失败: {e}")
            return {}

    def _analyze_context(self, message: str, conversation_history: List[Dict]) -> Dict[str, float]:
        """Give a flat continuity bonus to topics seen in recent turns."""
        if not conversation_history:
            return {}

        # Topics of the most recent turns (up to 3)
        recent_topics = [conv['topic'] for conv in conversation_history[-3:] if 'topic' in conv]

        # 0.8 continuity bonus for each recently-seen topic
        return {topic: 0.8 for topic in recent_topics}

    def _combine_scores(self, keyword_scores: Dict, semantic_scores: Dict, context_scores: Dict) -> Dict[str, float]:
        """Weighted sum of the three signals: keywords 0.4, semantics 0.3, context 0.3."""
        all_topics = set(keyword_scores) | set(semantic_scores) | set(context_scores)
        return {
            topic: (
                keyword_scores.get(topic, 0) * 0.4
                + semantic_scores.get(topic, 0) * 0.3
                + context_scores.get(topic, 0) * 0.3
            )
            for topic in all_topics
        }

    def _parse_semantic_result(self, result: str) -> Dict[str, float]:
        """Parse an LLM reply of the form "topic:score, topic:score, ...".

        Only the first line is parsed (trailing explanation text is
        ignored) and non-numeric noise after a score is tolerated.
        Returns {} when nothing can be parsed.
        """
        scores: Dict[str, float] = {}
        try:
            # Only the first line carries the structured answer
            first_line = result.strip().split('\n')[0]

            for part in first_line.split(','):
                if ':' not in part:
                    continue
                topic, score_str = part.split(':', 1)
                topic = topic.strip()
                score_str = score_str.strip()
                # Extract only the leading number; `re` is imported at
                # module level rather than inside this loop.
                match = re.match(r'^\s*([\d.]+)', score_str)
                if match:
                    try:
                        scores[topic] = float(match.group(1))
                    except ValueError:
                        logger.warning(f"无法解析分数: {score_str}")
        except Exception as e:
            logger.error(f"解析语义分析结果失败: {e}, 原始内容: {result[:200]}")

        return scores


class ClusterManager:
    """Manages per-session message clusters, grouped by topic.

    The session context lives in the cache under
    "context:{user_id}:{session_id}" with the shape:
        {
            'topics': {topic: {cluster_id: {'messages', 'created_at',
                                            'last_activity', 'message_count'}}},
            'current_topic': str | None,
            'message_count': int,
            'last_updated': float,
        }
    """

    # Minimum similarity for a message to join an existing cluster.
    SIMILARITY_THRESHOLD = 0.6
    # Cached session context expires after one hour.
    CONTEXT_TTL_SECONDS = 3600

    def __init__(self):
        self.cache_service = cache_service

    def find_matching_cluster(self, user_id: str, session_id: str, topic: str, message: Dict) -> Optional[str]:
        """Return the id of the most similar same-topic cluster, or None.

        Only clusters scoring above SIMILARITY_THRESHOLD qualify.
        Returns None when no context exists or on any error.
        """
        try:
            context_key = f"context:{user_id}:{session_id}"
            context = self.cache_service.get(context_key)

            if not context or 'topics' not in context:
                return None

            topic_clusters = context['topics'].get(topic, {})
            if not topic_clusters:
                return None

            # Pick the most similar cluster above the threshold
            best_cluster = None
            best_similarity = self.SIMILARITY_THRESHOLD
            for cluster_id, cluster_data in topic_clusters.items():
                similarity = self._calculate_similarity(cluster_data, message)
                if similarity > best_similarity:
                    best_similarity = similarity
                    best_cluster = cluster_id

            return best_cluster

        except Exception as e:
            logger.error(f"查找匹配聚类失败: {e}")
            return None

    def create_new_cluster(self, user_id: str, session_id: str, topic: str, message: Dict) -> Optional[str]:
        """Create a new cluster for *topic* seeded with *message*.

        Returns the new cluster id, or None on failure.

        The id carries a UUID suffix: the previous timestamp-only id
        ("cluster_<epoch-seconds>") collided when two clusters were
        created within the same second, silently overwriting the first.
        """
        try:
            cluster_id = f"cluster_{uuid.uuid4().hex}"

            context_key = f"context:{user_id}:{session_id}"
            # Start a fresh context when none is cached yet
            context = self.cache_service.get(context_key) or {
                'topics': {},
                'current_topic': None,
                'message_count': 0,
                'last_updated': time.time()
            }

            now = time.time()
            context['topics'].setdefault(topic, {})[cluster_id] = {
                'messages': [message],
                'created_at': now,
                'last_activity': now,
                'message_count': 1
            }

            # Session-level bookkeeping
            context['current_topic'] = topic
            context['message_count'] += 1
            context['last_updated'] = now

            self.cache_service.set(context_key, context, expiration=self.CONTEXT_TTL_SECONDS)

            logger.info(f"创建新聚类: {cluster_id}, 主题: {topic}")
            return cluster_id

        except Exception as e:
            logger.error(f"创建新聚类失败: {e}")
            return None

    def update_cluster(self, user_id: str, session_id: str, topic: str, cluster_id: str, message: Dict) -> bool:
        """Append *message* to an existing cluster.

        Returns True on success, False when the context or cluster no
        longer exists or on any error.
        """
        try:
            context_key = f"context:{user_id}:{session_id}"
            context = self.cache_service.get(context_key)

            if not context or 'topics' not in context:
                return False

            cluster = context['topics'].get(topic, {}).get(cluster_id)
            if cluster is None:
                return False

            now = time.time()
            cluster['messages'].append(message)
            cluster['last_activity'] = now
            cluster['message_count'] += 1

            # Session-level bookkeeping
            context['current_topic'] = topic
            context['message_count'] += 1
            context['last_updated'] = now

            self.cache_service.set(context_key, context, expiration=self.CONTEXT_TTL_SECONDS)

            logger.info(f"更新聚类: {cluster_id}, 主题: {topic}")
            return True

        except Exception as e:
            logger.error(f"更新聚类失败: {e}")
            return False

    def _calculate_similarity(self, cluster_data: Dict, message: Dict) -> float:
        """Score how well *message* fits *cluster_data*; 0.0 on error.

        Weighted blend: topic consistency 50% (always 1.0 — callers only
        compare clusters within the same topic), semantic overlap 30%,
        time recency 20%.
        """
        try:
            topic_consistency = 1.0  # same topic by construction
            semantic_similarity = self._calculate_semantic_similarity(cluster_data['messages'], message)
            time_relevance = self._calculate_time_relevance(
                cluster_data['last_activity'], message.get('timestamp', time.time()))

            return (
                topic_consistency * 0.5
                + semantic_similarity * 0.3
                + time_relevance * 0.2
            )

        except Exception as e:
            logger.error(f"计算相似度失败: {e}")
            return 0.0

    def _calculate_semantic_similarity(self, cluster_messages: List[Dict], message: Dict) -> float:
        """Best Jaccard word overlap between *message* and the cluster's
        three most recent messages; 0.0 for an empty cluster."""
        if not cluster_messages:
            return 0.0

        message_words = set(message['user_message'].lower().split())
        best = 0.0
        for cluster_msg in cluster_messages[-3:]:
            cluster_words = set(cluster_msg['user_message'].lower().split())
            union = message_words | cluster_words
            if union:
                best = max(best, len(message_words & cluster_words) / len(union))
        return best

    def _calculate_time_relevance(self, cluster_time: float, message_time: float) -> float:
        """Recency score: the smaller the time gap, the higher the score."""
        time_diff = abs(message_time - cluster_time)

        if time_diff < 3600:      # within 1 hour
            return 1.0
        if time_diff < 86400:     # within 1 day
            return 0.8
        if time_diff < 604800:    # within 1 week
            return 0.6
        return 0.3


class ContextManager:
    """Facade tying topic identification and cluster management together."""

    def __init__(self):
        self.topic_identifier = TopicIdentifier()
        self.cluster_manager = ClusterManager()
        self.cache_service = cache_service

    def process_message(self, user_id: str, session_id: str, user_message: str, bot_response: str, conversation_history: Optional[List[Dict]] = None) -> Dict[str, Any]:
        """Classify one exchange and file it into a topic cluster.

        Args:
            user_id: Owner of the cached session context.
            session_id: Session key within that user's context.
            user_message: The user's message.
            bot_response: The bot's reply, stored alongside the message.
            conversation_history: Optional recent turns used for topic
                continuity scoring.

        Returns:
            Dict with 'success', 'action' ('updated' | 'created' | 'error'),
            'topic', 'confidence', 'cluster_id', 'timestamp'; an extra
            'error' key is present on failure.
        """
        try:
            # 1. Identify the topic of the incoming message
            topic_result = self.topic_identifier.identify_topic(user_message, conversation_history)
            topic = topic_result['topic']
            confidence = topic_result['confidence']

            # 2. Build the message record
            message_obj = {
                'user_message': user_message,
                'bot_response': bot_response,
                'topic': topic,
                'confidence': confidence,
                'timestamp': time.time()
            }

            # 3. Try to join an existing cluster for this topic
            cluster_id = self.cluster_manager.find_matching_cluster(user_id, session_id, topic, message_obj)

            # 4. Update the matched cluster, or start a new one
            if cluster_id:
                success = self.cluster_manager.update_cluster(user_id, session_id, topic, cluster_id, message_obj)
                action = "updated"
            else:
                cluster_id = self.cluster_manager.create_new_cluster(user_id, session_id, topic, message_obj)
                success = cluster_id is not None
                action = "created"

            # 5. Report the outcome
            result = {
                "success": success,
                "action": action,
                "topic": topic,
                "confidence": confidence,
                "cluster_id": cluster_id,
                "timestamp": time.time()
            }

            logger.info(f"上下文处理完成: {result}")
            return result

        except Exception as e:
            logger.error(f"上下文处理失败: {e}")
            return {
                "success": False,
                "action": "error",
                "topic": "其他",
                "confidence": 0.0,
                "cluster_id": None,
                "timestamp": time.time(),
                "error": str(e)
            }

    def get_context_summary(self, user_id: str, session_id: str) -> Dict[str, Any]:
        """Summarize the cached session context.

        Returns the current topic, total message count, last-updated time
        and per-topic cluster statistics; an empty summary when nothing
        is cached or on error.
        """
        try:
            context_key = f"context:{user_id}:{session_id}"
            context = self.cache_service.get(context_key)

            if not context:
                return {"topics": {}, "current_topic": None, "message_count": 0}

            summary = {
                "current_topic": context.get('current_topic'),
                "message_count": context.get('message_count', 0),
                "topics": {},
                "last_updated": context.get('last_updated')
            }

            # Per-topic cluster statistics
            for topic, clusters in context.get('topics', {}).items():
                summary["topics"][topic] = {
                    "cluster_count": len(clusters),
                    "total_messages": sum(c['message_count'] for c in clusters.values()),
                    "last_activity": max(c['last_activity'] for c in clusters.values()) if clusters else 0
                }

            return summary

        except Exception as e:
            logger.error(f"获取上下文摘要失败: {e}")
            return {"topics": {}, "current_topic": None, "message_count": 0}
