import numpy as np
import time
from datetime import datetime

class CounterfactualReasoner:
    """Counterfactual reasoner: detect anomalous user behavior and reason
    about its likely causes.

    Keeps a short rolling window of observed behavior feature vectors (one
    per interaction, tagged with a context label), compares each new
    observation against a per-context baseline, and — when behavior deviates
    from habit — ranks plausible explanations and can phrase a clarification
    question to ask the user.
    """

    def __init__(self, user_profile):
        """Create a reasoner for one user.

        Args:
            user_profile: opaque per-user profile object; stored as-is and
                never interpreted by this class.
        """
        self.user_profile = user_profile

        # Anomaly-detection parameters.
        self.anomaly_threshold = 0.8  # score above which behavior counts as anomalous
        self.behavior_window = 10     # number of recent observations retained

        # Rolling behavior history used for anomaly detection.  All four
        # lists are kept in lockstep and trimmed together to
        # `behavior_window` entries (see detect_anomalies).
        self.behavior_history = {
            'features': [],
            'contexts': [],
            'timestamps': [],
            'anomaly_scores': []
        }

        # Library of plausible anomaly causes per context, as
        # (cause, prior-probability) pairs.  These strings are runtime data
        # (they feed generate_clarification) and are kept verbatim.
        self.possible_causes = {
            'general': [
                ('新学习阶段', 0.3),
                ('任务变更', 0.25),
                ('先前理解错误', 0.2),
                ('环境变化', 0.15),
                ('随机波动', 0.1)
            ],
            'work': [
                ('工作负载变化', 0.3),
                ('任务优先级调整', 0.25),
                ('新工具/系统适应', 0.2),
                ('沟通对象变化', 0.15),
                ('时间压力', 0.1)
            ],
            'casual': [
                ('情绪变化', 0.3),
                ('社交环境变化', 0.25),
                ('兴趣转移', 0.2),
                ('疲劳状态', 0.15),
                ('外部干扰', 0.1)
            ],
            'learning': [
                ('概念理解突破', 0.3),
                ('学习目标调整', 0.25),
                ('知识水平提升', 0.2),
                ('学习方法改变', 0.15),
                ('注意力分散', 0.1)
            ]
        }

        # Reasoning statistics; summarized by get_reason_stats().
        # NOTE(review): 'avg_reasoning_time' accumulates one float per
        # detect_anomalies call and is never trimmed — consider capping it
        # for long-running processes.
        self.reason_stats = {
            'anomalies_detected': 0,
            'clarifications_generated': 0,
            'avg_reasoning_time': [],
            'cause_prediction_accuracy': []  # reserved; never populated here
        }

        # Habit-drift monitoring (at most one check per 24 h).
        self.habit_drift_threshold = 0.15  # daily drift score that triggers a warning
        self.daily_drift_scores = []       # last 7 daily drift scores
        self.last_drift_check = time.time()

    def _calculate_behavior_baseline(self, context):
        """Return the mean feature vector for `context`, or None.

        Prefers observations recorded under the same context; falls back to
        the whole history when fewer than 3 same-context samples exist, and
        returns None when there are fewer than 3 samples overall.
        """
        # Indices of historical observations recorded under this context.
        context_indices = [i for i, c in enumerate(self.behavior_history['contexts']) if c == context]

        if len(context_indices) < 3:
            # Not enough same-context data: fall back to the full history.
            if len(self.behavior_history['features']) < 3:
                return None  # not enough data for any baseline
            return np.mean(self.behavior_history['features'], axis=0)

        # Enough same-context data: baseline over that subset only.
        context_features = [self.behavior_history['features'][i] for i in context_indices]
        return np.mean(context_features, axis=0)

    def _calculate_anomaly_score(self, features, baseline, context):
        """Score how far `features` deviates from `baseline`, in [0, 1].

        The Euclidean distance to the baseline is normalized by the largest
        distance observed for the same context.  NOTE(review): by the time
        this runs, the current observation has already been appended to the
        history, so it participates in both the baseline and the maximum —
        confirm this dilution is intended.  Without usable same-context
        history, a fixed scale of 2.0 is assumed.
        """
        if baseline is None:
            return 0.0  # no baseline -> cannot score

        # Euclidean distance from the baseline.
        distance = np.linalg.norm(features - baseline)

        # Normalize by the maximum historical distance in this context.
        context_features = [f for f, c in zip(self.behavior_history['features'],
                                             self.behavior_history['contexts']) if c == context]

        if context_features:
            max_distance = max([np.linalg.norm(f - baseline) for f in context_features])
            if max_distance > 0:
                return min(distance / max_distance, 1.0)

        # Fallback scaling when no usable history: assume 2.0 is the largest
        # reasonable distance.
        return min(distance / 2.0, 1.0)

    def _identify_anomaly_features(self, features, baseline):
        """Return indices of the feature dimensions deviating most from baseline.

        Per-dimension absolute differences are normalized by their maximum;
        dimensions above 0.7 are reported.  NOTE(review): because the
        normalization is relative, the single largest-deviation dimension is
        always reported whenever any nonzero deviation exists.
        """
        if baseline is None:
            return []

        # Per-dimension deviation, scaled so the largest is ~1.0.
        feature_diff = np.abs(features - baseline)
        normalized_diff = feature_diff / (np.max(feature_diff) + 1e-8)  # epsilon avoids 0/0

        return [i for i, diff in enumerate(normalized_diff) if diff > 0.7]

    def _check_habit_drift(self):
        """Run the (at most daily) habit-drift check.

        Splits the last 24 h of observations chronologically in half and
        measures how far the mean of the newer half moved from the mean of
        the older half.

        Returns:
            (drift_exceeded, drift_score): whether the score crossed
            `habit_drift_threshold`, and the score itself.
        """
        current_time = time.time()
        time_since_last_check = current_time - self.last_drift_check

        # Only check once per day.
        if time_since_last_check < 86400:  # 86400 s = 1 day
            return False, 0.0

        if len(self.behavior_history['timestamps']) < 2:
            self.last_drift_check = current_time
            return False, 0.0

        # Keep only data from the last 24 hours.
        day_ago = current_time - 86400
        recent_indices = [i for i, t in enumerate(self.behavior_history['timestamps']) if t >= day_ago]

        if len(recent_indices) < 5:  # need at least 5 data points
            self.last_drift_check = current_time
            return False, 0.0

        # Drift = distance between the means of the older and newer halves.
        recent_features = [self.behavior_history['features'][i] for i in recent_indices]
        recent_baseline = np.mean(recent_features[:len(recent_features)//2], axis=0)
        recent_mean = np.mean(recent_features[len(recent_features)//2:], axis=0)

        drift_score = np.linalg.norm(recent_mean - recent_baseline)

        # Keep only the most recent 7 daily drift scores.
        self.daily_drift_scores.append(drift_score)
        if len(self.daily_drift_scores) > 7:
            self.daily_drift_scores.pop(0)

        self.last_drift_check = current_time

        return drift_score > self.habit_drift_threshold, drift_score

    def detect_anomalies(self, features, context):
        """Record one observation and score how anomalous it is.

        Args:
            features: numeric feature vector for this interaction (any
                array-like; coerced to a float ndarray).
            context: hashable context label (e.g. 'work', 'casual').

        Returns:
            (anomaly_score, anomaly_features): score in [0, 1] and the list
            of indices of the most-deviant feature dimensions.
        """
        start_time = time.time()

        # Coerce once so all later vector arithmetic is uniform even for
        # plain-list inputs.
        features = np.asarray(features, dtype=float)

        # Record the observation.
        self.behavior_history['features'].append(features)
        self.behavior_history['contexts'].append(context)
        self.behavior_history['timestamps'].append(time.time())

        # Trim the rolling window.
        if len(self.behavior_history['features']) > self.behavior_window:
            self.behavior_history['features'].pop(0)
            self.behavior_history['contexts'].pop(0)
            self.behavior_history['timestamps'].pop(0)

        # Per-context behavior baseline (None when data is insufficient).
        baseline = self._calculate_behavior_baseline(context)

        anomaly_score = self._calculate_anomaly_score(features, baseline, context)
        self.behavior_history['anomaly_scores'].append(anomaly_score)
        # Bug fix: anomaly_scores previously grew without bound; keep it in
        # lockstep with the other history lists.
        if len(self.behavior_history['anomaly_scores']) > self.behavior_window:
            self.behavior_history['anomaly_scores'].pop(0)

        # Which feature dimensions drove the deviation.
        anomaly_features = self._identify_anomaly_features(features, baseline)

        # Daily habit-drift check (a no-op more often than once per 24 h).
        drift_detected, drift_score = self._check_habit_drift()
        if drift_detected:
            print(f"警告: 习惯漂移率超过阈值 ({drift_score:.2f} > {self.habit_drift_threshold})")

        # Update statistics.
        reasoning_time = time.time() - start_time
        self.reason_stats['avg_reasoning_time'].append(reasoning_time)

        if anomaly_score > self.anomaly_threshold:
            self.reason_stats['anomalies_detected'] += 1

        return anomaly_score, anomaly_features

    def reason_about_anomaly(self, features, context, anomaly_features):
        """Rank plausible causes for an anomalous observation.

        Merges the context-specific cause library with the general one
        (keeping the higher prior for duplicate cause names), boosts every
        prior by 10% per deviant feature dimension (capped at 1.0), and
        returns (cause, adjusted_probability) pairs sorted most-likely
        first.
        """
        context_causes = self.possible_causes.get(context, [])
        general_causes = self.possible_causes['general']

        # Merge and de-duplicate, keeping the larger prior.
        all_causes = {}
        for cause, prob in context_causes + general_causes:
            if cause in all_causes:
                all_causes[cause] = max(all_causes[cause], prob)
            else:
                all_causes[cause] = prob

        # Simplified adjustment: scale priors by the number of deviant
        # feature dimensions (a richer model could weight by feature type).
        adjusted_causes = []
        for cause, prob in all_causes.items():
            adjustment = 1.0 + (len(anomaly_features) * 0.1)
            adjusted_prob = min(prob * adjustment, 1.0)
            adjusted_causes.append((cause, adjusted_prob))

        # Most likely first.
        adjusted_causes.sort(key=lambda x: -x[1])
        return adjusted_causes

    def generate_clarification(self, possible_causes, context):
        """Phrase a clarification question based on the most likely cause.

        Falls back to a generic question when no causes are available.
        Increments 'clarifications_generated' on every call (bug fix: the
        fallback path previously skipped the counter).
        """
        # Count every generated clarification, including the fallback one.
        self.reason_stats['clarifications_generated'] += 1

        if not possible_causes:
            # Generic fallback question.
            return "我注意到您的使用习惯似乎有所变化，是否需要我调整响应方式？"

        # Build the question around the top-ranked cause.
        top_cause = possible_causes[0][0]

        # Cause-specific question templates (user-facing runtime strings,
        # kept verbatim).
        clarification_templates = {
            '新学习阶段': "看起来您可能正在学习新内容，需要我提供更详细的解释吗？",
            '任务变更': "您似乎在处理不同类型的任务，需要我调整响应风格吗？",
            '先前理解错误': "抱歉，我可能误解了您的需求，能再详细说明一下吗？",
            '环境变化': "您的使用环境似乎有所变化，需要我调整响应方式吗？",
            '工作负载变化': "您的工作节奏似乎有所变化，需要我加快或减慢响应速度吗？",
            '情绪变化': "我注意到您的沟通风格有所变化，需要我调整互动方式吗？",
            '概念理解突破': "看起来您对这个概念有了新的理解，需要更深入的内容吗？"
        }

        # Use a matching template, or a generic question naming the cause.
        return clarification_templates.get(top_cause,
            f"我注意到您的需求似乎有所变化，是因为{top_cause}吗？需要我调整一下吗？")

    def get_reason_stats(self):
        """Return a snapshot of reasoning statistics as a plain dict."""
        avg_reasoning_time = np.mean(self.reason_stats['avg_reasoning_time']) \
                            if self.reason_stats['avg_reasoning_time'] else 0

        avg_drift = np.mean(self.daily_drift_scores) if self.daily_drift_scores else 0

        return {
            'anomalies_detected': self.reason_stats['anomalies_detected'],
            'clarifications_generated': self.reason_stats['clarifications_generated'],
            'avg_reasoning_time': avg_reasoning_time,
            'avg_habit_drift': avg_drift,
            'drift_threshold': self.habit_drift_threshold,
            'behavior_window_size': len(self.behavior_history['features'])
        }
    