import numpy as np
import time
from datetime import datetime, timedelta

class LearningEngine:
    """
    Learning engine implementing a three-stage habit reinforcement mechanism.

    Combines a dual-channel learning system — a "turtle" channel (slow
    learning, long-term habits) and a "rabbit" channel (fast learning,
    immediate adaptation) — to avoid the problem of a single, flat
    feedback dimension.
    """

    def __init__(self, user_profile):
        self.user_profile = user_profile

        # Dual-channel learning system.
        # Turtle channel — slow learning, long-term habits (slow decay).
        self.turtle_channel = {
            'weights': None,
            'learning_rate': 0.01,
            'decay_rate': 0.997,  # slow hourly decay
            'last_updated': None
        }

        # Rabbit channel — fast learning, immediate adaptation (fast decay).
        self.rabbit_channel = {
            'weights': None,
            'learning_rate': 0.1,
            'decay_rate': 0.95,   # fast hourly decay
            'last_updated': None
        }

        # Learning state.
        self.feature_dim = None       # set lazily on first feedback
        self.learning_history = []    # combined weight snapshot per step
        self.conflict_history = []    # recorded gradient conflicts

        # Synaptic-plasticity parameters.
        self.eta = 0.1    # learning-rate factor
        self.gamma = 0.7  # historical-accumulation factor (adjusted dynamically)

        # Three-stage reinforcement state.
        self.immediate_state = {}    # immediate (within-session) reinforcement
        self.short_term_memory = []  # short-term memory (last 3 sessions)
        self.long_term_trends = []   # long-term trends (30-day window)

        # Performance metrics.
        # NOTE(review): the two list metrics grow without bound; callers
        # should trim them if the engine runs for a long time.
        self.performance_metrics = {
            'learning_steps': 0,
            'conflicts_detected': 0,
            'avg_adaptation_time': [],
            'habit_stability': []
        }

    def _initialize_weights(self, feature_dim):
        """Lazily initialize both channels' weights on first use.

        No-op once `feature_dim` has been fixed; later calls with a
        different dimension are ignored (matching the original behavior).
        """
        if self.feature_dim is None:
            self.feature_dim = feature_dim
            self.turtle_channel['weights'] = np.random.normal(0, 0.1, size=feature_dim)
            self.rabbit_channel['weights'] = np.random.normal(0, 0.1, size=feature_dim)
            self.turtle_channel['last_updated'] = time.time()
            self.rabbit_channel['last_updated'] = time.time()

    def _apply_decay(self, channel):
        """Apply exponential time decay to a channel's weights.

        Decay is compounded per hour elapsed since `last_updated`:
        weights *= decay_rate ** hours_passed.
        """
        if channel['last_updated'] is None:
            return

        current_time = time.time()
        time_diff = current_time - channel['last_updated']
        # Decay is expressed per hour.
        hours_passed = time_diff / 3600
        decay_factor = channel['decay_rate'] ** hours_passed

        channel['weights'] *= decay_factor
        channel['last_updated'] = current_time

    def _calculate_stability_score(self):
        """Return the current system stability score, clipped to [0.1, 0.9].

        Stability is 1 minus the mean L2 change over the last 5 weight
        updates: the smaller the recent changes, the more stable.
        """
        # Computing 5 consecutive deltas needs 6 snapshots.
        # BUGFIX: the original guard (< 5) allowed exactly 5 entries
        # through, and learning_history[-6] then raised IndexError.
        if len(self.learning_history) < 6:
            return 0.5  # neutral stability while warming up

        recent_changes = [np.linalg.norm(self.learning_history[i] - self.learning_history[i - 1])
                          for i in range(-5, 0)]

        # Smaller change -> higher stability.
        stability = 1 - np.mean(recent_changes)
        return np.clip(stability, 0.1, 0.9)

    def _calculate_emergency_level(self, feedback_score):
        """Return urgency in [0.1, 0.9]: low scores demand faster adaptation."""
        # The lower the score, the higher the urgency.
        return np.clip(1 - feedback_score, 0.1, 0.9)

    def gradient_arbitration(self, gradient1, gradient2, feedback_score):
        """
        Gradient arbitration: resolve conflicts between the two channels
        via a dynamically weighted average.

        Returns (arbitrated_gradient, conflict_detected). A conflict is a
        negative dot product between the two gradients (opposing updates).
        """
        stability_score = self._calculate_stability_score()
        emergency_level = self._calculate_emergency_level(feedback_score)

        # Dynamic mixing ratio: more stable and less urgent favors
        # gradient1 (the slow channel).
        ratio = stability_score / (emergency_level + 1e-6)
        ratio = np.clip(ratio, 0.1, 0.9)  # cap extreme ratios

        # Opposing directions indicate a channel conflict.
        conflict_detected = np.dot(gradient1, gradient2) < 0

        if conflict_detected:
            self.conflict_history.append({
                'timestamp': time.time(),
                'stability': stability_score,
                'emergency': emergency_level,
                'ratio': ratio
            })
            self.performance_metrics['conflicts_detected'] += 1

        # Dynamically weighted average of the two proposals.
        arbitrated_gradient = gradient1 * ratio + gradient2 * (1 - ratio)
        return arbitrated_gradient, conflict_detected

    def _update_learning_rates(self, feedback_score):
        """Adjust both channels' learning rates from feedback.

        Good feedback (> 0.5) lowers the rates; poor feedback raises them.
        Each channel is clipped to its own bounds.
        """
        adjustment = 1.0 + (0.5 - feedback_score) * 0.4

        self.turtle_channel['learning_rate'] = np.clip(
            self.turtle_channel['learning_rate'] * adjustment, 0.001, 0.1)

        self.rabbit_channel['learning_rate'] = np.clip(
            self.rabbit_channel['learning_rate'] * adjustment, 0.01, 0.5)

    def _dynamic_gamma_adjustment(self, features):
        """Adjust gamma from the feature entropy, clipped to [0.3, 0.9].

        Higher entropy -> smaller gamma (weight new information more).
        NOTE(review): the min-max-normalized features are not a
        probability distribution, so this is a heuristic entropy-like
        measure, not Shannon entropy proper.
        """
        normalized_features = (features - np.min(features)) / (np.max(features) - np.min(features) + 1e-8)
        entropy = -np.sum(normalized_features * np.log(normalized_features + 1e-8))

        self.gamma = np.clip(0.9 - entropy * 0.5, 0.3, 0.9)

    def learn_from_feedback(self, feedback, features, importance=1.0):
        """
        Learn from one piece of user feedback.

        Implements a biologically inspired synaptic-plasticity rule:
            delta_w = eta * (current_importance - gamma * historical_accumulation)

        Parameters
        ----------
        feedback : dict
            Must contain 'score' (float, higher is better); may contain
            'correction' (float, defaults to 0.0 when absent).
        features : array-like
            Feature vector; its length fixes the engine's feature_dim on
            the first call.
        importance : float
            Multiplier on the feedback score.

        Returns
        -------
        dict with keys 'weights_updated', 'conflict_detected',
        'adaptation_time', 'stability_score'.
        """
        start_time = time.time()

        # Coerce to float array and (lazily) size the weight vectors.
        features = np.array(features, dtype=float)
        self._initialize_weights(len(features))

        # Apply time decay to both channels before the update.
        self._apply_decay(self.turtle_channel)
        self._apply_decay(self.rabbit_channel)

        # Parse the feedback.
        feedback_score = feedback['score']
        # BUGFIX: missing 'correction' used to raise KeyError; 0.0 means
        # "no correction" and leaves the gradient untouched.
        correction = feedback.get('correction', 0.0)

        # Dynamically adjust the gamma parameter.
        self._dynamic_gamma_adjustment(features)

        # Current gradient: a higher feedback score means stronger
        # agreement with the current features.
        current_importance = feedback_score * importance
        historical_accumulation = np.dot(self.turtle_channel['weights'], features)

        # Synaptic-plasticity rule.
        gradient = self.eta * (current_importance - self.gamma * historical_accumulation) * features

        # Fold in the correction factor along the gradient's sign.
        gradient += correction * 0.1 * np.sign(gradient)

        # Per-channel proposals.
        turtle_gradient = gradient * self.turtle_channel['learning_rate']
        rabbit_gradient = gradient * self.rabbit_channel['learning_rate']

        # Arbitrate between the two channels.
        arbitrated_gradient, conflict_detected = self.gradient_arbitration(
            turtle_gradient, rabbit_gradient, feedback_score)

        # Apply the update: the turtle channel takes a smaller share,
        # the rabbit channel a larger one.
        self.turtle_channel['weights'] += arbitrated_gradient * 0.3
        self.rabbit_channel['weights'] += arbitrated_gradient * 0.7
        self.turtle_channel['last_updated'] = time.time()
        self.rabbit_channel['last_updated'] = time.time()

        # Record the combined weights for stability tracking.
        combined_weights = (self.turtle_channel['weights'] * 0.6 +
                            self.rabbit_channel['weights'] * 0.4)
        self.learning_history.append(combined_weights.copy())

        # Adapt the learning rates to this feedback.
        self._update_learning_rates(feedback_score)

        # Three-stage habit reinforcement.
        self._immediate_reinforcement(feedback, features)
        self._short_term_reinforcement(feedback, features)
        self._long_term_reinforcement(feedback, features)

        # Update performance metrics.
        adaptation_time = time.time() - start_time
        self.performance_metrics['learning_steps'] += 1
        self.performance_metrics['avg_adaptation_time'].append(adaptation_time)

        # Track habit stability.
        stability = self._calculate_stability_score()
        self.performance_metrics['habit_stability'].append(stability)

        return {
            'weights_updated': True,
            'conflict_detected': conflict_detected,
            'adaptation_time': adaptation_time,
            'stability_score': stability
        }

    def _immediate_reinforcement(self, feedback, features):
        """Immediate reinforcement: record the current session's feedback."""
        current_time = time.time()
        self.immediate_state = {
            'feedback_score': feedback['score'],
            'features': features,
            'timestamp': current_time,
            'adjustments': []
        }

    def _short_term_reinforcement(self, feedback, features):
        """Short-term reinforcement: pattern analysis over the last 3 sessions."""
        # Keep only the 3 most recent sessions.
        self.short_term_memory.append({
            'feedback': feedback,
            'features': features,
            'timestamp': time.time()
        })

        if len(self.short_term_memory) > 3:
            self.short_term_memory.pop(0)

        # Analyze the short-term pattern once 3 sessions are available.
        if len(self.short_term_memory) == 3:
            avg_feedback = np.mean([m['feedback']['score'] for m in self.short_term_memory])

            # Poor average feedback -> boost learning rates.
            # FIX: clip the boost to the same caps used by
            # _update_learning_rates so repeated boosts cannot push the
            # rates past their intended bounds.
            if avg_feedback < 0.5:
                self.turtle_channel['learning_rate'] = np.clip(
                    self.turtle_channel['learning_rate'] * 1.2, 0.001, 0.1)
                self.rabbit_channel['learning_rate'] = np.clip(
                    self.rabbit_channel['learning_rate'] * 1.2, 0.01, 0.5)

    def _long_term_reinforcement(self, feedback, features):
        """Long-term reinforcement: style-drift trends over a 30-day window."""
        # Record the trend entry (timestamped by date).
        self.long_term_trends.append({
            'date': datetime.now().date(),
            'feedback_score': feedback['score'],
            'feature_norm': np.linalg.norm(features),
            'turtle_norm': np.linalg.norm(self.turtle_channel['weights']),
            'rabbit_norm': np.linalg.norm(self.rabbit_channel['weights'])
        })

        # Keep only the last 30 days of data.
        thirty_days_ago = datetime.now().date() - timedelta(days=30)
        self.long_term_trends = [t for t in self.long_term_trends
                                 if t['date'] >= thirty_days_ago]

        # Trend analysis needs at least 14 entries.
        # NOTE(review): this counts entries, not distinct days, so 14
        # same-day sessions also trigger the analysis.
        if len(self.long_term_trends) >= 14:
            # Group entries by calendar date.
            daily_data = {}
            for entry in self.long_term_trends:
                date_str = str(entry['date'])
                if date_str not in daily_data:
                    daily_data[date_str] = []
                daily_data[date_str].append(entry)

            # Per-day means, in chronological order.
            daily_means = []
            for date in sorted(daily_data.keys()):
                entries = daily_data[date]
                mean_entry = {
                    'date': date,
                    'feedback_score': np.mean([e['feedback_score'] for e in entries]),
                    'feature_norm': np.mean([e['feature_norm'] for e in entries]),
                    'turtle_norm': np.mean([e['turtle_norm'] for e in entries]),
                    'rabbit_norm': np.mean([e['rabbit_norm'] for e in entries])
                }
                daily_means.append(mean_entry)

            # A clear trend shift should trigger a model refresh.
            if len(daily_means) >= 7:
                # Compare the last 7 days against the 7 days before them.
                recent_feedback = np.mean([d['feedback_score'] for d in daily_means[-7:]])
                prior_feedback = np.mean([d['feedback_score'] for d in daily_means[-14:-7]])

                # A marked drop in feedback should rebuild the user profile.
                if recent_feedback < prior_feedback - 0.15:
                    # TODO: hook up the user-profile reconstruction here.
                    pass

    def predict_adjustment(self, features):
        """Predict the adjustment for a new input, clipped to [-1, 1].

        Returns 0.0 before any learning has happened (weights unset).
        """
        if self.turtle_channel['weights'] is None:
            return 0.0

        features = np.array(features, dtype=float)
        # Combine both channels' predictions.
        turtle_pred = np.dot(self.turtle_channel['weights'], features)
        rabbit_pred = np.dot(self.rabbit_channel['weights'], features)

        # Dynamic mix: more recent change -> more weight on the rabbit channel.
        stability = self._calculate_stability_score()
        combined_pred = turtle_pred * stability + rabbit_pred * (1 - stability)

        return np.clip(combined_pred, -1.0, 1.0)

    def get_learning_stats(self):
        """Return a summary dict of learning statistics for both channels."""
        return {
            'channels': {
                'turtle': {
                    'weight_norm': np.linalg.norm(self.turtle_channel['weights'])
                                  if self.turtle_channel['weights'] is not None else 0,
                    'learning_rate': self.turtle_channel['learning_rate'],
                    'last_updated': self.turtle_channel['last_updated']
                },
                'rabbit': {
                    'weight_norm': np.linalg.norm(self.rabbit_channel['weights'])
                                  if self.rabbit_channel['weights'] is not None else 0,
                    'learning_rate': self.rabbit_channel['learning_rate'],
                    'last_updated': self.rabbit_channel['last_updated']
                }
            },
            'history_size': len(self.learning_history),
            'conflicts': len(self.conflict_history),
            'performance': {
                'avg_adaptation_time': np.mean(self.performance_metrics['avg_adaptation_time'])
                                     if self.performance_metrics['avg_adaptation_time'] else 0,
                'habit_stability': np.mean(self.performance_metrics['habit_stability'])
                                  if self.performance_metrics['habit_stability'] else 0.5
            }
        }
    