import numpy as np
from sklearn.mixture import GaussianMixture
from hmmlearn.hmm import GaussianHMM
from datetime import datetime, timedelta

class UserProfile:
    """Dynamic user-profile engine.

    Clusters the user's preferred level of detail with a Gaussian mixture
    model and tracks formality with a Gaussian hidden Markov model, while
    keeping per-context statistics so predictions stay context-aware.
    """

    def __init__(self, user_id):
        """
        Parameters
        ----------
        user_id : hashable
            Identifier of the user this profile belongs to.
        """
        self.user_id = user_id
        self.created_at = datetime.now()

        # Style preference model - detail level clustered with a 3-component GMM.
        self.detail_level_gmm = GaussianMixture(n_components=3, random_state=42)
        self.detail_levels = ['concise', 'balanced', 'detailed']
        # GMM components come back in arbitrary order; this maps a component
        # index to the rank of its mean so the ordered labels above stay
        # meaningful. Identity until _train_models computes the real ranks.
        self._detail_rank = np.arange(len(self.detail_levels))

        # Formality modeled with a 5-state Gaussian HMM.
        self.formality_hmm = GaussianHMM(n_components=5, covariance_type="diag", n_iter=100)
        self.formality_levels = ['very_casual', 'casual', 'neutral', 'formal', 'very_formal']
        # Same ordering fix for the (unordered) HMM states.
        self._formality_rank = np.arange(len(self.formality_levels))

        # Context-awareness parameters (per-context running statistics).
        self.context_awareness = {
            'work': {
                'avg_response_time': 2.1,
                'jargon_tolerance': 0.8,
                'data_points': [],
                'last_updated': datetime.now()
            },
            'casual': {
                'emoji_usage': 3.2,
                'digression_allowance': 0.6,
                'data_points': [],
                'last_updated': datetime.now()
            },
            'learning': {
                'question_frequency': 1.8,
                'explanation_depth': 0.9,
                'data_points': [],
                'last_updated': datetime.now()
            }
        }

        # User behaviour history - accumulated training data for the models.
        self.behavior_history = {
            'features': [],
            'contexts': [],
            'timestamps': [],
            'feedback_scores': []
        }

        # Model training state.
        self.models_trained = False
        self.min_training_samples = 20  # minimum samples before first training

        # Dynamic decay-factor parameters based on the MIT study (3-7 day cycle).
        self.decay_base = 0.7
        self.preference_change_period = 5  # average 5-day preference-change cycle

    def _get_decay_factor(self, timestamp):
        """Return the dynamic decay factor for a unix `timestamp`.

        Models the 3-7 day preference-change cycle from the MIT experiment
        data: decay is gentle within the cycle and accelerates once it has
        elapsed. The result is always in (0, 1].
        """
        time_diff = datetime.now() - datetime.fromtimestamp(timestamp)
        # Clamp at 0 so a future timestamp / clock skew can never produce a
        # negative exponent and amplify old scores instead of decaying them.
        days_passed = max(time_diff.days, 0)

        if days_passed < self.preference_change_period:
            # Within the cycle: gentle exponential decay from 1.0 toward decay_base.
            return self.decay_base ** (days_passed / self.preference_change_period)
        # Past the cycle: accelerated decay.
        return self.decay_base ** (1 + (days_passed - self.preference_change_period) / 7)

    def update_from_behavior(self, behavior_data, context):
        """Update the profile from one observed behaviour sample.

        Applies the dynamic decay factor to existing history, records the new
        sample, refreshes the matching context statistics, and retrains the
        models every 5 samples once enough data has accumulated.

        Parameters
        ----------
        behavior_data : dict with at least 'timestamp' (unix time),
            'features' (numeric vector) and 'raw_input' keys.
        context : str, one of the context_awareness keys (others are stored
            in history but do not update context statistics).

        Returns True when the models were (re)trained by this call.
        Raises ValueError when behavior_data lacks a required field.
        """
        required_fields = ['timestamp', 'features', 'raw_input']
        if not all(field in behavior_data for field in required_fields):
            raise ValueError("行为数据缺少必要字段")

        # Decay existing history before appending the new sample, so the
        # newest observation enters at full weight.
        decay_factor = self._get_decay_factor(behavior_data['timestamp'])
        self._apply_decay_to_history(decay_factor)

        self.behavior_history['features'].append(behavior_data['features'])
        self.behavior_history['contexts'].append(context)
        self.behavior_history['timestamps'].append(behavior_data['timestamp'])
        self.behavior_history['feedback_scores'].append(0.5)  # neutral initial score

        if context in self.context_awareness:
            self.context_awareness[context]['data_points'].append(behavior_data['features'])
            self.context_awareness[context]['last_updated'] = datetime.now()

        # Retrain every 5 new samples once the minimum corpus size is reached.
        if len(self.behavior_history['features']) >= self.min_training_samples and \
           len(self.behavior_history['features']) % 5 == 0:
            self._train_models()
            return True

        return False

    def _apply_decay_to_history(self, decay_factor):
        """Multiply every stored feedback score by `decay_factor` in place."""
        scores = self.behavior_history['feedback_scores']
        for i in range(len(scores)):
            scores[i] *= decay_factor

    def _train_models(self):
        """Fit the detail-level GMM and the formality HMM on stored features.

        Assumes each stored feature vector has at least two entries: index 0
        relates to detail level and index 1 to formality — TODO confirm with
        the feature-extraction caller.
        """
        features_array = np.array(self.behavior_history['features'])

        # Detail level: cluster the first feature column with the GMM.
        self.detail_level_gmm.fit(features_array[:, [0]])
        # Rank components by their fitted means so that the component with
        # the lowest mean maps to 'concise', highest to 'detailed'.
        self._detail_rank = np.argsort(np.argsort(self.detail_level_gmm.means_.ravel()))

        # Formality: hmmlearn expects a 2-D (n_samples, n_features) array.
        # The history is treated as one chronological sequence so the HMM
        # can actually learn state transitions (length-1 sequences cannot).
        X = features_array[:, [1]]
        self.formality_hmm.fit(X)
        self._formality_rank = np.argsort(np.argsort(self.formality_hmm.means_.ravel()))

        self.models_trained = True

    def predict_preferences(self, features):
        """Predict the user's detail level, formality, and likely context.

        Parameters
        ----------
        features : 1-D numeric array-like; features[0] relates to detail
            level and features[1] to formality (assumed — see _train_models).

        Returns a dict with 'detail_level', 'formality', 'context', and
        'confidence' keys.
        """
        # Cold start: until the models have actually been fitted, any
        # predict call would raise NotFittedError, so always fall back to
        # the default strategy while untrained.
        if not self.models_trained:
            return {
                'detail_level': 'balanced',
                'formality': 'neutral',
                'context': 'work',  # default context
                'confidence': 0.5
            }

        features = np.asarray(features, dtype=float)
        detail_feature = features[0].reshape(1, -1)
        formality_feature = features[1].reshape(1, -1)

        # Detail level: most probable GMM component, translated through the
        # mean-based rank so the unordered component index yields an ordered label.
        detail_probs = self.detail_level_gmm.predict_proba(detail_feature)[0]
        detail_idx = int(np.argmax(detail_probs))
        detail_level = self.detail_levels[self._detail_rank[detail_idx]]

        # Formality: hmmlearn's predict_proba takes a 2-D array and returns
        # per-sample state posteriors.
        formality_probs = self.formality_hmm.predict_proba(formality_feature)[0]
        formality_idx = int(np.argmax(formality_probs))
        formality_level = self.formality_levels[self._formality_rank[formality_idx]]

        # Most likely context: cosine similarity between the current features
        # and each context's mean historical features.
        context_scores = {}
        for ctx, info in self.context_awareness.items():
            if info['data_points']:
                context_features = np.mean(info['data_points'], axis=0)
                denom = np.linalg.norm(features) * np.linalg.norm(context_features) + 1e-8
                context_scores[ctx] = np.dot(features, context_features) / denom

        context = max(context_scores, key=context_scores.get) if context_scores else 'work'

        # Overall confidence: average of both model certainties and the
        # context similarity rescaled from [-1, 1] to [0, 1]. Use .get() so
        # an empty context_scores (fallback 'work') cannot raise KeyError.
        confidence = (np.max(detail_probs) + np.max(formality_probs) +
                      (context_scores.get(context, 0.0) + 1) / 2) / 3

        return {
            'detail_level': detail_level,
            'formality': formality_level,
            'context': context,
            'confidence': confidence
        }

    def get_profile_summary(self):
        """Return a lightweight snapshot of the profile's current state."""
        timestamps = self.behavior_history['timestamps']
        return {
            'user_id': self.user_id,
            'models_trained': self.models_trained,
            'behavior_samples': len(self.behavior_history['features']),
            'last_updated': datetime.fromtimestamp(timestamps[-1]) if timestamps else None,
            'context_awareness': {
                ctx: {
                    'data_points': len(info['data_points']),
                    'last_updated': info['last_updated']
                } for ctx, info in self.context_awareness.items()
            }
        }