import numpy as np
import time
from datetime import datetime
import hashlib
import json

class MemoryPool:
    """
    记忆管理系统：实现分层记忆管理
    包含短期记忆和长期记忆，支持动态衰减和记忆重组
    """
    def __init__(self):
        """Initialize an empty memory pool with hot/cold tiers, indexes and stats."""
        # Hot tier: recently stored memories with fast turnover.
        self.short_term_memory = {}
        # Cold tier: important memories kept for the long run.
        self.long_term_memory = {}

        # Per-memory bookkeeping: memory_id -> metadata dict.
        self.memory_metadata = {}

        # Lookup structures used by retrieval and recombination.
        self.context_index = {}    # context key -> list of memory ids
        self.embedding_index = {}  # memory id -> embedding vector (simplified)

        # Recombination (Nash-equilibrium style merging) parameters.
        self.recombination_threshold = 0.7  # similarity needed to merge a pair
        self.decay_adjustment_factor = 0.05

        # Lifecycle parameters for short-term memories.
        self.short_term_lifetime = 86400  # seconds a hot memory may live (24h)
        self.migration_threshold = 0.6    # strength needed to move to cold tier

        # Cold-start handling: the first interactions get an importance boost.
        self.cold_start_counter = 0
        self.cold_start_threshold = 100   # cold-start phase: first 100 interactions

        # Running counters describing pool activity.
        self.stats = {
            'total_memories': 0,
            'short_term_count': 0,
            'long_term_count': 0,
            'memory_hits': 0,
            'memory_misses': 0,
            'recombinations': 0,
            'avg_memory_lifetime': []
        }

    def _generate_memory_id(self, user_id, data, timestamp):
        """Derive a deterministic, unique memory id.

        The payload is serialized with sorted keys so the same logical
        data always hashes to the same id.
        """
        serialized = json.dumps(data, sort_keys=True)
        fingerprint = "_".join([str(user_id), serialized, str(timestamp)])
        return hashlib.md5(fingerprint.encode()).hexdigest()
    
    def _calculate_similarity(self, embedding1, embedding2):
        """Return the cosine similarity between two embedding vectors.

        Vectors of unequal length are truncated to the shorter one before
        comparing; any zero-norm vector yields a similarity of 0.0.
        """
        len_a, len_b = len(embedding1), len(embedding2)
        if len_a != len_b:
            # Deliberately compare only the overlapping prefix.
            shared = min(len_a, len_b)
            embedding1, embedding2 = embedding1[:shared], embedding2[:shared]

        norm_a = np.linalg.norm(embedding1)
        norm_b = np.linalg.norm(embedding2)
        if norm_a == 0 or norm_b == 0:
            return 0.0
        return np.dot(embedding1, embedding2) / (norm_a * norm_b)
    
    def _apply_memory_decay(self, memory_id, current_time=None):
        """Refresh a memory's strength according to its age and importance.

        Returns False when the memory id is unknown, True otherwise.
        """
        metadata = self.memory_metadata.get(memory_id)
        if metadata is None:
            return False

        now = current_time or time.time()
        age_hours = (now - metadata['timestamp']) / 3600
        # More important memories decay more slowly: the remaining strength
        # is scaled by importance times an exponential falloff per hour.
        decay = metadata['importance'] * (0.999 ** age_hours)
        metadata['current_strength'] = metadata['initial_strength'] * decay
        metadata['last_decay_check'] = now
        return True
    
    def _migrate_memories(self):
        """将重要的短期记忆迁移到长期记忆"""
        current_time = time.time()
        migrated = 0
        
        # 检查所有短期记忆
        for memory_id in list(self.short_term_memory.keys()):
            metadata = self.memory_metadata.get(memory_id)
            if not metadata:
                continue
                
            # 应用衰减
            self._apply_memory_decay(memory_id, current_time)
            
            # 检查是否应该迁移到长期记忆
            if (metadata['current_strength'] > self.migration_threshold and 
                current_time - metadata['timestamp'] > 3600):  # 至少1小时
                # 迁移数据
                self.long_term_memory[memory_id] = self.short_term_memory.pop(memory_id)
                metadata['storage_type'] = 'long_term'
                migrated += 1
                
                # 更新统计
                self.stats['short_term_count'] -= 1
                self.stats['long_term_count'] += 1
        
        # 清理过期的短期记忆
        expired = 0
        for memory_id in list(self.short_term_memory.keys()):
            metadata = self.memory_metadata.get(memory_id)
            if not metadata:
                continue
                
            if current_time - metadata['timestamp'] > self.short_term_lifetime:
                # 过期，删除
                del self.short_term_memory[memory_id]
                del self.memory_metadata[memory_id]
                expired += 1
                self.stats['short_term_count'] -= 1
                self.stats['total_memories'] -= 1
        
        return migrated, expired
    
    def _recombine_memories(self, context):
        """
        记忆重组：基于Nash均衡算法解决记忆冲突
        合并相似的记忆，增强重要记忆
        """
        if context not in self.context_index:
            return 0
            
        memory_ids = self.context_index[context]
        if len(memory_ids) < 2:
            return 0
            
        # 应用衰减
        current_time = time.time()
        for memory_id in memory_ids:
            self._apply_memory_decay(memory_id, current_time)
        
        # 获取所有记忆的嵌入和强度
        memory_info = []
        for memory_id in memory_ids:
            metadata = self.memory_metadata.get(memory_id)
            if not metadata:
                continue
                
            if memory_id in self.short_term_memory:
                memory = self.short_term_memory[memory_id]
            elif memory_id in self.long_term_memory:
                memory = self.long_term_memory[memory_id]
            else:
                continue
                
            memory_info.append({
                'id': memory_id,
                'embedding': metadata['embedding'],
                'strength': metadata['current_strength'],
                'memory': memory
            })
        
        # 寻找相似的记忆对
        recombined = 0
        processed = set()
        
        for i in range(len(memory_info)):
            if memory_info[i]['id'] in processed:
                continue
                
            for j in range(i+1, len(memory_info)):
                if memory_info[j]['id'] in processed:
                    continue
                    
                # 计算相似度
                similarity = self._calculate_similarity(
                    memory_info[i]['embedding'],
                    memory_info[j]['embedding']
                )
                
                if similarity > self.recombination_threshold:
                    # 基于Nash均衡的冲突解决
                    # 合并两个记忆，权重基于它们的强度
                    total_strength = memory_info[i]['strength'] + memory_info[j]['strength']
                    weight_i = memory_info[i]['strength'] / total_strength
                    weight_j = memory_info[j]['strength'] / total_strength
                    
                    # 创建新的合并记忆
                    merged_memory = {
                        'data': {
                            # 这里简化合并逻辑，实际中需要针对具体数据类型
                            'user_input': memory_info[i]['memory']['data']['user_input'] 
                                          if weight_i > weight_j 
                                          else memory_info[j]['memory']['data']['user_input'],
                            'response': memory_info[i]['memory']['data']['response'] 
                                       if weight_i > weight_j 
                                       else memory_info[j]['memory']['data']['response'],
                            'feedback': (memory_info[i]['memory']['data']['feedback'] * weight_i +
                                        memory_info[j]['memory']['data']['feedback'] * weight_j),
                            'context': context,
                            'signal_type': memory_info[i]['memory']['data']['signal_type']
                        },
                        'metadata': {
                            'merged_from': [memory_info[i]['id'], memory_info[j]['id']],
                            'merge_strength': similarity
                        }
                    }
                    
                    # 创建新记忆ID
                    merged_id = self._generate_memory_id(
                        memory_info[i]['memory']['user_id'],
                        merged_memory['data'],
                        current_time
                    )
                    
                    # 存储新记忆
                    storage_type = 'long_term' if (
                        memory_info[i]['memory']['storage_type'] == 'long_term' or
                        memory_info[j]['memory']['storage_type'] == 'long_term'
                    ) else 'short_term'
                    
                    if storage_type == 'long_term':
                        self.long_term_memory[merged_id] = merged_memory
                        self.stats['long_term_count'] += 1
                    else:
                        self.short_term_memory[merged_id] = merged_memory
                        self.stats['short_term_count'] += 1
                    
                    # 创建元数据
                    self.memory_metadata[merged_id] = {
                        'user_id': memory_info[i]['memory']['user_id'],
                        'timestamp': current_time,
                        'importance': max(memory_info[i]['memory']['importance'], 
                                         memory_info[j]['memory']['importance']),
                        'initial_strength': max(memory_info[i]['strength'], 
                                               memory_info[j]['strength']),
                        'current_strength': max(memory_info[i]['strength'], 
                                               memory_info[j]['strength']),
                        'embedding': (memory_info[i]['embedding'] * weight_i + 
                                     memory_info[j]['embedding'] * weight_j),
                        'context': context,
                        'storage_type': storage_type,
                        'last_decay_check': current_time
                    }
                    
                    # 更新索引
                    if context not in self.context_index:
                        self.context_index[context] = []
                    self.context_index[context].append(merged_id)
                    
                    # 记录嵌入索引
                    self.embedding_index[merged_id] = self.memory_metadata[merged_id]['embedding']
                    
                    # 移除原始记忆
                    for mid in [memory_info[i]['id'], memory_info[j]['id']]:
                        if mid in self.short_term_memory:
                            del self.short_term_memory[mid]
                            self.stats['short_term_count'] -= 1
                        elif mid in self.long_term_memory:
                            del self.long_term_memory[mid]
                            self.stats['long_term_count'] -= 1
                            
                        if mid in self.memory_metadata:
                            del self.memory_metadata[mid]
                            
                        if context in self.context_index and mid in self.context_index[context]:
                            self.context_index[context].remove(mid)
                            
                        if mid in self.embedding_index:
                            del self.embedding_index[mid]
                            
                        processed.add(mid)
                        self.stats['total_memories'] -= 1
                    
                    # 更新统计
                    self.stats['total_memories'] += 1
                    self.stats['recombinations'] += 1
                    recombined += 1
                    
                    break  # 每个记忆只合并一次
        
        return recombined
    
    def store_memory(self, user_id, data, importance=0.5, context=None, embedding=None):
        """存储记忆到适当的记忆池"""
        # 冷启动计数
        self.cold_start_counter += 1
        
        # 确保重要性在合理范围内
        importance = np.clip(importance, 0.1, 1.0)
        
        # 生成记忆ID
        timestamp = time.time()
        memory_id = self._generate_memory_id(user_id, data, timestamp)
        
        # 为冷启动阶段调整重要性
        if self.cold_start_counter <= self.cold_start_threshold:
            importance = min(importance * 1.5, 1.0)  # 冷启动阶段提高记忆重要性
        
        # 决定存储类型
        storage_type = 'long_term' if importance >= self.migration_threshold else 'short_term'
        
        # 存储记忆数据
        memory_data = {
            'user_id': user_id,
            'data': data,
            'importance': importance,
            'timestamp': timestamp,
            'storage_type': storage_type
        }
        
        if storage_type == 'long_term':
            self.long_term_memory[memory_id] = memory_data
            self.stats['long_term_count'] += 1
        else:
            self.short_term_memory[memory_id] = memory_data
            self.stats['short_term_count'] += 1
        
        # 存储元数据
        self.memory_metadata[memory_id] = {
            'user_id': user_id,
            'timestamp': timestamp,
            'importance': importance,
            'initial_strength': importance,
            'current_strength': importance,
            'embedding': embedding if embedding is not None else np.random.rand(100),
            'context': context,
            'storage_type': storage_type,
            'last_decay_check': timestamp
        }
        
        # 更新上下文索引
        if context:
            if isinstance(context, dict):
                context_key = json.dumps(context, sort_keys=True)
            else:
                context_key = str(context)
                
            if context_key not in self.context_index:
                self.context_index[context_key] = []
            self.context_index[context_key].append(memory_id)
        
        # 更新嵌入索引
        if embedding is not None:
            self.embedding_index[memory_id] = embedding
        
        # 更新统计
        self.stats['total_memories'] += 1
        
        # 定期迁移和重组记忆
        if self.stats['total_memories'] % 10 == 0:  # 每10个记忆触发一次
            migrated, expired = self._migrate_memories()
            if context:
                self._recombine_memories(context_key)
        
        return memory_id
    
    def retrieve_memory(self, user_id, query_embedding=None, context=None, top_n=5):
        """检索相关记忆"""
        current_time = time.time()
        candidates = []
        
        # 应用记忆衰减
        self._migrate_memories()
        
        # 基于上下文过滤
        context_key = None
        if context:
            if isinstance(context, dict):
                context_key = json.dumps(context, sort_keys=True)
            else:
                context_key = str(context)
                
            if context_key in self.context_index:
                candidates = self.context_index[context_key].copy()
            else:
                # 没有匹配的上下文，使用所有记忆
                candidates = list(self.short_term_memory.keys()) + list(self.long_term_memory.keys())
        else:
            # 没有上下文，使用所有记忆
            candidates = list(self.short_term_memory.keys()) + list(self.long_term_memory.keys())
        
        # 如果没有查询嵌入，按时间和强度排序
        if query_embedding is None:
            memory_list = []
            for memory_id in candidates:
                metadata = self.memory_metadata.get(memory_id)
                if not metadata or metadata['user_id'] != user_id:
                    continue
                    
                # 应用衰减
                self._apply_memory_decay(memory_id, current_time)
                
                # 获取记忆数据
                if memory_id in self.short_term_memory:
                    memory = self.short_term_memory[memory_id]
                elif memory_id in self.long_term_memory:
                    memory = self.long_term_memory[memory_id]
                else:
                    continue
                    
                memory_list.append((
                    -metadata['timestamp'],  # 负时间戳用于排序（最新的在前）
                    -metadata['current_strength'],  # 负强度用于排序
                    memory_id,
                    memory
                ))
            
            # 排序并返回
            memory_list.sort()
            results = [m[3] for m in memory_list[:top_n]]
            
            # 更新统计
            if results:
                self.stats['memory_hits'] += len(results)
            else:
                self.stats['memory_misses'] += 1
                
            return results
        
        # 基于嵌入相似度检索
        query_embedding = np.array(query_embedding)
        similarity_scores = []
        
        for memory_id in candidates:
            metadata = self.memory_metadata.get(memory_id)
            if not metadata or metadata['user_id'] != user_id:
                continue
                
            # 应用衰减
            self._apply_memory_decay(memory_id, current_time)
            
            # 计算相似度
            if memory_id in self.embedding_index:
                similarity = self._calculate_similarity(
                    query_embedding, 
                    self.embedding_index[memory_id]
                )
                
                # 结合相似度和记忆强度
                combined_score = similarity * metadata['current_strength']
                similarity_scores.append((-combined_score, memory_id))  # 负号用于排序
        
        # 排序并获取top_n结果
        similarity_scores.sort()
        top_memory_ids = [s[1] for s in similarity_scores[:top_n]]
        
        # 获取完整记忆数据
        results = []
        for memory_id in top_memory_ids:
            if memory_id in self.short_term_memory:
                results.append(self.short_term_memory[memory_id])
            elif memory_id in self.long_term_memory:
                results.append(self.long_term_memory[memory_id])
        
        # 更新统计
        if results:
            self.stats['memory_hits'] += len(results)
        else:
            self.stats['memory_misses'] += 1
            
        return results
    
    def get_stats(self):
        """Summarize the pool's current state and activity counters."""
        lifetimes = self.stats['avg_memory_lifetime']
        # Guard explicitly: np.mean of an empty list would warn and give NaN.
        mean_lifetime = np.mean(lifetimes) if lifetimes else 0

        hits = self.stats['memory_hits']
        misses = self.stats['memory_misses']

        return {
            'total_memories': self.stats['total_memories'],
            'short_term_count': self.stats['short_term_count'],
            'long_term_count': self.stats['long_term_count'],
            'memory_hits': hits,
            'memory_misses': misses,
            # The small epsilon avoids division by zero before any retrieval.
            'hit_rate': hits / (hits + misses + 1e-8),
            'recombinations': self.stats['recombinations'],
            'avg_memory_lifetime': mean_lifetime,
            'cold_start_complete': self.cold_start_counter > self.cold_start_threshold,
            'context_categories': len(self.context_index)
        }
    