"""
记忆系统

这个模块实现了 Agent 的记忆能力，展示了 AI Agent 如何：
1. 存储和检索信息
2. 管理短期和长期记忆
3. 进行智能的信息关联
4. 维护对话上下文

记忆系统让 Agent 具有学习和记忆能力，能够从过往经验中学习。
"""

import json
import asyncio
import hashlib
from typing import List, Dict, Any, Optional
from dataclasses import dataclass, asdict
from datetime import datetime, timedelta
from pathlib import Path


@dataclass
class MemoryEntry:
    """A single unit of stored memory.

    Fields left at their ``None`` defaults are filled in by
    ``__post_init__``: the timestamp defaults to "now", metadata/tags to
    empty containers, and the entry id to a short content-derived hash.
    """
    content: str                                # the remembered text
    timestamp: Optional[datetime] = None        # creation time (defaults to now)
    metadata: Optional[Dict[str, Any]] = None   # arbitrary extra attributes
    importance: float = 5.0                     # importance score on a 1-10 scale
    access_count: int = 0                       # how many times this entry was retrieved
    last_accessed: Optional[datetime] = None    # time of the most recent retrieval
    tags: Optional[List[str]] = None            # classification labels
    entry_id: Optional[str] = None              # unique identifier

    def __post_init__(self):
        # Mutable defaults cannot be dataclass field defaults; create them here.
        if self.timestamp is None:
            self.timestamp = datetime.now()
        self.metadata = {} if self.metadata is None else self.metadata
        self.tags = [] if self.tags is None else self.tags
        if self.entry_id is None:
            # Derive a short, content-based id; the timestamp makes equal
            # contents created at different times distinct.
            digest_source = f"{self.content}{self.timestamp}".encode()
            self.entry_id = hashlib.md5(digest_source).hexdigest()[:8]


class MemorySystem:
    """
    Memory management system.

    A complete memory manager providing:
    - Short-term memory (recent conversations and operations)
    - Long-term memory (important knowledge and experience)
    - Relevance-based retrieval (keyword, tag, recency, importance)
    - Memory decay (cleanup of stale, low-value entries)

    Entries are persisted as JSON at ``storage_path``; load/save failures
    are reported but never raised (best-effort persistence).
    """

    def __init__(self, storage_path: str = "memory_storage.json", max_entries: int = 1000):
        """
        Args:
            storage_path: Path of the JSON file used for persistence.
            max_entries: Soft cap on stored entries; exceeding it triggers cleanup.
        """
        self.storage_path = Path(storage_path)
        self.max_entries = max_entries
        self.entries: List["MemoryEntry"] = []

        # Memory configuration.
        self.short_term_duration = timedelta(hours=2)   # window counted as short-term memory
        self.importance_threshold = 7.0                 # entries at/above this rank as long-term

        # Load previously persisted memories, if any.
        self._load_memories()

    def _load_memories(self) -> None:
        """Load persisted memories from ``storage_path`` (best-effort)."""
        if not self.storage_path.exists():
            return
        try:
            with open(self.storage_path, 'r', encoding='utf-8') as f:
                data = json.load(f)

            self.entries = []
            for entry_data in data.get('entries', []):
                # Timestamps are persisted as ISO strings; convert back to datetime.
                if entry_data.get('timestamp'):
                    entry_data['timestamp'] = datetime.fromisoformat(entry_data['timestamp'])
                if entry_data.get('last_accessed'):
                    entry_data['last_accessed'] = datetime.fromisoformat(entry_data['last_accessed'])

                self.entries.append(MemoryEntry(**entry_data))

        except Exception as e:
            # A corrupt or unreadable store must not crash the agent.
            print(f"加载记忆失败: {e}")
            self.entries = []

    def _save_memories(self) -> None:
        """Persist all memories to ``storage_path`` as JSON (best-effort)."""
        try:
            data = {
                'entries': [],
                'metadata': {
                    'total_entries': len(self.entries),
                    'last_updated': datetime.now().isoformat()
                }
            }

            for entry in self.entries:
                entry_dict = asdict(entry)
                # datetime objects are not JSON-serializable; store ISO strings.
                if entry_dict['timestamp']:
                    entry_dict['timestamp'] = entry_dict['timestamp'].isoformat()
                if entry_dict['last_accessed']:
                    entry_dict['last_accessed'] = entry_dict['last_accessed'].isoformat()

                data['entries'].append(entry_dict)

            with open(self.storage_path, 'w', encoding='utf-8') as f:
                json.dump(data, f, ensure_ascii=False, indent=2)

        except Exception as e:
            print(f"保存记忆失败: {e}")

    async def store(self, entry: "MemoryEntry") -> str:
        """
        Store a new memory entry.

        The full storage pipeline:
        1. Deduplicate: if a similar memory exists, reinforce it instead.
        2. Compute importance (overrides any caller-supplied value).
        3. Auto-generate tags from the content.
        4. Append and enforce the storage cap.
        5. Persist to disk.

        Returns:
            A human-readable status string containing the entry id.
        """

        # 1. Reinforce an existing similar memory instead of storing a duplicate.
        similar_entry = self._find_similar_memory(entry.content)
        if similar_entry:
            similar_entry.access_count += 1
            similar_entry.last_accessed = datetime.now()
            similar_entry.importance = min(10.0, similar_entry.importance + 0.5)
            self._save_memories()
            return f"更新了相似记忆: {similar_entry.entry_id}"

        # 2. Derive importance from content and metadata.
        entry.importance = self._calculate_importance(entry)

        # 3. Auto-tag.
        entry.tags = self._generate_tags(entry.content)

        # 4. Store the entry, enforcing the cap.
        self.entries.append(entry)
        if len(self.entries) > self.max_entries:
            self._cleanup_old_memories()

        # 5. Persist.
        self._save_memories()

        memory_type = "长期记忆" if entry.importance >= self.importance_threshold else "短期记忆"
        return f"已存储{memory_type}: {entry.entry_id}"

    async def retrieve_relevant(self, query: str, max_results: int = 5) -> List["MemoryEntry"]:
        """
        Retrieve the memories most relevant to ``query``.

        Several signals are combined into one score per entry:
        keyword overlap, full-phrase match, tag match, importance,
        recency and access frequency. Retrieved entries get their
        access statistics updated and the store is re-persisted.
        """

        if not self.entries:
            return []

        query_lower = query.lower()
        query_words = set(query_lower.split())
        now = datetime.now()  # hoisted: one timestamp for this retrieval pass

        # 1. Score every entry against the query.
        scored_memories = []
        for entry in self.entries:
            score = 0.0

            content_lower = entry.content.lower()
            content_words = set(content_lower.split())

            # Keyword overlap: fraction of query words present (up to 4 points).
            keyword_matches = len(query_words.intersection(content_words))
            if keyword_matches > 0:
                score += (keyword_matches / len(query_words)) * 4.0

            # Exact phrase match bonus (2 points).
            if query_lower in content_lower:
                score += 2.0

            # Tag match: fraction of the entry's tags found in the query (up to 2 points).
            if entry.tags:
                tag_matches = sum(1 for tag in entry.tags if tag.lower() in query_lower)
                score += (tag_matches / len(entry.tags)) * 2.0

            # Importance, normalized to 0-1 (up to 2 points).
            score += (entry.importance / 10.0) * 2.0

            # Recency (up to 1 point).
            time_score = self._calculate_time_relevance(entry.timestamp)
            score += time_score * 1.0

            # Access frequency, capped at 10 accesses (up to 1 point).
            access_score = min(1.0, entry.access_count / 10.0)
            score += access_score * 1.0

            if score > 0:
                scored_memories.append((entry, score))

        # 2. Highest score first; take the best ``max_results``.
        scored_memories.sort(key=lambda pair: pair[1], reverse=True)

        relevant_memories = []
        for entry, _score in scored_memories[:max_results]:
            # Record the access so frequency boosts future retrievals.
            entry.access_count += 1
            entry.last_accessed = now
            relevant_memories.append(entry)

        if relevant_memories:
            self._save_memories()

        return relevant_memories

    def _find_similar_memory(self, content: str, similarity_threshold: float = 0.8) -> Optional["MemoryEntry"]:
        """Return the first stored entry whose Jaccard word-set similarity to
        ``content`` reaches ``similarity_threshold``, or ``None``."""
        content_lower = content.lower()
        content_words = set(content_lower.split())

        for entry in self.entries:
            entry_words = set(entry.content.lower().split())

            # Empty word sets cannot be compared meaningfully.
            if not content_words or not entry_words:
                continue

            # Jaccard similarity: |intersection| / |union|.
            intersection = content_words.intersection(entry_words)
            union = content_words.union(entry_words)
            similarity = len(intersection) / len(union)

            if similarity >= similarity_threshold:
                return entry

        return None

    def _calculate_importance(self, entry: "MemoryEntry") -> float:
        """
        Compute an importance score for ``entry``, capped at 10.0.

        Heuristics considered:
        - Content length (long content up, very short content down)
        - Presence of "important"-signalling keywords
        - Q&A formatting markers
        - Error/solution vocabulary
        - Explicit importance hints in the metadata
        """
        importance = 5.0  # baseline importance

        content = entry.content.lower()

        # 1. Length: long content is likelier to matter, tiny content less so.
        if len(content) > 200:
            importance += 1.0
        elif len(content) < 50:
            importance -= 0.5

        # 2. Importance-signalling keywords.
        important_keywords = [
            "重要", "关键", "核心", "主要", "critical", "important", "key",
            "学习", "经验", "教训", "规则", "原则", "方法"
        ]

        keyword_count = sum(1 for keyword in important_keywords if keyword in content)
        importance += keyword_count * 0.5

        # 3. Q&A formatting suggests distilled knowledge.
        if any(marker in content for marker in ["问:", "答:", "Q:", "A:", "问题:", "解答:"]):
            importance += 1.0

        # 4. Errors and their solutions are valuable lessons.
        if any(word in content for word in ["错误", "问题", "解决", "修复", "bug", "fix"]):
            importance += 1.5

        # 5. Explicit importance hints from the caller.
        if entry.metadata:
            if entry.metadata.get("user_marked_important"):
                importance += 2.0
            if entry.metadata.get("type") == "lesson_learned":
                importance += 1.5

        return min(10.0, importance)

    def _generate_tags(self, content: str) -> List[str]:
        """Derive a deduplicated tag list from keyword patterns in ``content``."""
        tags = []
        content_lower = content.lower()

        # Predefined tag -> trigger-keyword patterns.
        tag_patterns = {
            "编程": ["代码", "编程", "程序", "开发", "code", "programming"],
            "数学": ["数学", "计算", "算法", "公式", "math"],
            "学习": ["学习", "教程", "教学", "学会", "掌握"],
            "问题": ["问题", "错误", "bug", "trouble"],
            "解决方案": ["解决", "方法", "方案", "技巧", "solution"],
            "重要": ["重要", "关键", "核心", "必须", "important"],
        }

        for tag, keywords in tag_patterns.items():
            if any(keyword in content_lower for keyword in keywords):
                tags.append(tag)

        # Recognizable technology terms become uppercase tags.
        tech_terms = ["python", "javascript", "api", "database", "ai", "ml"]
        for term in tech_terms:
            if term in content_lower:
                tags.append(term.upper())

        return list(set(tags))  # deduplicate

    def _calculate_time_relevance(self, timestamp: datetime) -> float:
        """Map a timestamp's age to a relevance weight in [0.1, 1.0]."""
        now = datetime.now()
        time_diff = now - timestamp

        # Within 24 hours: high temporal relevance.
        if time_diff <= timedelta(hours=24):
            return 1.0
        # Within a week: medium relevance.
        elif time_diff <= timedelta(days=7):
            return 0.7
        # Within a month: low relevance.
        elif time_diff <= timedelta(days=30):
            return 0.4
        # Older memories have very little temporal relevance.
        else:
            return 0.1

    def _cleanup_old_memories(self) -> None:
        """Drop the lowest-value memories to respect the storage cap."""
        if len(self.entries) <= self.max_entries:
            return

        def memory_score(entry: "MemoryEntry") -> float:
            # Combined score: importance + recency + access frequency.
            importance_score = entry.importance
            time_score = self._calculate_time_relevance(entry.timestamp) * 3
            access_score = min(3.0, entry.access_count / 5.0)

            return importance_score + time_score + access_score

        # Sort by combined value and keep the top 80% of the cap.
        self.entries.sort(key=memory_score, reverse=True)
        keep_count = int(self.max_entries * 0.8)

        removed_count = len(self.entries) - keep_count
        self.entries = self.entries[:keep_count]

        print(f"清理了 {removed_count} 条旧记忆，当前记忆数: {len(self.entries)}")

    def get_recent_entries(self, count: int = 10) -> List["MemoryEntry"]:
        """Return the ``count`` most recently created entries."""
        sorted_entries = sorted(self.entries, key=lambda x: x.timestamp, reverse=True)
        return sorted_entries[:count]

    def get_important_entries(self, count: int = 10) -> List["MemoryEntry"]:
        """Return the ``count`` highest-importance entries."""
        sorted_entries = sorted(self.entries, key=lambda x: x.importance, reverse=True)
        return sorted_entries[:count]

    def search_by_tags(self, tags: List[str]) -> List["MemoryEntry"]:
        """Return entries carrying any of ``tags``, most important first."""
        matching_entries = []
        for entry in self.entries:
            if entry.tags and any(tag in entry.tags for tag in tags):
                matching_entries.append(entry)

        return sorted(matching_entries, key=lambda x: x.importance, reverse=True)

    def get_statistics(self) -> Dict[str, Any]:
        """Return summary statistics about the memory store.

        With no entries, only ``total_entries`` (0) is reported — this also
        guards the averages below against division by zero.
        """
        if not self.entries:
            return {"total_entries": 0}

        now = datetime.now()
        short_term_entries = [
            e for e in self.entries
            if now - e.timestamp <= self.short_term_duration
        ]
        long_term_entries = [
            e for e in self.entries
            if e.importance >= self.importance_threshold
        ]

        all_tags = []
        for entry in self.entries:
            all_tags.extend(entry.tags or [])

        tag_counts = {}
        for tag in all_tags:
            tag_counts[tag] = tag_counts.get(tag, 0) + 1

        return {
            "total_entries": len(self.entries),
            "short_term_entries": len(short_term_entries),
            "long_term_entries": len(long_term_entries),
            "average_importance": sum(e.importance for e in self.entries) / len(self.entries),
            "total_access_count": sum(e.access_count for e in self.entries),
            "most_common_tags": sorted(tag_counts.items(), key=lambda x: x[1], reverse=True)[:5],
            "oldest_memory": min(self.entries, key=lambda x: x.timestamp).timestamp.isoformat(),
            "newest_memory": max(self.entries, key=lambda x: x.timestamp).timestamp.isoformat()
        }