#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
项目缓存管理器 - 高效的多层缓存系统
使用SQLite索引和pickle序列化实现快速缓存
"""

import os
import json
import pickle
import hashlib
import sqlite3
import threading
from pathlib import Path
from typing import Dict, Optional, Any, Union
from datetime import datetime, timedelta
from dataclasses import dataclass
from collections import OrderedDict

from ..utils_module.logger import ObfuscationLogger
from ..builtin_config_module.builtin_config import BuiltInConfig


@dataclass
class CacheEntry:
    """One cache index record describing a stored analysis result.

    Fields mirror the columns of the ``cache_index`` SQLite table.
    NOTE(review): this dataclass appears unused within this module —
    confirm callers elsewhere before removing.
    """
    cache_key: str               # 16-hex-char key (see compute_cache_key)
    project_path: str            # path of the project this entry caches
    created_at: datetime         # when the entry was first written
    accessed_at: datetime        # last read time (drives expiry cleanup)
    data_file: str               # pickle file name inside the cache directory
    metadata: Dict[str, Any]     # free-form extra information

class ProjectCacheManager:
    """项目分析缓存管理器"""

    def __init__(
        self,
        config: BuiltInConfig,
        logger: ObfuscationLogger,
        namespace: str = "analysis",
        root_override: Optional[Union[str, Path]] = None
    ):
        """
        初始化缓存管理器

        Args:
            config: 配置对象
            logger: 日志记录器
            namespace: 命名空间，用于区分不同功能的缓存目录
            root_override: 自定义缓存根目录（可选，仅用于测试或特例）
        """
        if not isinstance(config, BuiltInConfig):
            raise TypeError("ProjectCacheManager 需要 BuiltInConfig 实例")

        self.logger = logger
        self.config = config

        base_dir = Path(root_override) if root_override else Path(self.config.temp_dir)
        base_dir = base_dir.expanduser()
        if not base_dir.is_absolute():
            base_dir = Path.cwd() / base_dir
        base_dir = base_dir.resolve()

        namespace_path = Path(namespace) if namespace else Path("analysis")
        if namespace_path.is_absolute():
            namespace_path = namespace_path.relative_to(namespace_path.anchor)

        self.cache_root = (base_dir / ".obfuscator_cache")
        self.cache_root.mkdir(parents=True, exist_ok=True)

        self.cache_dir = (self.cache_root / namespace_path).resolve()
        self.cache_dir.mkdir(parents=True, exist_ok=True)

        # 初始化数据库和缓存结构
        self._init_database()

    def save_to_file(self, file_path: str, data: Any) -> bool:
        """保存数据到文件"""
        try:
            # 使用自定义编码器处理set等特殊类型
            from ..analysis_module.incremental_analyzer import StateJSONEncoder
            with open(file_path, 'w', encoding='utf-8') as f:
                json.dump(data, f, cls=StateJSONEncoder, ensure_ascii=False, indent=2)
            return True
        except Exception as e:
            self.logger.log_warning(f"缓存保存失败: {e}")
            return False

    def load_from_file(self, file_path: str) -> Dict[str, Any]:
        """从文件加载数据"""
        try:
            if os.path.exists(file_path):
                with open(file_path, 'r', encoding='utf-8') as f:
                    return json.load(f)
        except Exception as e:
            self.logger.log_error(f"加载缓存文件失败: {e}")
        return {}

    def _init_database(self):
        """初始化数据库"""
        # 索引数据库
        self.db_path = self.cache_dir / "cache_index.db"
        self.db_lock = threading.Lock()  # 数据库访问锁

        # 内存缓存（真正的LRU实现）
        # 使用 OrderedDict 实现 LRU，保持插入顺序，并在访问时移动到末尾
        self.memory_cache = OrderedDict()
        self.memory_cache_limit = 100  # 最多缓存100个项目
        self.memory_cache_lock = threading.Lock()

        # 初始化数据库
        self.init_database()

        self.logger.log_operation("缓存管理器", f"初始化完成，缓存目录: {self.cache_dir}")

    def init_database(self):
        """初始化SQLite索引数据库"""
        with self.db_lock:
            conn = sqlite3.connect(str(self.db_path))
            cursor = conn.cursor()

            # 创建缓存索引表
            cursor.execute('''
                CREATE TABLE IF NOT EXISTS cache_index (
                    cache_key TEXT PRIMARY KEY,
                    project_path TEXT NOT NULL,
                    created_at TIMESTAMP NOT NULL,
                    accessed_at TIMESTAMP NOT NULL,
                    file_count INTEGER DEFAULT 0,
                    symbol_count INTEGER DEFAULT 0,
                    cache_size_bytes INTEGER DEFAULT 0,
                    platform TEXT,
                    version TEXT DEFAULT '1.0.0',
                    data_file TEXT NOT NULL
                )
            ''')

            # 创建文件哈希表（用于增量检测）
            cursor.execute('''
                CREATE TABLE IF NOT EXISTS file_hashes (
                    file_path TEXT PRIMARY KEY,
                    cache_key TEXT NOT NULL,
                    file_hash TEXT NOT NULL,
                    modified_time REAL NOT NULL,
                    file_size INTEGER,
                    FOREIGN KEY (cache_key) REFERENCES cache_index(cache_key)
                        ON DELETE CASCADE
                )
            ''')

            # 创建索引以提高查询性能
            cursor.execute('''
                CREATE INDEX IF NOT EXISTS idx_cache_key
                ON file_hashes(cache_key)
            ''')

            cursor.execute('''
                CREATE INDEX IF NOT EXISTS idx_accessed
                ON cache_index(accessed_at)
            ''')

            conn.commit()
            conn.close()

    def compute_cache_key(self, project_path: str) -> str:
        """
        计算项目的缓存键

        Args:
            project_path: 项目路径

        Returns:
            缓存键
        """
        # 规范化路径
        normalized_path = os.path.abspath(project_path)

        # 基于项目路径和配置生成唯一键
        key_source = f"{normalized_path}:{self.config.mode}:{self.config.platform}"

        # 添加文件过滤配置
        if hasattr(self.config, 'file_filters'):
            key_source += f":{','.join(self.config.file_filters.include_extensions)}"
            key_source += f":{','.join(self.config.file_filters.exclude_directories)}"

        # 添加配置版本
        key_source += ":v1.0"

        return hashlib.sha256(key_source.encode()).hexdigest()[:16]

    def get_cached_analysis(self, project_path: str) -> Optional[Any]:
        """
        Return the cached analysis result for *project_path*.

        Lookup order: in-memory LRU first, then the on-disk pickle indexed by
        SQLite. A disk hit is validated against the recorded file hashes and
        invalidated (and dropped) if any source file changed.

        Args:
            project_path: Project path.

        Returns:
            The cached analysis result, or None when missing or invalidated.
        """
        cache_key = self.compute_cache_key(project_path)

        # 1. Memory cache (fastest path).
        # NOTE(review): _update_access_time touches the DB while
        # memory_cache_lock is held; no code in this file takes db_lock before
        # memory_cache_lock, so this cannot deadlock today — keep it that way.
        with self.memory_cache_lock:
            if cache_key in self.memory_cache:
                self.logger.log_operation("缓存命中", f"内存缓存: {cache_key}")
                # LRU: mark as most recently used.
                self.memory_cache.move_to_end(cache_key)
                self._update_access_time(cache_key)
                return self.memory_cache[cache_key]

        # 2. Disk cache index lookup.
        cache_entry = self._get_cache_entry(cache_key)
        if not cache_entry:
            self.logger.log_operation("缓存未命中", f"无缓存: {cache_key}")
            return None

        # 3. Validate against recorded file hashes; drop the cache when stale.
        if not self.is_cache_valid(cache_key, project_path):
            self.logger.log_operation("缓存失效", f"文件已修改: {cache_key}")
            self.invalidate_cache(cache_key)
            return None

        # 4. Load the pickled payload.
        # NOTE(review): pickle.load executes arbitrary code from the cache
        # file — acceptable only while the cache directory is local/trusted.
        cache_file = self.cache_dir / cache_entry['data_file']
        if not cache_file.exists():
            self.logger.log_warning(f"缓存文件不存在: {cache_file}")
            return None

        try:
            with open(cache_file, 'rb') as f:
                data = pickle.load(f)

            # Promote the result into the memory cache.
            with self.memory_cache_lock:
                self._update_memory_cache(cache_key, data)

            # Record the access for expiry bookkeeping.
            self._update_access_time(cache_key)

            self.logger.log_operation("缓存命中", f"磁盘缓存: {cache_key}")
            return data

        except Exception as e:
            # Corrupt/unreadable payload: log and discard the whole entry.
            self.logger.log_error(f"缓存加载失败: {e}")
            self.invalidate_cache(cache_key)
            return None

    def save_analysis(self, project_path: str, result: Any) -> None:
        """
        Persist an analysis result: pickle it to disk, index it in SQLite,
        record per-file hashes for invalidation and promote it to memory.

        Args:
            project_path: Project path.
            result: Analysis result object (must be pickleable).
        """
        cache_key = self.compute_cache_key(project_path)

        # 1. Pickle the payload to disk; on failure abort before touching the index.
        data_file = f"{cache_key}.pkl"
        cache_file = self.cache_dir / data_file

        try:
            with open(cache_file, 'wb') as f:
                pickle.dump(result, f, protocol=pickle.HIGHEST_PROTOCOL)

            cache_size = cache_file.stat().st_size

        except Exception as e:
            self.logger.log_error(f"缓存保存失败: {e}")
            return

        # 2. Best-effort statistics pulled from the result object when present.
        file_count = 0
        symbol_count = 0
        platform = 'unknown'

        if hasattr(result, 'files') and isinstance(result.files, dict):
            file_count = len(result.files)
        elif hasattr(result, 'file_index'):
            file_count = len(result.file_index)

        if hasattr(result, 'statistics') and isinstance(result.statistics, dict):
            symbol_count = result.statistics.get('total_symbols', 0)

        if hasattr(result, 'platform'):
            # Unwrap enum-like platform objects (those exposing .value).
            platform_attr = result.platform
            if hasattr(platform_attr, 'value'):
                platform = platform_attr.value
            else:
                platform = str(platform_attr)

        # 3. Upsert the index row.
        self._save_cache_entry(
            cache_key=cache_key,
            project_path=project_path,
            data_file=data_file,
            file_count=file_count,
            symbol_count=symbol_count,
            cache_size=cache_size,
            platform=platform
        )

        # 4. Record per-file hashes used by is_cache_valid().
        self._save_file_hashes(cache_key, project_path, result)

        # 5. Promote to the in-memory LRU.
        with self.memory_cache_lock:
            self._update_memory_cache(cache_key, result)

        self.logger.log_operation("缓存已保存",
                                f"键: {cache_key}, 大小: {cache_size/1024:.1f}KB")

    # -------------------- KV interface (legacy-API compatible) --------------------

    def set(self, key: str, value: Any) -> bool:
        """保存通用键值数据，与旧模块保持兼容"""
        try:
            # 使用自定义编码器处理set等特殊类型
            from ..analysis_module.incremental_analyzer import StateJSONEncoder
            file_path = self.cache_dir / f"kv_{hashlib.sha256(key.encode()).hexdigest()[:16]}.json"
            with open(file_path, 'w', encoding='utf-8') as f:
                json.dump({'key': key, 'value': value}, f, cls=StateJSONEncoder, ensure_ascii=False, indent=2)
            return True
        except Exception as e:
            self.logger.log_warning(f"KV缓存保存失败: {e}")
            return False

    def get(self, key: str) -> Optional[Any]:
        """读取通用键值数据"""
        file_path = self.cache_dir / f"kv_{hashlib.sha256(key.encode()).hexdigest()[:16]}.json"
        if not file_path.exists():
            return None
        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                data = json.load(f)
            return data.get('value')
        except Exception as e:
            self.logger.log_error(f"KV缓存读取失败: {e}")
            return None

    def delete(self, key: str) -> bool:
        """删除通用键值缓存"""
        file_path = self.cache_dir / f"kv_{hashlib.sha256(key.encode()).hexdigest()[:16]}.json"
        if file_path.exists():
            try:
                file_path.unlink()
                return True
            except Exception as e:
                self.logger.log_error(f"KV缓存删除失败: {e}")
        return False

    def is_cache_valid(self, cache_key: str, project_path: str) -> bool:
        """
        检查缓存是否有效

        Args:
            cache_key: 缓存键
            project_path: 项目路径

        Returns:
            是否有效
        """
        with self.db_lock:
            conn = sqlite3.connect(str(self.db_path))
            cursor = conn.cursor()

            # 获取缓存的文件哈希
            cursor.execute('''
                SELECT file_path, file_hash, modified_time, file_size
                FROM file_hashes
                WHERE cache_key = ?
            ''', (cache_key,))

            cached_files = cursor.fetchall()
            conn.close()

        if not cached_files:
            return False

        # 检查每个文件是否被修改
        for file_path, cached_hash, cached_mtime, cached_size in cached_files:
            if not os.path.exists(file_path):
                # 文件被删除
                return False

            current_stat = os.stat(file_path)
            current_mtime = current_stat.st_mtime
            current_size = current_stat.st_size

            # 快速检查：大小和修改时间
            if current_size != cached_size or current_mtime > cached_mtime:
                # 进一步检查内容哈希
                current_hash = self._compute_file_hash(file_path)
                if current_hash != cached_hash:
                    return False

        return True

    def invalidate_cache(self, cache_key: str):
        """
        使缓存失效

        Args:
            cache_key: 缓存键
        """
        # 删除磁盘文件
        cache_files = self.cache_dir.glob(f"{cache_key}*")
        for cache_file in cache_files:
            try:
                cache_file.unlink()
            except:
                pass

        # 删除内存缓存
        with self.memory_cache_lock:
            if cache_key in self.memory_cache:
                del self.memory_cache[cache_key]

        # 删除数据库记录
        with self.db_lock:
            conn = sqlite3.connect(str(self.db_path))
            cursor = conn.cursor()

            cursor.execute('DELETE FROM file_hashes WHERE cache_key = ?', (cache_key,))
            cursor.execute('DELETE FROM cache_index WHERE cache_key = ?', (cache_key,))

            conn.commit()
            conn.close()

        self.logger.log_operation("缓存已失效", cache_key)

    def clean_expired_cache(self, max_age_days: int = 7):
        """
        清理过期缓存

        Args:
            max_age_days: 最大保存天数
        """
        with self.db_lock:
            conn = sqlite3.connect(str(self.db_path))
            cursor = conn.cursor()

            expiry_date = datetime.now() - timedelta(days=max_age_days)

            # 获取过期的缓存键
            cursor.execute('''
                SELECT cache_key, data_file
                FROM cache_index
                WHERE accessed_at < ?
            ''', (expiry_date,))

            expired_entries = cursor.fetchall()
            conn.close()

        # 删除过期缓存
        for cache_key, data_file in expired_entries:
            self.invalidate_cache(cache_key)

        self.logger.log_operation("缓存清理",
                                f"已删除 {len(expired_entries)} 个过期缓存")

    def get_cache_statistics(self) -> Dict[str, Any]:
        """
        获取缓存统计信息

        Returns:
            统计信息
        """
        with self.db_lock:
            conn = sqlite3.connect(str(self.db_path))
            cursor = conn.cursor()

            # 获取缓存总数
            cursor.execute('SELECT COUNT(*) FROM cache_index')
            total_entries = cursor.fetchone()[0]

            # 获取总大小
            cursor.execute('SELECT SUM(cache_size_bytes) FROM cache_index')
            total_size = cursor.fetchone()[0] or 0

            # 获取平台分布
            cursor.execute('''
                SELECT platform, COUNT(*)
                FROM cache_index
                GROUP BY platform
            ''')
            platform_distribution = dict(cursor.fetchall())

            conn.close()

        # 内存缓存信息
        with self.memory_cache_lock:
            memory_entries = len(self.memory_cache)

        return {
            'total_entries': total_entries,
            'total_size_mb': total_size / 1024 / 1024,
            'memory_entries': memory_entries,
            'platform_distribution': platform_distribution,
            'cache_dir': str(self.cache_dir)
        }

    def _get_cache_entry(self, cache_key: str) -> Optional[Dict]:
        """获取缓存条目"""
        with self.db_lock:
            conn = sqlite3.connect(str(self.db_path))
            cursor = conn.cursor()

            cursor.execute('''
                SELECT project_path, created_at, accessed_at,
                       data_file, file_count, symbol_count, platform
                FROM cache_index
                WHERE cache_key = ?
            ''', (cache_key,))

            row = cursor.fetchone()
            conn.close()

        if row:
            return {
                'project_path': row[0],
                'created_at': row[1],
                'accessed_at': row[2],
                'data_file': row[3],
                'file_count': row[4],
                'symbol_count': row[5],
                'platform': row[6]
            }

        return None

    def _save_cache_entry(self, cache_key: str, project_path: str,
                         data_file: str, file_count: int,
                         symbol_count: int, cache_size: int,
                         platform: str):
        """保存缓存条目"""
        with self.db_lock:
            conn = sqlite3.connect(str(self.db_path))
            cursor = conn.cursor()

            now = datetime.now()

            cursor.execute('''
                INSERT OR REPLACE INTO cache_index
                (cache_key, project_path, created_at, accessed_at,
                 file_count, symbol_count, cache_size_bytes,
                 platform, data_file)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
            ''', (
                cache_key, project_path, now, now,
                file_count, symbol_count, cache_size,
                platform, data_file
            ))

            conn.commit()
            conn.close()

    def _save_file_hashes(self, cache_key: str, project_path: str, result: Any):
        """保存文件哈希信息"""
        file_iterable = None

        if hasattr(result, 'files') and isinstance(result.files, dict):
            file_iterable = result.files.items()
        elif hasattr(result, 'file_index'):
            file_iterable = result.file_index.items()
        else:
            return

        with self.db_lock:
            conn = sqlite3.connect(str(self.db_path))
            cursor = conn.cursor()

            # 先删除旧的哈希
            cursor.execute('DELETE FROM file_hashes WHERE cache_key = ?', (cache_key,))

            # 保存新的哈希
            for file_path, file_info in file_iterable:
                path_value = file_path

                if isinstance(file_info, dict):
                    path_value = file_info.get('path', file_path)
                elif hasattr(file_info, 'path'):
                    path_value = getattr(file_info, 'path')

                if not isinstance(path_value, str):
                    continue

                normalized_path = os.path.abspath(path_value)

                if os.path.exists(normalized_path):
                    file_hash = self._compute_file_hash(normalized_path)
                    file_stat = os.stat(normalized_path)

                    cursor.execute('''
                        INSERT INTO file_hashes
                        (file_path, cache_key, file_hash, modified_time, file_size)
                        VALUES (?, ?, ?, ?, ?)
                    ''', (
                        normalized_path, cache_key, file_hash,
                        file_stat.st_mtime, file_stat.st_size
                    ))

            conn.commit()
            conn.close()

    def _compute_file_hash(self, file_path: str) -> str:
        """
        计算文件哈希值（多点采样版本）

        策略：
        - 小文件（< 1MB）：完整哈希，确保100%准确性
        - 大文件（>= 1MB）：5点采样（0%, 25%, 50%, 75%, 100%），平衡性能和准确性

        Args:
            file_path: 文件路径

        Returns:
            哈希值
        """
        hasher = hashlib.md5()

        try:
            file_size = os.path.getsize(file_path)

            # 小文件：完整哈希（< 1MB）
            if file_size < 1024 * 1024:
                with open(file_path, 'rb') as f:
                    while True:
                        chunk = f.read(8192)
                        if not chunk:
                            break
                        hasher.update(chunk)
                return hasher.hexdigest()

            # 大文件：多点采样策略
            sample_size = 8192  # 每个采样点8KB
            num_samples = 5     # 5个采样点

            with open(file_path, 'rb') as f:
                # 采样点位置：0%, 25%, 50%, 75%, 100%
                for i in range(num_samples):
                    # 计算采样偏移量
                    if i == num_samples - 1:
                        # 最后一个采样点：文件末尾
                        offset = max(0, file_size - sample_size)
                    else:
                        # 其他采样点：按比例分布
                        offset = (file_size - sample_size) * i // (num_samples - 1)

                    f.seek(offset)
                    sample = f.read(sample_size)
                    hasher.update(sample)

                # 添加文件大小，增加唯一性
                hasher.update(str(file_size).encode())

            return hasher.hexdigest()

        except FileNotFoundError:
            # 文件不存在是预期情况（如文件已删除），静默处理
            # 不记录日志，避免误导用户
            return ""
        except PermissionError:
            # 权限错误
            self.logger.log_warning(f"无权限读取文件: {file_path}")
            return ""
        except Exception as e:
            # 其他未预期的错误才使用ERROR级别
            self.logger.log_error(f"计算文件哈希失败 {file_path}: {e}")
            return ""

    def _update_memory_cache(self, cache_key: str, data: Any):
        """
        更新内存缓存（真正的LRU策略）

        使用 OrderedDict 实现 LRU:
        1. 如果键已存在，先删除（这样重新插入时会移到末尾）
        2. 如果超过限制，删除最旧的项（开头的项）
        3. 插入新项到末尾（表示最近使用）

        Args:
            cache_key: 缓存键
            data: 数据
        """
        # 如果键已存在，先删除（这样重新插入时会成为"最近使用"）
        if cache_key in self.memory_cache:
            del self.memory_cache[cache_key]

        # 如果超过限制，删除最旧的项（LRU - 最少最近使用）
        if len(self.memory_cache) >= self.memory_cache_limit:
            # popitem(last=False) 删除并返回第一个（最旧的）项
            oldest_key, _ = self.memory_cache.popitem(last=False)
            if self.logger and self.config.logging.verbose:
                self.logger.log_operation(
                    "LRU缓存",
                    f"淘汰最旧缓存项: {oldest_key[:16]}..."
                )

        # 插入到末尾（最近使用）
        self.memory_cache[cache_key] = data

    def _update_access_time(self, cache_key: str):
        """更新缓存访问时间"""
        with self.db_lock:
            conn = sqlite3.connect(str(self.db_path))
            cursor = conn.cursor()

            cursor.execute('''
                UPDATE cache_index
                SET accessed_at = ?
                WHERE cache_key = ?
            ''', (datetime.now(), cache_key))

            conn.commit()
            conn.close()
