"""
缓存管理模块
负责数据缓存的创建、更新、清理和信息获取
"""

import time
import threading
from functools import lru_cache


@lru_cache(maxsize=16)
def _process_data_cached(data_str, max_points, start_date, end_date):
    """
    Process a JSON-serialized DataFrame with LRU caching.

    Module-level rather than a cached instance method: @lru_cache on a method
    keys entries on ``self`` and keeps every instance alive for the cache's
    lifetime (flake8-bugbear B019).

    Args:
        data_str: JSON string of the serialized DataFrame (must be hashable,
            hence a string rather than the DataFrame itself).
        max_points: Maximum number of data points to keep, or None for no limit.
        start_date: Inclusive start of the date filter, or None to skip filtering.
        end_date: Inclusive end of the date filter, or None to skip filtering.

    Returns:
        list[dict]: The processed rows as a list of record dicts.
    """
    from io import StringIO

    import pandas as pd

    # Wrap in StringIO: passing a literal JSON string to read_json is
    # deprecated since pandas 2.1.
    df = pd.read_json(StringIO(data_str))

    # Apply the date-range filter (both bounds inclusive). Skipped unless
    # both bounds are truthy, matching the original behavior.
    if start_date and end_date:
        df = df[(df['StandardDate'] >= start_date) & (df['StandardDate'] <= end_date)]

    # Downsample by striding when over the point budget — keeps every
    # step-th row instead of an expensive resample.
    if max_points and len(df) > max_points:
        step = max(1, len(df) // max_points)
        df = df.iloc[::step, :].reset_index(drop=True)

    return df.to_dict('records')


class CacheManager:
    """
    Cache manager.

    Handles creation, update, clearing and inspection of a main data cache
    plus a dict of processed-data caches, all guarded by one lock and sharing
    a single time-based expiry (``cache_duration`` from the last main load).
    """

    def __init__(self, cache_duration=600):
        """
        Initialize the cache manager.

        Args:
            cache_duration: Cache lifetime in seconds (default: 10 minutes).
        """
        self.data_cache = None            # main data cache (opaque payload)
        self.processed_data_cache = {}    # processed results keyed by get_cache_key()
        self.last_load_time = 0           # epoch seconds of last set_data_cache(); 0 = never
        self.cache_duration = cache_duration
        self.cache_lock = threading.Lock()  # guards all cache state above

    def set_data_cache(self, data):
        """
        Set the main data cache and refresh the expiry clock.

        Args:
            data: The data to cache.
        """
        with self.cache_lock:
            self.data_cache = data
            self.last_load_time = time.time()

    def get_data_cache(self):
        """
        Get the main data cache.

        Returns:
            The cached data, or None if absent or expired.
        """
        with self.cache_lock:
            if self.data_cache is not None and self._is_cache_valid():
                return self.data_cache
            return None

    def set_processed_cache(self, cache_key, data):
        """
        Store processed data under a cache key.

        Args:
            cache_key: The cache key (see get_cache_key()).
            data: The processed data to cache.
        """
        with self.cache_lock:
            self.processed_data_cache[cache_key] = data

    def get_processed_cache(self, cache_key):
        """
        Get processed data for a cache key.

        Note: validity is tied to the main cache's load time, so processed
        entries expire together with the main data cache.

        Args:
            cache_key: The cache key.

        Returns:
            The cached processed data, or None if absent or expired.
        """
        with self.cache_lock:
            if cache_key in self.processed_data_cache and self._is_cache_valid():
                return self.processed_data_cache[cache_key]
            return None

    def _is_cache_valid(self):
        """
        Check whether the cache is still within its lifetime.

        Returns:
            bool: True if the last load is more recent than cache_duration.
        """
        current_time = time.time()
        return current_time - self.last_load_time < self.cache_duration

    def is_cache_expired(self):
        """
        Check whether the cache has expired.

        Returns:
            bool: True if the cache is no longer valid.
        """
        return not self._is_cache_valid()

    def clear_cache(self):
        """
        Clear all caches, including the shared LRU cache.

        Returns:
            dict: A status payload confirming the clear.
        """
        with self.cache_lock:
            self.data_cache = None
            self.processed_data_cache.clear()
            self.last_load_time = 0
        # Clear the module-level LRU cache. The original checked a
        # never-set '_lru_cache_clear' attribute, so the LRU cache was
        # silently never cleared. Done outside cache_lock — lru_cache has
        # its own internal lock.
        _process_data_cached.cache_clear()
        return {"status": "success", "message": "缓存已清除"}

    def get_cache_info(self):
        """
        Get cache status information.

        Returns:
            dict: Cache state — presence, entry count, age, validity,
            configured duration and last load timestamp.
        """
        with self.cache_lock:
            current_time = time.time()
            cache_age = current_time - self.last_load_time if self.last_load_time > 0 else 0

            return {
                "has_data_cache": self.data_cache is not None,
                "processed_cache_count": len(self.processed_data_cache),
                "cache_age_seconds": cache_age,
                "cache_valid": self._is_cache_valid(),
                "cache_duration": self.cache_duration,
                "last_load_time": self.last_load_time
            }

    def get_cache_key(self, max_points=None, start_date=None, end_date=None):
        """
        Build a cache key from the processing parameters.

        Args:
            max_points: Maximum number of data points.
            start_date: Start date.
            end_date: End date.

        Returns:
            str: The cache key.
        """
        return f"{max_points}_{start_date}_{end_date}"

    def process_data_with_lru_cache(self, data_str, max_points, start_date, end_date):
        """
        Process data through the shared LRU cache.

        Used together with data_processor. Delegates to a module-level cached
        function so the cache neither keys on nor retains this instance.

        Args:
            data_str: JSON-serialized data string.
            max_points: Maximum number of data points, or None.
            start_date: Start date filter, or None.
            end_date: End date filter, or None.

        Returns:
            list[dict]: The processed rows.
        """
        return _process_data_cached(data_str, max_points, start_date, end_date)

    def __del__(self):
        """Destructor: best-effort cleanup.

        Guarded because during interpreter shutdown (or after a failed
        __init__) attributes and modules may already be gone; __del__ must
        never raise.
        """
        try:
            self.clear_cache()
        except Exception:
            pass