"""
DragonQuant性能优化和缓存模块
"""

import os
import pickle
import hashlib
import time
import functools
import logging
from typing import Any, Dict, Optional, Callable, Union
from pathlib import Path
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
import pandas as pd
import numpy as np

logger = logging.getLogger(__name__)


class CacheManager:
    """Two-tier cache: a bounded in-memory dict backed by pickle files on disk.

    Entries expire after ``ttl`` seconds in both tiers; the memory tier is
    capped at ``max_size`` entries, evicting the oldest entry when full.
    """
    
    def __init__(self, cache_dir: str = "./cache", max_size: int = 1000, ttl: int = 3600):
        """Initialize the cache manager.
        
        Args:
            cache_dir: Directory holding the on-disk pickle cache.
            max_size: Maximum number of entries kept in memory.
            ttl: Time-to-live of an entry, in seconds.
        """
        self.cache_dir = Path(cache_dir)
        self.cache_dir.mkdir(parents=True, exist_ok=True)
        
        self.max_size = max_size
        self.ttl = ttl
        # key -> (value, timestamp). Wall-clock time.time() is used deliberately
        # so on-disk entries remain comparable across process restarts.
        self.memory_cache = {}
        self.cache_stats = {'hits': 0, 'misses': 0, 'size': 0}
        
        logger.info(f"Cache manager initialized: {cache_dir}, max_size={max_size}, ttl={ttl}s")
    
    def _generate_key(self, *args, **kwargs) -> str:
        """Build a deterministic cache key from arbitrary arguments."""
        # md5 is acceptable here: the digest is an identifier, not a security boundary.
        key_data = str(args) + str(sorted(kwargs.items()))
        return hashlib.md5(key_data.encode()).hexdigest()
    
    def _get_file_path(self, key: str) -> Path:
        """Return the on-disk path for a cache key."""
        return self.cache_dir / f"{key}.pkl"
    
    def _is_expired(self, timestamp: float) -> bool:
        """Return True if an entry created at *timestamp* has outlived the TTL."""
        return time.time() - timestamp > self.ttl
    
    def _evict_if_full(self):
        """Drop the oldest in-memory entry when the memory tier is at capacity."""
        if len(self.memory_cache) >= self.max_size:
            oldest_key = min(self.memory_cache, key=lambda k: self.memory_cache[k][1])
            del self.memory_cache[oldest_key]
    
    def get(self, key: str) -> Optional[Any]:
        """Return the cached value for *key*, or None on a miss.
        
        Checks the memory tier first, then disk; a fresh disk hit is promoted
        into the memory tier. Expired entries are removed as they are found.
        Note: a miss is indistinguishable from a cached value of None.
        """
        # Memory tier.
        if key in self.memory_cache:
            value, timestamp = self.memory_cache[key]
            if not self._is_expired(timestamp):
                self.cache_stats['hits'] += 1
                return value
            del self.memory_cache[key]
            self.cache_stats['size'] = len(self.memory_cache)
        
        # Disk tier.
        file_path = self._get_file_path(key)
        if file_path.exists():
            try:
                # NOTE: unpickling is only safe because these files are written
                # by this class; never point cache_dir at untrusted data.
                with open(file_path, 'rb') as f:
                    data = pickle.load(f)
                    value, timestamp = data['value'], data['timestamp']
                
                if not self._is_expired(timestamp):
                    # Promote to memory while respecting max_size (previously
                    # this path could grow the memory tier past the cap).
                    self._evict_if_full()
                    self.memory_cache[key] = (value, timestamp)
                    self.cache_stats['size'] = len(self.memory_cache)
                    self.cache_stats['hits'] += 1
                    return value
                # Expired: remove the stale file.
                file_path.unlink()
            except Exception as e:
                logger.warning(f"Failed to load cache {key}: {str(e)}")
        
        self.cache_stats['misses'] += 1
        return None
    
    def set(self, key: str, value: Any):
        """Store *value* under *key* in both tiers, evicting if necessary."""
        timestamp = time.time()
        
        self._evict_if_full()
        self.memory_cache[key] = (value, timestamp)
        
        # Persist to disk synchronously; failures are logged rather than raised
        # because the memory tier already holds the value.
        try:
            file_path = self._get_file_path(key)
            with open(file_path, 'wb') as f:
                pickle.dump({'value': value, 'timestamp': timestamp}, f)
        except Exception as e:
            logger.warning(f"Failed to save cache {key}: {str(e)}")
        
        self.cache_stats['size'] = len(self.memory_cache)
    
    def clear(self):
        """Remove every entry from both tiers and reset the statistics."""
        self.memory_cache.clear()
        
        for cache_file in self.cache_dir.glob("*.pkl"):
            try:
                cache_file.unlink()
            except Exception as e:
                logger.warning(f"Failed to delete cache file {cache_file}: {str(e)}")
        
        self.cache_stats = {'hits': 0, 'misses': 0, 'size': 0}
        logger.info("Cache cleared")
    
    def get_stats(self) -> Dict[str, Union[int, float]]:
        """Return hit/miss counters, hit rate, and current/maximum size."""
        total_requests = self.cache_stats['hits'] + self.cache_stats['misses']
        hit_rate = self.cache_stats['hits'] / total_requests if total_requests > 0 else 0
        
        return {
            'hits': self.cache_stats['hits'],
            'misses': self.cache_stats['misses'],
            'hit_rate': hit_rate,
            'size': self.cache_stats['size'],
            'max_size': self.max_size
        }


# Global cache instance shared by the `cached` decorator and module helpers.
# NOTE(review): constructed at import time, so merely importing this module
# creates ./cache as a side effect — consider lazy initialization.
_global_cache = CacheManager()


def cached(ttl: int = 3600, key_func: Optional[Callable] = None):
    """Memoization decorator backed by the module-level CacheManager.
    
    Args:
        ttl: Per-entry time-to-live in seconds. Entries are also subject to the
            global cache's own TTL, so the effective lifetime is the smaller of
            the two. (Previously this parameter was accepted but ignored.)
        key_func: Optional custom key builder; called with the wrapped
            function's arguments. Defaults to hashing the function name plus
            its arguments.
    
    Note:
        A result of ``None`` is never cached, because a cache miss is also
        reported as ``None``; such functions are re-executed on every call.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Build the cache key.
            if key_func:
                cache_key = key_func(*args, **kwargs)
            else:
                cache_key = _global_cache._generate_key(func.__name__, *args, **kwargs)
            
            # Entries are stored as (value, deadline) so this decorator can
            # enforce its own per-call ttl, which CacheManager.set does not
            # accept per key.
            entry = _global_cache.get(cache_key)
            if entry is not None:
                value, deadline = entry
                if time.time() < deadline:
                    return value
            
            # Miss or expired: compute and cache the result with its deadline.
            result = func(*args, **kwargs)
            _global_cache.set(cache_key, (result, time.time() + ttl))
            
            return result
        
        return wrapper
    return decorator


class DataFrameOptimizer:
    """Static helpers for DataFrame memory reduction and chunked/parallel work."""
    
    @staticmethod
    def optimize_dtypes(df: pd.DataFrame) -> pd.DataFrame:
        """Return a copy of *df* with numeric columns downcast to smaller dtypes.
        
        Integer columns shrink to the narrowest of int8/int16/int32 that can
        represent their range; float columns shrink to float32 when their range
        fits (this may lose precision beyond ~7 significant digits).
        Non-numeric columns are left untouched.
        """
        optimized_df = df.copy()
        
        for col in optimized_df.columns:
            col_type = optimized_df[col].dtype
            if col_type == 'object':
                continue
            if optimized_df[col].empty:
                # min()/max() of an empty column is NaN; nothing to downcast.
                continue
            
            c_min = optimized_df[col].min()
            c_max = optimized_df[col].max()
            
            if str(col_type).startswith('int'):
                # Inclusive bounds: the previous strict comparisons wrongly
                # refused to downcast columns that contain the exact dtype
                # limits (e.g. -128/127 for int8).
                if np.iinfo(np.int8).min <= c_min and c_max <= np.iinfo(np.int8).max:
                    optimized_df[col] = optimized_df[col].astype(np.int8)
                elif np.iinfo(np.int16).min <= c_min and c_max <= np.iinfo(np.int16).max:
                    optimized_df[col] = optimized_df[col].astype(np.int16)
                elif np.iinfo(np.int32).min <= c_min and c_max <= np.iinfo(np.int32).max:
                    optimized_df[col] = optimized_df[col].astype(np.int32)
            
            elif str(col_type).startswith('float'):
                if np.finfo(np.float32).min <= c_min and c_max <= np.finfo(np.float32).max:
                    optimized_df[col] = optimized_df[col].astype(np.float32)
        
        return optimized_df
    
    @staticmethod
    def chunk_dataframe(df: pd.DataFrame, chunk_size: int = 10000):
        """Yield consecutive row slices of *df*, each at most *chunk_size* rows."""
        for start in range(0, len(df), chunk_size):
            yield df.iloc[start:start + chunk_size]
    
    @staticmethod
    def parallel_apply(df: pd.DataFrame, func: Callable, 
                      axis: int = 0, max_workers: int = 4, chunk_size: int = 1000):
        """Apply *func* over row chunks of *df* concurrently.
        
        Falls back to a plain ``df.apply`` for frames smaller than
        *chunk_size*. Results are concatenated with a reset index, so the
        original index is lost. Note: threads only help when *func* releases
        the GIL (e.g. heavy NumPy work inside); pure-Python callables see
        little speedup.
        """
        if len(df) < chunk_size:
            return df.apply(func, axis=axis)
        
        chunks = list(DataFrameOptimizer.chunk_dataframe(df, chunk_size))
        
        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            results = list(executor.map(lambda chunk: chunk.apply(func, axis=axis), chunks))
        
        return pd.concat(results, ignore_index=True)


class PerformanceProfiler:
    """Collects per-function wall-time statistics via a timing decorator."""
    
    def __init__(self):
        # name -> list of measured durations in seconds
        self.timings = {}
        # reserved for memory measurements; currently only ever cleared
        self.memory_usage = {}
    
    def time_function(self, name: str):
        """Return a decorator that records each call's duration under *name*."""
        def decorator(func):
            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                # perf_counter is monotonic and high-resolution; time.time()
                # can jump backwards/forwards with system clock adjustments,
                # producing bogus (even negative) durations.
                start_time = time.perf_counter()
                result = func(*args, **kwargs)
                execution_time = time.perf_counter() - start_time
                
                self.timings.setdefault(name, []).append(execution_time)
                
                logger.debug(f"Function {name} executed in {execution_time:.4f}s")
                return result
            
            return wrapper
        return decorator
    
    def get_timing_stats(self) -> Dict[str, Dict[str, float]]:
        """Return count/total/average/min/max for every recorded name."""
        stats = {}
        for name, times in self.timings.items():
            total = sum(times)
            stats[name] = {
                'count': len(times),
                'total': total,
                'average': total / len(times),
                'min': min(times),
                'max': max(times)
            }
        return stats
    
    def reset_stats(self):
        """Discard all recorded measurements."""
        self.timings.clear()
        self.memory_usage.clear()


class BatchProcessor:
    """Splits a work list into fixed-size batches and processes them concurrently."""
    
    def __init__(self, batch_size: int = 100, max_workers: int = 4):
        # Items handed to the processing callable per call, and pool width.
        self.batch_size = batch_size
        self.max_workers = max_workers
    
    def process_batches(self, items: list, process_func: Callable, 
                       use_processes: bool = False) -> list:
        """Run *process_func* over batches of *items* and return flattened results.
        
        Args:
            items: Full list of work items.
            process_func: Callable invoked once per batch (receives a list).
            use_processes: If True, use a process pool instead of a thread pool.
        
        Returns:
            All batch results concatenated: list results are spliced in,
            non-list results appended as single elements.
        """
        size = self.batch_size
        batches = [items[start:start + size] for start in range(0, len(items), size)]
        
        pool_cls = ProcessPoolExecutor if use_processes else ThreadPoolExecutor
        with pool_cls(max_workers=self.max_workers) as pool:
            batch_results = list(pool.map(process_func, batches))
        
        # Flatten per-batch outputs into a single list.
        flattened = []
        for outcome in batch_results:
            if isinstance(outcome, list):
                flattened.extend(outcome)
            else:
                flattened.append(outcome)
        
        return flattened


# Global profiler instance, reported through get_performance_stats()
profiler = PerformanceProfiler()


def get_cache_manager() -> CacheManager:
    """Return the module-level CacheManager singleton."""
    return _global_cache


def clear_all_caches():
    """Empty both the in-memory and on-disk tiers of the global cache."""
    _global_cache.clear()


def get_performance_stats() -> Dict[str, Any]:
    """Collect cache and timing statistics from the module-level singletons."""
    stats: Dict[str, Any] = {}
    stats['cache_stats'] = _global_cache.get_stats()
    stats['timing_stats'] = profiler.get_timing_stats()
    return stats