# -*- coding: utf-8 -*-
"""
性能优化模块
提供缓存、批处理、内存优化等性能增强功能
"""

import time
import hashlib
import pickle
import logging
from typing import Any, Dict, List, Optional, Callable, Iterator
from functools import wraps, lru_cache
from dataclasses import dataclass
import threading
from collections import defaultdict
import gc

logger = logging.getLogger(__name__)


class PerformanceCache:
    """Thread-safe in-memory cache with TTL expiry and LFU eviction.

    Entries expire after ``ttl`` seconds. When the cache is full, the
    entry with the lowest access count is evicted (despite the internal
    name ``_evict_lru``, the policy is frequency-based).
    """

    def __init__(self, max_size: int = 1000, ttl: int = 3600):
        """
        Args:
            max_size: maximum number of entries before eviction kicks in.
            ttl: time-to-live of each entry, in seconds.
        """
        self.max_size = max_size
        self.ttl = ttl
        self._cache = {}
        self._timestamps = {}
        self._access_count = defaultdict(int)
        # Real hit/miss counters so stats() can report an actual hit rate.
        self._hits = 0
        self._misses = 0
        self._lock = threading.RLock()

    def get(self, key: str) -> Optional[Any]:
        """Return the cached value for *key*, or None on miss or expiry.

        NOTE: a stored value of None is indistinguishable from a miss.
        """
        with self._lock:
            if key not in self._cache:
                self._misses += 1
                return None

            # Expire entries older than the TTL.
            if time.time() - self._timestamps[key] > self.ttl:
                self._remove(key)
                self._misses += 1
                return None

            self._access_count[key] += 1
            self._hits += 1
            return self._cache[key]

    def set(self, key: str, value: Any):
        """Store *value* under *key*, evicting one entry if the cache is full."""
        with self._lock:
            # Evict only when inserting a brand-new key into a full cache;
            # overwriting an existing key never needs eviction.
            if len(self._cache) >= self.max_size and key not in self._cache:
                self._evict_lru()

            self._cache[key] = value
            self._timestamps[key] = time.time()
            self._access_count[key] = 1

    def _remove(self, key: str):
        """Drop *key* from all bookkeeping dicts (no-op if absent)."""
        self._cache.pop(key, None)
        self._timestamps.pop(key, None)
        self._access_count.pop(key, None)

    def _evict_lru(self):
        """Evict the least-frequently-used entry (lowest access count)."""
        if not self._cache:
            return

        lru_key = min(self._access_count, key=self._access_count.__getitem__)
        self._remove(lru_key)
        logger.debug(f"缓存已满，移除LRU项: {lru_key}")

    def clear(self):
        """Remove every entry and reset hit/miss statistics."""
        with self._lock:
            self._cache.clear()
            self._timestamps.clear()
            self._access_count.clear()
            self._hits = 0
            self._misses = 0

    def stats(self) -> Dict[str, Any]:
        """Return size, capacity, hit rate and estimated memory usage."""
        with self._lock:
            return {
                'size': len(self._cache),
                'max_size': self.max_size,
                'hit_rate': self._calculate_hit_rate(),
                'memory_usage': self._estimate_memory_usage()
            }

    def _calculate_hit_rate(self) -> float:
        """Return hits / total lookups, or 0.0 before any lookup.

        BUG FIX: the previous implementation divided the current cache
        size by the total access count, which is not a hit rate.
        """
        total = self._hits + self._misses
        return self._hits / total if total else 0.0

    def _estimate_memory_usage(self) -> int:
        """Rough byte estimate via pickle; 0 if any value is unpicklable."""
        try:
            return sum(len(pickle.dumps(v)) for v in self._cache.values())
        except Exception:
            return 0


def cached(ttl: int = 3600, max_size: int = 128):
    """Decorator that memoizes a function's results in a PerformanceCache.

    Args:
        ttl: seconds each cached result stays valid.
        max_size: maximum number of cached results.

    The underlying cache is exposed as ``wrapper.cache`` (e.g. for
    clearing or inspecting stats).
    """
    cache = PerformanceCache(max_size=max_size, ttl=ttl)

    def decorator(func: Callable):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # Build a stable cache key from the function name and arguments.
            key_data = (func.__name__, args, tuple(sorted(kwargs.items())))
            cache_key = hashlib.md5(str(key_data).encode()).hexdigest()

            # BUG FIX: values are wrapped in a 1-tuple before storing, so a
            # function legitimately returning None is still cached —
            # PerformanceCache uses a bare None as its miss marker.
            hit = cache.get(cache_key)
            if hit is not None:
                logger.debug(f"缓存命中: {func.__name__}")
                return hit[0]

            # Execute the function and cache its (wrapped) result.
            result = func(*args, **kwargs)
            cache.set(cache_key, (result,))
            logger.debug(f"缓存设置: {func.__name__}")
            return result

        wrapper.cache = cache
        return wrapper
    return decorator


class BatchProcessor:
    """Accumulates items per type and flushes them to a processor in batches.

    A batch is flushed when it reaches ``batch_size`` items, or when more
    than ``flush_interval`` seconds have passed since its last flush and
    it is non-empty.
    """

    def __init__(self, batch_size: int = 1000, flush_interval: float = 5.0):
        """
        Args:
            batch_size: item count that triggers an immediate flush.
            flush_interval: max seconds a non-empty batch may wait.
        """
        self.batch_size = batch_size
        self.flush_interval = flush_interval
        self._batches = defaultdict(list)      # batch_type -> pending items
        self._last_flush = defaultdict(float)  # batch_type -> last flush time
        self._processors = {}                  # batch_type -> callable(list)
        self._lock = threading.Lock()

    def register_processor(self, batch_type: str, processor: Callable):
        """Register the callable that will consume batches of *batch_type*."""
        self._processors[batch_type] = processor
        logger.info(f"注册批处理器: {batch_type}")

    def add_item(self, batch_type: str, item: Any):
        """Append *item* to its batch and flush if size/time thresholds hit."""
        with self._lock:
            self._batches[batch_type].append(item)

            current_time = time.time()
            batch = self._batches[batch_type]

            # First item for this type: start the flush-interval clock now.
            if batch_type not in self._last_flush:
                self._last_flush[batch_type] = current_time

            should_flush = (
                len(batch) >= self.batch_size
                or (current_time - self._last_flush[batch_type] > self.flush_interval and len(batch) > 0)
            )

            if should_flush:
                self._flush_batch(batch_type)

    def _flush_batch(self, batch_type: str):
        """Run the registered processor on the pending batch (caller holds the lock)."""
        if batch_type not in self._processors:
            logger.warning(f"未找到批处理器: {batch_type}")
            return

        batch = self._batches[batch_type]
        if not batch:
            return

        # BUG FIX: hand the processor its own private list. The old code
        # passed the live internal buffer and then .clear()ed that same
        # object, so any processor that retained the list saw it emptied.
        self._batches[batch_type] = []

        try:
            processor = self._processors[batch_type]
            logger.info(f"处理批次 {batch_type}: {len(batch)} 项")

            start_time = time.time()
            processor(batch)
            process_time = time.time() - start_time

            logger.info(f"批次处理完成: {len(batch)} 项, 耗时: {process_time:.2f}秒")

            self._last_flush[batch_type] = time.time()

        except Exception as e:
            logger.error(f"批次处理失败 {batch_type}: {e}")
            # Re-queue the failed items ahead of any new arrivals so a
            # transient failure does not lose data (matches the original
            # keep-on-failure behavior).
            self._batches[batch_type] = batch + self._batches[batch_type]

    def flush_all(self):
        """Flush every pending batch regardless of size or age."""
        with self._lock:
            for batch_type in list(self._batches.keys()):
                self._flush_batch(batch_type)


class MemoryOptimizer:
    """Runs periodic garbage collection and watches process memory usage."""

    def __init__(self, gc_threshold: int = 1000, memory_limit_mb: int = 512):
        """
        Args:
            gc_threshold: force a GC every this many check_memory() calls.
            memory_limit_mb: soft limit on this process's resident memory.
        """
        self.gc_threshold = gc_threshold
        self.memory_limit_mb = memory_limit_mb
        self._operation_count = 0
        self._last_gc_time = time.time()

    def check_memory(self):
        """Return False when the process exceeds the memory limit, else True.

        Also forces a (rate-limited) GC every ``gc_threshold`` calls. The
        psutil check is skipped silently when psutil is not installed.
        """
        self._operation_count += 1

        # Periodic garbage collection.
        if self._operation_count % self.gc_threshold == 0:
            self._force_gc()

        # Check the memory limit.
        try:
            import psutil
            # BUG FIX: compare the *process* RSS against the limit. The old
            # code used psutil.virtual_memory().used — system-wide usage —
            # which almost always exceeds a per-process limit like 512MB.
            memory_mb = psutil.Process().memory_info().rss / 1024 / 1024
            if memory_mb > self.memory_limit_mb:
                logger.warning(f"内存使用过高: {memory_mb:.1f}MB")
                self._force_gc()
                return False
        except ImportError:
            pass

        return True

    def _force_gc(self):
        """Run gc.collect(), rate-limited to at most once per second."""
        current_time = time.time()
        if current_time - self._last_gc_time < 1.0:  # avoid frequent GC
            return

        collected = gc.collect()
        self._last_gc_time = current_time
        logger.debug(f"垃圾回收: 清理了 {collected} 个对象")


class StreamProcessor:
    """Line-oriented streaming processor for large text files."""

    def __init__(self, chunk_size: int = 8192):
        """
        Args:
            chunk_size: number of lines buffered before each processing pass.
        """
        self.chunk_size = chunk_size
        self.memory_optimizer = MemoryOptimizer()

    def process_file_stream(self, file_path: str, processor: Callable) -> Iterator[Any]:
        """Yield results of *processor* applied to chunks of stripped lines.

        Args:
            file_path: path of the UTF-8 text file to read.
            processor: callable taking a list of stripped lines; may return
                a single value or an iterable of values.

        Raises:
            Exception: re-raises anything that fails while reading the file.
        """
        logger.info(f"开始流式处理文件: {file_path}")

        try:
            # FIX: the encoding was spelled 'utf - 8', which only worked via
            # codec alias normalization; use the canonical name.
            with open(file_path, 'r', encoding='utf-8') as f:
                buffer = []
                line_count = 0

                for line in f:
                    buffer.append(line.strip())
                    line_count += 1

                    # Process once the buffer reaches the chunk size.
                    if len(buffer) >= self.chunk_size:
                        yield from self._process_chunk(buffer, processor)
                        buffer.clear()

                        # Back off briefly when memory pressure is detected.
                        if not self.memory_optimizer.check_memory():
                            logger.warning("内存不足，暂停处理")
                            time.sleep(0.1)

                # Process whatever is left over.
                if buffer:
                    yield from self._process_chunk(buffer, processor)

                logger.info(f"文件处理完成: {line_count} 行")

        except Exception as e:
            logger.error(f"文件流处理失败: {e}")
            raise

    def _process_chunk(self, chunk: List[str], processor: Callable) -> Iterator[Any]:
        """Apply *processor* to one chunk, flattening sequence/iterator results."""
        try:
            results = processor(chunk)
            # FIX: also unwrap generators/iterators, not just lists and
            # tuples — previously a generator was yielded as a single item.
            if isinstance(results, (list, tuple)) or isinstance(results, Iterator):
                yield from results
            else:
                yield results
        except Exception as e:
            logger.error(f"数据块处理失败: {e}")


@dataclass
class PerformanceMetrics:
    """Timing and throughput counters for one profiling session."""
    start_time: float
    end_time: float = 0.0
    items_processed: int = 0
    errors_count: int = 0
    memory_peak_mb: float = 0.0

    @property
    def duration(self) -> float:
        """Elapsed seconds; uses the current time while the session is open."""
        if self.end_time:
            return self.end_time - self.start_time
        return time.time() - self.start_time

    @property
    def throughput(self) -> float:
        """Items processed per second (0.0 for a non-positive duration)."""
        elapsed = self.duration
        if elapsed > 0:
            return self.items_processed / elapsed
        return 0.0

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the metrics into a plain dict."""
        return dict(
            duration=self.duration,
            items_processed=self.items_processed,
            throughput=self.throughput,
            errors_count=self.errors_count,
            memory_peak_mb=self.memory_peak_mb,
        )


class PerformanceProfiler:
    """Tracks PerformanceMetrics objects for named profiling sessions."""

    def __init__(self):
        self.metrics = {}           # finished sessions: id -> metrics
        self._active_sessions = {}  # running sessions: id -> metrics

    def start_session(self, session_id: str) -> PerformanceMetrics:
        """Open a new session and return its live metrics object."""
        session = PerformanceMetrics(start_time=time.time())
        self._active_sessions[session_id] = session
        logger.info(f"性能监控会话开始: {session_id}")
        return session

    def end_session(self, session_id: str) -> Optional[PerformanceMetrics]:
        """Close a running session; returns its metrics, or None if unknown."""
        session = self._active_sessions.pop(session_id, None)
        if session is None:
            return None

        session.end_time = time.time()
        self.metrics[session_id] = session

        logger.info(f"性能监控会话结束: {session_id}, 耗时: {session.duration:.2f}秒")
        return session

    def update_metrics(self, session_id: str, items_processed: int = 0,
                       errors_count: int = 0, memory_mb: float = 0.0):
        """Accumulate counters on a running session (ignored if unknown)."""
        session = self._active_sessions.get(session_id)
        if session is None:
            return
        session.items_processed += items_processed
        session.errors_count += errors_count
        if memory_mb > session.memory_peak_mb:
            session.memory_peak_mb = memory_mb

    def get_report(self, session_id: str) -> Optional[Dict[str, Any]]:
        """Return a session's metrics as a dict (finished or running), or None."""
        session = self.metrics.get(session_id) or self._active_sessions.get(session_id)
        return session.to_dict() if session else None


# Module-level singleton instances shared across the application; import
# these rather than constructing new objects so cache and batch state is
# shared by all callers.
performance_cache = PerformanceCache()
batch_processor = BatchProcessor()
memory_optimizer = MemoryOptimizer()
performance_profiler = PerformanceProfiler()
