use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use crate::error::Result;

/// Optimisation strategies for handling large objects.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum LargeObjectStrategy {
    /// Serve allocations straight from a dedicated large-object pool.
    DirectPool,
    /// Build the buffer in fixed-size chunks.
    Chunked,
    /// Allocate only an initial portion up front; reserve the rest.
    LazyAllocation,
    /// Pick a concrete strategy automatically from object size and usage pattern.
    SmartSelection,
}

impl Default for LargeObjectStrategy {
    /// `SmartSelection` is the default; it resolves to a concrete strategy per call.
    fn default() -> Self {
        Self::SmartSelection
    }
}

/// Size classification for allocated objects; each variant carries the exact byte count.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum ObjectSize {
    Small(usize),    // < 1 KB
    Medium(usize),   // 1 KB ..< 1 MB
    Large(usize),    // 1 MB ..< 100 MB
    Huge(usize),     // >= 100 MB
}

impl ObjectSize {
    /// Buckets `size` (in bytes) into one of the four categories.
    pub fn classify(size: usize) -> Self {
        const KB: usize = 1024;
        const MB: usize = 1024 * 1024;
        if size < KB {
            Self::Small(size)
        } else if size < MB {
            Self::Medium(size)
        } else if size < 100 * MB {
            Self::Large(size)
        } else {
            Self::Huge(size)
        }
    }

    /// Returns the exact byte count carried by the variant.
    pub fn size(&self) -> usize {
        match *self {
            Self::Small(n) | Self::Medium(n) | Self::Large(n) | Self::Huge(n) => n,
        }
    }
}

/// Large-object pool — memory management specialised for large allocations.
pub struct LargeObjectPool {
    // Configured strategy; `SmartSelection` is resolved per allocation.
    strategy: LargeObjectStrategy,
    // Free-lists keyed by size-class sentinels (e.g. `ObjectSize::Medium(1)`,
    // see `new`); only the enum variant distinguishes keys, not the payload.
    pools: HashMap<ObjectSize, Arc<Mutex<Vec<Vec<u8>>>>>,
    // Shared counters; cloned out by `get_stats`.
    allocation_stats: Arc<Mutex<AllocationStats>>,
    // Granularity used by the chunked and lazy strategies (64 KB, set in `new`).
    chunk_size: usize,
}

/// Allocation statistics counters.
#[derive(Debug, Clone, Default)]
pub struct AllocationStats {
    pub total_allocations: usize,    // number of `allocate` calls
    pub total_deallocations: usize,  // number of `deallocate` calls
    pub bytes_allocated: usize,      // cumulative bytes requested via `allocate`
    pub bytes_deallocated: usize,    // cumulative lengths of buffers passed to `deallocate`
    pub pool_hits: usize,            // pooled-path allocations served by a recycled buffer
    pub pool_misses: usize,          // pooled-path allocations that fell back to a fresh buffer
    pub size_distribution: HashMap<ObjectSize, usize>, // request count per exact classified size
}

impl LargeObjectPool {
    /// Creates a pool using `strategy`, with one empty free-list per pooled
    /// size class (Medium/Large/Huge) and 64 KB chunk granularity.
    pub fn new(strategy: LargeObjectStrategy) -> Self {
        let mut pools = HashMap::new();

        // One independent free-list per size class. The payload `1` is a
        // sentinel: pool keys only distinguish the enum variant.
        pools.insert(ObjectSize::Medium(1), Arc::new(Mutex::new(Vec::new())));
        pools.insert(ObjectSize::Large(1), Arc::new(Mutex::new(Vec::new())));
        pools.insert(ObjectSize::Huge(1), Arc::new(Mutex::new(Vec::new())));

        LargeObjectPool {
            strategy,
            pools,
            allocation_stats: Arc::new(Mutex::new(AllocationStats::default())),
            chunk_size: 64 * 1024, // 64 KB chunks
        }
    }

    /// Maps a size class onto its free-list key (variant with sentinel
    /// payload `1`), or `None` for small objects, which are never pooled.
    fn pool_key(size_class: ObjectSize) -> Option<ObjectSize> {
        match size_class {
            ObjectSize::Small(_) => None,
            ObjectSize::Medium(_) => Some(ObjectSize::Medium(1)),
            ObjectSize::Large(_) => Some(ObjectSize::Large(1)),
            ObjectSize::Huge(_) => Some(ObjectSize::Huge(1)),
        }
    }

    /// Resolves `SmartSelection` into a concrete strategy; any other
    /// configured strategy is returned unchanged.
    fn select_strategy(&self, size: usize, usage_pattern: Option<&UsagePattern>) -> LargeObjectStrategy {
        match self.strategy {
            LargeObjectStrategy::SmartSelection => {
                let size_class = ObjectSize::classify(size);

                if let Some(pattern) = usage_pattern {
                    // A recorded usage pattern takes priority over raw size.
                    if pattern.frequent_allocation && pattern.long_lived {
                        LargeObjectStrategy::DirectPool
                    } else if pattern.large_variations {
                        LargeObjectStrategy::Chunked
                    } else {
                        LargeObjectStrategy::LazyAllocation
                    }
                } else {
                    // No pattern available: decide from the size class alone.
                    match size_class {
                        ObjectSize::Medium(_) => LargeObjectStrategy::DirectPool,
                        ObjectSize::Large(_) => LargeObjectStrategy::Chunked,
                        ObjectSize::Huge(_) => LargeObjectStrategy::LazyAllocation,
                        _ => LargeObjectStrategy::DirectPool,
                    }
                }
            }
            other => other,
        }
    }

    /// Allocates a zero-filled buffer of `size` bytes using the strategy
    /// chosen by `select_strategy`, updating the allocation statistics.
    ///
    /// NOTE(review): under `LazyAllocation` the returned vector's `len()`
    /// may be smaller than `size` (the remainder is only reserved capacity)
    /// — confirm callers expect this contract.
    pub fn allocate(&mut self, size: usize, usage_pattern: Option<&UsagePattern>) -> Result<Vec<u8>> {
        let strategy = self.select_strategy(size, usage_pattern);
        let size_class = ObjectSize::classify(size);

        // Record the request first; scoped so the stats lock is released
        // before any pool lock is taken.
        {
            let mut stats = self.allocation_stats.lock().unwrap();
            stats.total_allocations += 1;
            stats.bytes_allocated += size;
            *stats.size_distribution.entry(size_class).or_insert(0) += 1;
        }

        match strategy {
            LargeObjectStrategy::DirectPool => self.allocate_from_pool(size, size_class),
            LargeObjectStrategy::Chunked => self.allocate_chunked(size),
            LargeObjectStrategy::LazyAllocation => self.allocate_lazy(size),
            // `select_strategy` never returns `SmartSelection`.
            LargeObjectStrategy::SmartSelection => unreachable!(),
        }
    }

    /// Serves an allocation from the free-list of `size_class`, falling back
    /// to a fresh zeroed vector on a miss. Small objects bypass the pool.
    fn allocate_from_pool(&mut self, size: usize, size_class: ObjectSize) -> Result<Vec<u8>> {
        let pool_key = match Self::pool_key(size_class) {
            Some(key) => key,
            None => return Ok(vec![0; size]), // small objects: plain allocation
        };

        // Key is guaranteed present: `new` inserts all three pooled classes.
        let pool = self.pools.get(&pool_key).unwrap();
        let mut pool_guard = pool.lock().unwrap();

        // First-fit scan for a recycled buffer with enough capacity.
        if let Some(index) = pool_guard.iter().position(|v| v.capacity() >= size) {
            let mut vec = pool_guard.remove(index);
            drop(pool_guard); // release the pool lock before taking the stats lock

            // Recycled buffers were cleared on deallocate (len == 0), so
            // `resize` zero-fills the whole requested range.
            vec.resize(size, 0);

            self.allocation_stats.lock().unwrap().pool_hits += 1;
            return Ok(vec);
        }
        drop(pool_guard);

        // No suitable recycled buffer — allocate a fresh one.
        self.allocation_stats.lock().unwrap().pool_misses += 1;
        Ok(vec![0; size])
    }

    /// Builds the buffer incrementally in `chunk_size` steps; the backing
    /// storage is still one contiguous allocation of `size` bytes.
    fn allocate_chunked(&mut self, size: usize) -> Result<Vec<u8>> {
        let chunks = (size + self.chunk_size - 1) / self.chunk_size; // ceiling division
        let mut result = Vec::with_capacity(size);

        for _ in 0..chunks {
            let chunk_len = std::cmp::min(self.chunk_size, size - result.len());
            // Extend from an iterator to avoid allocating a temporary Vec per chunk.
            result.extend(std::iter::repeat(0u8).take(chunk_len));
        }

        Ok(result)
    }

    /// Lazy allocation: materialise at most one chunk now and reserve the
    /// remaining capacity so later growth does not reallocate.
    fn allocate_lazy(&mut self, size: usize) -> Result<Vec<u8>> {
        let initial_size = std::cmp::min(size, self.chunk_size);
        let mut vec = vec![0; initial_size];
        vec.reserve_exact(size - initial_size);

        Ok(vec)
    }

    /// Returns a buffer to its size-class free-list (small buffers are simply
    /// dropped) and updates the deallocation statistics.
    pub fn deallocate(&mut self, mut vec: Vec<u8>) {
        let size = vec.len();
        let size_class = ObjectSize::classify(size);

        {
            let mut stats = self.allocation_stats.lock().unwrap();
            stats.total_deallocations += 1;
            stats.bytes_deallocated += size;
        }

        // Reset the length but deliberately KEEP the capacity — retaining the
        // backing allocation is the whole point of pooling. (A previous
        // `shrink_to_fit` here freed the storage, leaving every pooled vector
        // with capacity 0 so `allocate_from_pool` could never score a hit.)
        vec.clear();

        let pool_key = match Self::pool_key(size_class) {
            Some(key) => key,
            None => return, // small objects are not recycled
        };

        if let Some(pool) = self.pools.get(&pool_key) {
            let mut pool_guard = pool.lock().unwrap();
            // Cap each free-list at 100 entries to bound retained memory.
            if pool_guard.len() < 100 {
                pool_guard.push(vec);
            }
        }
    }

    /// Returns a snapshot of the allocation statistics.
    pub fn get_stats(&self) -> AllocationStats {
        self.allocation_stats.lock().unwrap().clone()
    }

    /// Drops every pooled buffer and resets all statistics.
    pub fn cleanup(&mut self) {
        for pool in self.pools.values() {
            let mut pool_guard = pool.lock().unwrap();
            pool_guard.clear();
        }

        let mut stats = self.allocation_stats.lock().unwrap();
        *stats = AllocationStats::default();
    }

    /// Replaces the configured strategy.
    pub fn set_strategy(&mut self, strategy: LargeObjectStrategy) {
        self.strategy = strategy;
    }

    /// Returns the configured strategy.
    pub fn get_strategy(&self) -> LargeObjectStrategy {
        self.strategy
    }
}

/// Heuristic profile of how one call-site allocates over time.
#[derive(Debug, Clone, Default)]
pub struct UsagePattern {
    pub frequent_allocation: bool, // set once more than 100 allocations are recorded
    pub long_lived: bool,          // never set by `record_allocation`; external callers may flag it
    pub large_variations: bool,    // set when a size deviates > 50% from the running mean
    pub average_size: usize,       // integer running mean of recorded sizes (truncating)
    pub allocation_count: usize,   // total number of recorded allocations
}

impl UsagePattern {
    /// Creates an empty pattern (all counters zero, all flags false).
    pub fn new() -> Self {
        UsagePattern::default()
    }

    /// Records one allocation of `size` bytes and updates the heuristics.
    pub fn record_allocation(&mut self, size: usize) {
        self.allocation_count += 1;
        // Integer running mean; fractions truncate toward zero.
        self.average_size =
            (self.average_size * (self.allocation_count - 1) + size) / self.allocation_count;

        // Simple heuristic thresholds.
        if self.allocation_count > 100 {
            self.frequent_allocation = true;
        }

        // Guard against a zero mean (all recorded sizes were 0): the division
        // below would otherwise evaluate 0.0/0.0 = NaN, which was silently
        // compared against 0.5. Skip the check explicitly instead.
        if self.average_size > 0 {
            let variation =
                (size as f64 - self.average_size as f64).abs() / self.average_size as f64;
            if variation > 0.5 {
                self.large_variations = true;
            }
        }
    }
}

/// Large-object optimizer: couples a `LargeObjectPool` with per-identifier
/// usage-pattern tracking so allocations can be steered by observed behavior.
pub struct LargeObjectOptimization {
    // Underlying pool that performs the actual allocations.
    pool: LargeObjectPool,
    // Usage patterns keyed by the caller-supplied string identifier.
    usage_patterns: HashMap<String, UsagePattern>,
}

impl LargeObjectOptimization {
    /// Builds an optimizer wrapping a pool configured with `strategy`.
    pub fn new(strategy: LargeObjectStrategy) -> Self {
        Self {
            pool: LargeObjectPool::new(strategy),
            usage_patterns: HashMap::new(),
        }
    }

    /// Allocates `size` bytes, consulting the usage pattern recorded under
    /// `identifier` (if any) when choosing a strategy.
    pub fn optimize_allocation(&mut self, size: usize, identifier: Option<&str>) -> Result<Vec<u8>> {
        let pattern = match identifier {
            Some(id) => self.usage_patterns.get(id),
            None => None,
        };
        self.pool.allocate(size, pattern)
    }

    /// Returns `vec` to the pool, first folding its size into the usage
    /// pattern tracked under `identifier`.
    pub fn optimize_deallocation(&mut self, vec: Vec<u8>, identifier: Option<&str>) {
        if let Some(id) = identifier {
            let observed = vec.len();
            self.usage_patterns
                .entry(id.to_string())
                .or_default()
                .record_allocation(observed);
        }

        self.pool.deallocate(vec);
    }

    /// Snapshot of the underlying pool's allocation statistics.
    pub fn get_optimization_stats(&self) -> AllocationStats {
        self.pool.get_stats()
    }

    /// Forwards a strategy change to the underlying pool.
    pub fn set_optimization_strategy(&mut self, strategy: LargeObjectStrategy) {
        self.pool.set_strategy(strategy);
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    /// Classification lands in the expected bucket for each magnitude.
    #[test]
    fn test_object_size_classification() {
        assert_eq!(ObjectSize::classify(500), ObjectSize::Small(500));
        assert_eq!(ObjectSize::classify(5000), ObjectSize::Medium(5000));
        assert_eq!(ObjectSize::classify(5_000_000), ObjectSize::Large(5_000_000));
        assert_eq!(ObjectSize::classify(200_000_000), ObjectSize::Huge(200_000_000));
    }

    /// Allocate 1 MB via the direct pool, release it, and verify the counters.
    #[test]
    fn test_large_object_pool_allocation() {
        let one_mb = 1024 * 1024;
        let mut pool = LargeObjectPool::new(LargeObjectStrategy::DirectPool);

        let buffer = pool.allocate(one_mb, None).unwrap();
        assert_eq!(buffer.len(), one_mb);

        pool.deallocate(buffer);
        let stats = pool.get_stats();
        assert_eq!(stats.total_allocations, 1);
        assert_eq!(stats.total_deallocations, 1);
    }

    /// The running average and counters advance per recorded allocation.
    #[test]
    fn test_usage_pattern_tracking() {
        let mut tracker = UsagePattern::new();
        tracker.record_allocation(1024);
        tracker.record_allocation(2048);

        assert_eq!(tracker.allocation_count, 2);
        assert!(tracker.average_size > 0);
    }
}