// SPDX-License-Identifier: Mulan PSL v2
/*
 * Copyright (c) 2025 Huawei Technologies Co., Ltd.
 * This software is licensed under Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *         http://license.coscl.org.cn/MulanPSL2
 *
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 * See the Mulan PSL v2 for more details.
 */

use std::collections::HashMap;
use std::sync::Arc;
use parking_lot::RwLock;
use crate::error::{ServerError, ServerResult};
use crate::config::GpuConfig;

/// A device pointer representing GPU memory.
///
/// An opaque 64-bit address handed out by the (mock) allocator; callers must
/// treat it as a handle and release it via `GPUResourceManager::free_memory`.
pub type DevicePtr = u64;

/// Information about a GPU device.
#[derive(Debug, Clone)]
pub struct DeviceInfo {
    // Ordinal identifying the device; used as the key in the manager's maps.
    pub device_id: u32,
    // Human-readable device name.
    pub name: String,
    // Total device memory, in bytes.
    pub total_memory: u64,
    // Free device memory at query time, in bytes.
    pub free_memory: u64,
    // Compute capability, major component (e.g. 7 for 7.5).
    pub compute_capability_major: i32,
    // Compute capability, minor component (e.g. 5 for 7.5).
    pub compute_capability_minor: i32,
    // Hardware limit on threads per block.
    pub max_threads_per_block: i32,
    // Hardware limit on blocks per grid.
    pub max_blocks_per_grid: i32,
}

/// Memory allocation information.
#[derive(Debug, Clone)]
pub struct MemoryAllocation {
    // Device pointer returned to the caller; key into the pool's block map.
    pub ptr: DevicePtr,
    // Size of the allocation in bytes.
    pub size: usize,
    // Device that owns this allocation.
    pub device_id: u32,
    // When the allocation was made; useful for debugging/leak triage.
    pub allocated_at: std::time::Instant,
}

/// Memory pool for efficient GPU memory management.
#[derive(Debug)]
struct MemoryPool {
    // Device this pool serves.
    device_id: u32,
    // Freed `(ptr, size)` blocks kept for reuse (best-fit selection).
    available_blocks: Vec<(DevicePtr, usize)>,
    // Live allocations keyed by device pointer.
    allocated_blocks: HashMap<DevicePtr, MemoryAllocation>,
    // Sum of live allocation sizes, in bytes.
    total_allocated: usize,
    // Cap on the number of simultaneous live allocations.
    max_allocations: usize,
}

impl MemoryPool {
    /// Create an empty pool for `device_id` that allows at most
    /// `max_allocations` simultaneous live allocations.
    fn new(device_id: u32, max_allocations: usize) -> Self {
        Self {
            device_id,
            available_blocks: Vec::new(),
            allocated_blocks: HashMap::new(),
            total_allocated: 0,
            max_allocations,
        }
    }

    /// Allocate `size` bytes, preferring to reuse a previously freed block
    /// (best fit) before requesting new device memory.
    ///
    /// # Errors
    /// Returns a resource error when the per-device allocation cap is reached
    /// or when the underlying allocation fails.
    fn allocate(&mut self, size: usize) -> ServerResult<DevicePtr> {
        if self.allocated_blocks.len() >= self.max_allocations {
            return Err(ServerError::resource(format!(
                "Maximum allocations ({}) reached for device {}",
                self.max_allocations, self.device_id
            )));
        }

        // Best-fit search: smallest free block that still satisfies `size`.
        let best_fit_idx = self
            .available_blocks
            .iter()
            .enumerate()
            .filter(|&(_, &(_, block_size))| block_size >= size)
            .min_by_key(|&(_, &(_, block_size))| block_size)
            .map(|(idx, _)| idx);

        // `owned_size` is how many bytes this allocation actually owns; it can
        // exceed `size` when a reused block is handed out without splitting.
        let (ptr, owned_size) = if let Some(idx) = best_fit_idx {
            let (ptr, block_size) = self.available_blocks.remove(idx);

            // Split blocks that are much larger than needed so the remainder
            // stays reusable. `saturating_mul` guards the threshold against
            // overflow for pathological sizes.
            if block_size > size.saturating_mul(2) {
                self.available_blocks
                    .push((ptr + size as u64, block_size - size));
                (ptr, size)
            } else {
                // Not split: record the full block size so the tail bytes are
                // not leaked when the block returns to the free list. (The
                // original code recorded only `size`, permanently losing
                // `block_size - size` bytes on deallocation.)
                (ptr, block_size)
            }
        } else {
            // No suitable free block: allocate fresh memory from the device.
            (self.cuda_malloc(size)?, size)
        };

        self.allocated_blocks.insert(
            ptr,
            MemoryAllocation {
                ptr,
                size: owned_size,
                device_id: self.device_id,
                allocated_at: std::time::Instant::now(),
            },
        );
        self.total_allocated += owned_size;

        log::debug!(
            "Allocated {} bytes at 0x{:x} on device {} (total: {} bytes)",
            owned_size, ptr, self.device_id, self.total_allocated
        );

        Ok(ptr)
    }

    /// Release the allocation at `ptr`, returning its block to the free list
    /// for reuse.
    ///
    /// # Errors
    /// Returns a resource error when `ptr` is not a live allocation of this
    /// pool.
    fn deallocate(&mut self, ptr: DevicePtr) -> ServerResult<()> {
        let Some(allocation) = self.allocated_blocks.remove(&ptr) else {
            return Err(ServerError::resource(format!(
                "Invalid memory pointer 0x{:x} for device {}",
                ptr, self.device_id
            )));
        };

        self.total_allocated -= allocation.size;

        // Keep the block around for reuse instead of freeing it immediately.
        self.available_blocks.push((ptr, allocation.size));

        log::debug!(
            "Deallocated {} bytes at 0x{:x} on device {} (total: {} bytes)",
            allocation.size, ptr, self.device_id, self.total_allocated
        );

        Ok(())
    }

    /// Look up the live allocation record for `ptr`, if any.
    fn get_allocation_info(&self, ptr: DevicePtr) -> Option<&MemoryAllocation> {
        self.allocated_blocks.get(&ptr)
    }

    // Mock CUDA malloc - in real implementation this would call cudaMalloc.
    fn cuda_malloc(&self, size: usize) -> ServerResult<DevicePtr> {
        use std::sync::atomic::{AtomicU64, Ordering};

        // Bump allocator over a fake address space. An atomic replaces the
        // previous `static mut`, which was mutated without synchronization
        // (undefined behavior if pools ever allocate concurrently).
        static MOCK_PTR_COUNTER: AtomicU64 = AtomicU64::new(0x100_0000);

        // Advance by the request rounded up to a 4 KiB boundary (at least one
        // page) so consecutive mock allocations can never overlap — the old
        // fixed 0x1000 step handed out overlapping ranges for sizes > 4 KiB.
        let step = (((size as u64) + 0xFFF) & !0xFFF).max(0x1000);
        let ptr = MOCK_PTR_COUNTER.fetch_add(step, Ordering::Relaxed);
        Ok(ptr)
    }

    // Mock CUDA free - in real implementation this would call cudaFree.
    fn _cuda_free(&self, _ptr: DevicePtr) -> ServerResult<()> {
        // This is a mock implementation.
        Ok(())
    }
}

/// GPU resource manager responsible for managing GPU devices and memory.
///
/// Holds one `MemoryPool` per initialized device; both maps are shared and
/// guarded by `parking_lot::RwLock`, so all public methods take `&self`.
#[derive(Debug)]
pub struct GPUResourceManager {
    // Static device descriptors, keyed by device id.
    devices: Arc<RwLock<HashMap<u32, DeviceInfo>>>,
    // Per-device memory pools, keyed by device id.
    memory_pools: Arc<RwLock<HashMap<u32, MemoryPool>>>,
    // Configuration (target device ids, per-device allocation cap).
    config: GpuConfig,
}

impl GPUResourceManager {
    /// Create a new GPU resource manager.
    ///
    /// Discovers the configured devices and sets up one memory pool per
    /// device.
    ///
    /// # Errors
    /// Fails when no usable GPU device is found.
    pub fn new(config: GpuConfig) -> ServerResult<Self> {
        // All state lives behind locks, so no `mut` binding is needed here.
        let manager = Self {
            devices: Arc::new(RwLock::new(HashMap::new())),
            memory_pools: Arc::new(RwLock::new(HashMap::new())),
            config,
        };

        manager.initialize_devices()?;
        Ok(manager)
    }

    /// Initialize GPU devices.
    ///
    /// Uses the configured `device_ids` when non-empty, otherwise every
    /// detected device. Unavailable ids are skipped with a warning.
    fn initialize_devices(&self) -> ServerResult<()> {
        let device_count = self.get_device_count()?;
        let mut devices = self.devices.write();
        let mut pools = self.memory_pools.write();

        let target_devices: Vec<u32> = if self.config.device_ids.is_empty() {
            (0..device_count).collect()
        } else {
            self.config.device_ids.clone()
        };

        for device_id in target_devices {
            if device_id >= device_count {
                log::warn!(
                    "Device {} not available (only {} devices found)",
                    device_id, device_count
                );
                continue;
            }

            let device_info = self.get_device_info_raw(device_id)?;
            devices.insert(device_id, device_info);
            pools.insert(
                device_id,
                MemoryPool::new(device_id, self.config.max_allocations_per_device),
            );

            log::info!("Initialized GPU device {}", device_id);
        }

        if devices.is_empty() {
            return Err(ServerError::resource("No GPU devices available".to_string()));
        }

        log::info!("Initialized {} GPU devices", devices.len());
        Ok(())
    }

    /// Get the number of available GPU devices.
    fn get_device_count(&self) -> ServerResult<u32> {
        // Mock implementation - in real implementation this would call
        // cudaGetDeviceCount.
        Ok(1) // Assume 1 device for mock
    }

    /// Get device information straight from the (mock) driver.
    fn get_device_info_raw(&self, device_id: u32) -> ServerResult<DeviceInfo> {
        // Mock implementation - in real implementation this would call CUDA
        // APIs such as cudaGetDeviceProperties.
        Ok(DeviceInfo {
            device_id,
            name: format!("Mock GPU Device {}", device_id),
            total_memory: 8 * 1024 * 1024 * 1024, // 8GB
            free_memory: 6 * 1024 * 1024 * 1024,  // 6GB
            compute_capability_major: 7,
            compute_capability_minor: 5,
            max_threads_per_block: 1024,
            max_blocks_per_grid: 65535,
        })
    }

    /// Allocate GPU memory on `device_id`.
    ///
    /// # Errors
    /// Fails when the device is unknown or its pool rejects the request.
    pub fn allocate_memory(&self, size: usize, device_id: u32) -> ServerResult<DevicePtr> {
        let mut pools = self.memory_pools.write();

        let pool = pools.get_mut(&device_id)
            .ok_or_else(|| ServerError::resource(format!("Device {} not available", device_id)))?;

        pool.allocate(size)
    }

    /// Free GPU memory.
    ///
    /// The owning device is found by scanning all pools for `ptr`.
    ///
    /// # Errors
    /// Fails when `ptr` does not belong to any live allocation.
    pub fn free_memory(&self, ptr: DevicePtr) -> ServerResult<()> {
        let mut pools = self.memory_pools.write();

        match pools
            .values_mut()
            .find(|pool| pool.get_allocation_info(ptr).is_some())
        {
            Some(pool) => pool.deallocate(ptr),
            None => Err(ServerError::resource(format!(
                "Invalid memory pointer 0x{:x}",
                ptr
            ))),
        }
    }

    /// Get a snapshot of the information for `device_id`.
    ///
    /// # Errors
    /// Fails when the device was not initialized.
    pub fn get_device_info(&self, device_id: u32) -> ServerResult<DeviceInfo> {
        self.devices
            .read()
            .get(&device_id)
            .cloned()
            .ok_or_else(|| ServerError::resource(format!("Device {} not available", device_id)))
    }

    /// Copy `size` bytes between two GPU pointers (mock: validation only).
    ///
    /// # Errors
    /// Fails when either pointer is not a live allocation or its allocation
    /// is smaller than `size`.
    pub fn copy_memory(&self, src: DevicePtr, dst: DevicePtr, size: usize) -> ServerResult<()> {
        let pools = self.memory_pools.read();

        let mut src_valid = false;
        let mut dst_valid = false;

        // Pointers may live in different pools, so check every pool for both.
        for pool in pools.values() {
            if let Some(src_alloc) = pool.get_allocation_info(src) {
                if src_alloc.size < size {
                    return Err(ServerError::resource(format!(
                        "Source allocation too small: {} < {}",
                        src_alloc.size, size
                    )));
                }
                src_valid = true;
            }

            if let Some(dst_alloc) = pool.get_allocation_info(dst) {
                if dst_alloc.size < size {
                    return Err(ServerError::resource(format!(
                        "Destination allocation too small: {} < {}",
                        dst_alloc.size, size
                    )));
                }
                dst_valid = true;
            }
        }

        if !src_valid {
            return Err(ServerError::resource(format!("Invalid source pointer 0x{:x}", src)));
        }

        if !dst_valid {
            return Err(ServerError::resource(format!("Invalid destination pointer 0x{:x}", dst)));
        }

        // Mock implementation - in real implementation this would call
        // cudaMemcpy.
        log::debug!("Copying {} bytes from 0x{:x} to 0x{:x}", size, src, dst);

        Ok(())
    }

    /// Per-device memory statistics: `device_id -> (bytes_allocated, live_allocation_count)`.
    pub fn get_memory_stats(&self) -> HashMap<u32, (usize, usize)> {
        self.memory_pools
            .read()
            .iter()
            .map(|(&device_id, pool)| {
                (device_id, (pool.total_allocated, pool.allocated_blocks.len()))
            })
            .collect()
    }

    /// Get the list of initialized device ids.
    pub fn get_available_devices(&self) -> Vec<u32> {
        // `copied` over `cloned`: the keys are `Copy`.
        self.devices.read().keys().copied().collect()
    }
}