// SPDX-License-Identifier: Mulan PSL v2
/*
 * Copyright (c) 2025 Huawei Technologies Co., Ltd.
 * This software is licensed under Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *         http://license.coscl.org.cn/MulanPSL2
 *
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 * See the Mulan PSL v2 for more details.
 */

use std::sync::Arc;
use std::time::{Duration, Instant};
use crate::mock_common::ipc::message::Request;
use crate::error::{ServerError, ServerResult};
use crate::handlers::{CudaApiHandler, ApiResponse};
use crate::gpu::GPUResourceManager;

/// Request dispatcher responsible for routing requests to appropriate handlers.
pub struct RequestDispatcher {
    // Handler for CUDA Runtime API requests (method IDs 1-100; see route_request).
    cuda_handler: CudaApiHandler,
    // Aggregate counters and timing, updated on every dispatched request.
    stats: DispatcherStats,
}

/// Statistics for the request dispatcher.
///
/// Counters are lock-free atomics updated with `Ordering::Relaxed`; only the
/// smoothed processing time needs a lock.
#[derive(Debug, Default)]
struct DispatcherStats {
    // Every dispatched request, regardless of outcome.
    total_requests: std::sync::atomic::AtomicU64,
    // Requests that returned Ok with `response.success == true`.
    successful_requests: std::sync::atomic::AtomicU64,
    // Requests that returned Err, or Ok with `response.success == false`.
    failed_requests: std::sync::atomic::AtomicU64,
    // Smoothed processing time, updated as (previous + latest) / 2 in update_stats.
    average_processing_time: std::sync::RwLock<Duration>,
}

impl RequestDispatcher {
    /// Create a new request dispatcher backed by the given GPU resource manager.
    pub fn new(gpu_manager: Arc<GPUResourceManager>) -> Self {
        Self {
            cuda_handler: CudaApiHandler::new(gpu_manager),
            stats: DispatcherStats::default(),
        }
    }

    /// Dispatch a request to the appropriate handler.
    ///
    /// Records per-request statistics (counters and processing time) and logs
    /// the outcome before returning the handler's result to the caller.
    pub async fn dispatch_request(&self, request: Request<'_>) -> ServerResult<ApiResponse> {
        let start_time = Instant::now();

        // Count every incoming request, regardless of outcome.
        self.stats.total_requests.fetch_add(1, std::sync::atomic::Ordering::Relaxed);

        log::debug!("Dispatching request with method_id: {}", request.method_id());

        // Route request based on method ID range.
        let result = self.route_request(&request).await;

        // Update success/failure counters and the timing average.
        let processing_time = start_time.elapsed();
        self.update_stats(&result, processing_time);

        match &result {
            Ok(_) => log::debug!(
                "Request {} processed successfully in {:?}",
                request.method_id(),
                processing_time
            ),
            Err(e) => log::warn!(
                "Request {} failed: {} (processing time: {:?})",
                request.method_id(),
                e,
                processing_time
            ),
        }

        result
    }

    /// Route request to the appropriate handler based on its method ID range.
    async fn route_request(&self, request: &Request<'_>) -> ServerResult<ApiResponse> {
        let method_id = request.method_id();

        match method_id {
            // CUDA Runtime API range (1-100)
            1..=100 => self.handle_cuda_request(request).await,

            // CUDA Driver API range (101-200)
            101..=200 => self.handle_cuda_driver_request(request).await,

            // Internal management API range (201-300)
            201..=300 => self.handle_management_request(request).await,

            // Unknown method ID
            _ => Err(ServerError::request_processing(format!(
                "Unknown method ID: {}",
                method_id
            ))),
        }
    }

    /// Handle CUDA Runtime API request (method IDs 1-100).
    ///
    /// Runs the handler on tokio's blocking thread pool because CUDA calls
    /// may block and must not stall the async executor.
    async fn handle_cuda_request(&self, request: &Request<'_>) -> ServerResult<ApiResponse> {
        // Only the method_id can be moved into the blocking task because
        // `Request<'_>` borrows data from the caller.
        let method_id = request.method_id();
        let handler = self.cuda_handler.clone();

        // NOTE(review): rebuilding the request from the method_id alone drops
        // any payload the original request carried — TODO confirm Runtime API
        // requests carry no payload, or pass owned request data into the task.
        tokio::task::spawn_blocking(move || {
            let static_request = Request::new(method_id);
            handler.handle_request(&static_request)
        })
        .await
        .map_err(|e| ServerError::internal(format!("Task join error: {}", e)))?
    }

    /// Handle CUDA Driver API request (method IDs 101-200).
    async fn handle_cuda_driver_request(&self, _request: &Request<'_>) -> ServerResult<ApiResponse> {
        // Driver API not implemented yet; report an application-level error
        // response rather than failing the transport with Err.
        Ok(ApiResponse::error(
            -1,
            "CUDA Driver API not implemented yet".to_string()
        ))
    }

    /// Handle internal management request (method IDs 201-300).
    async fn handle_management_request(&self, request: &Request<'_>) -> ServerResult<ApiResponse> {
        match request.method_id() {
            201 => self.handle_get_stats_request().await,
            202 => self.handle_health_check_request().await,
            203 => self.handle_shutdown_request().await,
            _ => Ok(ApiResponse::error(
                -1,
                format!("Unknown management method ID: {}", request.method_id())
            )),
        }
    }

    /// Handle statistics request (method ID 201): dispatcher statistics as JSON.
    async fn handle_get_stats_request(&self) -> ServerResult<ApiResponse> {
        let stats = self.get_stats();
        let stats_json = serde_json::to_vec(&stats)?;
        Ok(ApiResponse::success(stats_json))
    }

    /// Handle health check request (method ID 202): status, timestamp, uptime.
    async fn handle_health_check_request(&self) -> ServerResult<ApiResponse> {
        let health_data = serde_json::json!({
            "status": "healthy",
            "timestamp": chrono::Utc::now().to_rfc3339(),
            "uptime_seconds": self.get_uptime_seconds()
        });

        let health_json = serde_json::to_vec(&health_data)?;
        Ok(ApiResponse::success(health_json))
    }

    /// Handle shutdown request (method ID 203).
    async fn handle_shutdown_request(&self) -> ServerResult<ApiResponse> {
        log::info!("Shutdown request received");

        // In a real implementation, this would trigger a graceful shutdown.
        let response_data = serde_json::json!({
            "message": "Shutdown initiated"
        });

        let response_json = serde_json::to_vec(&response_data)?;
        Ok(ApiResponse::success(response_json))
    }

    /// Update success/failure counters and the running processing-time
    /// average for one completed request.
    fn update_stats(&self, result: &ServerResult<ApiResponse>, processing_time: Duration) {
        match result {
            Ok(response) => {
                if response.success {
                    self.stats.successful_requests.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
                } else {
                    // An application-level error response counts as a failure.
                    self.stats.failed_requests.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
                }
            },
            Err(_) => {
                self.stats.failed_requests.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
            }
        }

        // Exponentially-weighted average with alpha = 0.5 (not a true moving
        // average); note the first sample is halved against the Duration::ZERO
        // default. If the lock is poisoned, skip the update rather than panic.
        if let Ok(mut avg_time) = self.stats.average_processing_time.write() {
            *avg_time = (*avg_time + processing_time) / 2;
        }
    }

    /// Get dispatcher statistics as a JSON object.
    pub fn get_stats(&self) -> serde_json::Value {
        let total = self.stats.total_requests.load(std::sync::atomic::Ordering::Relaxed);
        let successful = self.stats.successful_requests.load(std::sync::atomic::Ordering::Relaxed);
        let failed = self.stats.failed_requests.load(std::sync::atomic::Ordering::Relaxed);
        // Tolerate a poisoned lock: report a zero average instead of panicking
        // in a read-only reporting path (update_stats is equally tolerant).
        let avg_time = self
            .stats
            .average_processing_time
            .read()
            .map(|guard| *guard)
            .unwrap_or_default();

        serde_json::json!({
            "total_requests": total,
            "successful_requests": successful,
            "failed_requests": failed,
            "success_rate": if total > 0 { successful as f64 / total as f64 } else { 0.0 },
            "average_processing_time_ms": avg_time.as_millis(),
        })
    }

    /// Get server uptime in seconds.
    ///
    /// NOTE(review): the OnceLock is initialized on the *first* call, so this
    /// measures time since the first health check, not process start. This is
    /// a placeholder — a real implementation should record startup time when
    /// the dispatcher (or server) is constructed.
    fn get_uptime_seconds(&self) -> u64 {
        static START_TIME: std::sync::OnceLock<Instant> = std::sync::OnceLock::new();
        START_TIME.get_or_init(Instant::now).elapsed().as_secs()
    }
}

/// Builder for RequestDispatcher to allow custom configuration.
pub struct RequestDispatcherBuilder {
    // GPU resource manager; must be set via with_gpu_manager before build() succeeds.
    gpu_manager: Option<Arc<GPUResourceManager>>,
}

impl RequestDispatcherBuilder {
    /// Create an empty builder with no GPU manager configured.
    pub fn new() -> Self {
        Self { gpu_manager: None }
    }

    /// Supply the GPU resource manager the dispatcher will use.
    pub fn with_gpu_manager(mut self, gpu_manager: Arc<GPUResourceManager>) -> Self {
        self.gpu_manager = Some(gpu_manager);
        self
    }

    /// Consume the builder and construct the dispatcher.
    ///
    /// Fails with a configuration error when no GPU manager was supplied.
    pub fn build(self) -> ServerResult<RequestDispatcher> {
        match self.gpu_manager {
            Some(manager) => Ok(RequestDispatcher::new(manager)),
            None => Err(ServerError::configuration("GPU manager is required".to_string())),
        }
    }
}

impl Default for RequestDispatcherBuilder {
    /// Equivalent to [`RequestDispatcherBuilder::new`]: no GPU manager set.
    fn default() -> Self {
        Self { gpu_manager: None }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::config::GpuConfig;

    /// The builder should produce a dispatcher whose counters start at zero.
    #[tokio::test]
    async fn test_dispatcher_creation() {
        let manager = Arc::new(GPUResourceManager::new(GpuConfig::default()).unwrap());

        let dispatcher = RequestDispatcherBuilder::new()
            .with_gpu_manager(manager)
            .build()
            .unwrap();

        assert_eq!(dispatcher.get_stats()["total_requests"], 0);
    }

    /// Health check (202) and stats (201) management requests both succeed.
    #[tokio::test]
    async fn test_management_requests() {
        let manager = Arc::new(GPUResourceManager::new(GpuConfig::default()).unwrap());
        let dispatcher = RequestDispatcher::new(manager);

        // Same order as before: health check first, then stats.
        for method_id in [202, 201] {
            let response = dispatcher.dispatch_request(Request::new(method_id)).await.unwrap();
            assert!(response.success);
        }
    }
}