// SPDX-License-Identifier: Mulan PSL v2
/*
 * Copyright (c) 2025 Huawei Technologies Co., Ltd.
 * This software is licensed under Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *         http://license.coscl.org.cn/MulanPSL2
 *
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 * See the Mulan PSL v2 for more details.
 */

use std::sync::{
    atomic::{AtomicBool, Ordering},
    Arc,
};
use std::time::Duration;
use tokio::sync::mpsc;
use crate::mock_common::ipc::{
    framer::GenericFramer,
    transport::shmem::{ShmemTransport, ShmemTransportBuilder},
};
use crate::config::ServerConfig;
use crate::dispatcher::{RequestDispatcher, RequestDispatcherBuilder};
use crate::error::{ServerError, ServerResult};
use crate::gpu::GPUResourceManager;

/// Server lifecycle states.
/// Server lifecycle states.
///
/// The expected transition order is `Initializing` → `Ready` → `Running`
/// → `Stopping` → `Stopped`, with `Error` reachable from any state on a
/// fatal failure.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Default)]
pub enum ServerState {
    /// Server is being initialized (the initial state).
    #[default]
    Initializing,
    /// Server is ready to accept connections.
    Ready,
    /// Server is running and processing requests.
    Running,
    /// Server is shutting down gracefully.
    Stopping,
    /// Server has stopped.
    Stopped,
    /// Server encountered a fatal error.
    Error,
}

impl ServerState {
    /// Returns `true` once the server can no longer process requests,
    /// i.e. it has stopped or hit a fatal error.
    pub fn is_terminal(self) -> bool {
        matches!(self, ServerState::Stopped | ServerState::Error)
    }
}

/// Core server structure managing the entire xgpu-server lifecycle.
pub struct ServerCore {
    /// Server configuration.
    config: ServerConfig,
    
    /// Shared memory transport for client-to-server communication.
    c2s_transport: Option<ShmemTransport>,
    
    /// Shared memory transport for server-to-client communication.
    s2c_transport: Option<ShmemTransport>,
    
    /// Generic framer for message encoding/decoding.
    framer: GenericFramer,
    
    /// Request dispatcher.
    dispatcher: RequestDispatcher,
    
    /// GPU resource manager.
    gpu_manager: Arc<GPUResourceManager>,
    
    /// Server running state.
    running: Arc<AtomicBool>,
    
    /// Current server state.
    state: ServerState,
    
    /// Shutdown signal sender.
    shutdown_tx: Option<mpsc::Sender<()>>,
    
    /// Shutdown signal receiver.
    shutdown_rx: Option<mpsc::Receiver<()>>,
}

impl ServerCore {
    /// Create a new server core with the given configuration.
    ///
    /// Validates `config`, then builds the GPU resource manager, the
    /// request dispatcher, and the framer, and sets up the shutdown
    /// channel. Shared-memory transports are created later by
    /// [`Self::initialize`].
    ///
    /// # Errors
    ///
    /// Returns an error if configuration validation fails or if the GPU
    /// manager / dispatcher fail to construct.
    pub fn new(config: ServerConfig) -> ServerResult<Self> {
        // Fail fast on a bad configuration before allocating any resources.
        config.validate()?;

        log::info!("Initializing xgpu-server with config: {:?}", config);

        // GPU manager is shared with the dispatcher via Arc.
        let gpu_manager = Arc::new(GPUResourceManager::new(config.gpu.clone())?);

        let dispatcher = RequestDispatcherBuilder::new()
            .with_gpu_manager(gpu_manager.clone())
            .build()?;

        let framer = GenericFramer::new(config.framing.max_frame_size);

        // Capacity 1 is sufficient: at most one shutdown signal is sent.
        let (shutdown_tx, shutdown_rx) = mpsc::channel(1);

        Ok(Self {
            config,
            c2s_transport: None,
            s2c_transport: None,
            framer,
            dispatcher,
            gpu_manager,
            running: Arc::new(AtomicBool::new(false)),
            state: ServerState::Initializing,
            shutdown_tx: Some(shutdown_tx),
            shutdown_rx: Some(shutdown_rx),
        })
    }

    /// Initialize the server: create the shared-memory transports and
    /// move the state to `Ready`.
    pub async fn initialize(&mut self) -> ServerResult<()> {
        log::info!("Initializing server...");
        self.state = ServerState::Initializing;

        self.initialize_transports().await?;

        self.state = ServerState::Ready;
        log::info!("Server initialized successfully");

        Ok(())
    }

    /// Initialize shared memory transports (C2S and S2C).
    async fn initialize_transports(&mut self) -> ServerResult<()> {
        log::debug!("Initializing shared memory transports...");

        // Create client-to-server transport.
        let c2s_transport = ShmemTransportBuilder::new()
            .path(&self.config.shmem.c2s_path)
            .size(self.config.shmem.buffer_size)
            .build()
            .create()
            .map_err(ServerError::transport)?;

        // Create server-to-client transport.
        let s2c_transport = ShmemTransportBuilder::new()
            .path(&self.config.shmem.s2c_path)
            .size(self.config.shmem.buffer_size)
            .build()
            .create()
            .map_err(ServerError::transport)?;

        self.c2s_transport = Some(c2s_transport);
        self.s2c_transport = Some(s2c_transport);

        log::debug!("Shared memory transports initialized");
        Ok(())
    }

    /// Start the server and block on the main event loop.
    ///
    /// # Errors
    ///
    /// Returns a lifecycle error if the server is not in the `Ready`
    /// state (i.e. `initialize` has not been called or the server is
    /// already running / stopped).
    pub async fn start(&mut self) -> ServerResult<()> {
        if self.state != ServerState::Ready {
            return Err(ServerError::lifecycle(format!(
                "Cannot start server in state {:?}",
                self.state
            )));
        }

        log::info!("Starting xgpu-server...");
        self.state = ServerState::Running;
        self.running.store(true, Ordering::Release);

        self.run_event_loop().await?;

        Ok(())
    }

    /// Stop the server gracefully.
    ///
    /// Idempotent: stopping an already-stopped server is a no-op.
    pub async fn stop(&mut self) -> ServerResult<()> {
        if self.state == ServerState::Stopped {
            return Ok(());
        }

        log::info!("Stopping xgpu-server...");
        self.state = ServerState::Stopping;
        self.running.store(false, Ordering::Release);

        // Wake the event loop; ignore the error if the receiver side is
        // already gone (e.g. the loop exited on its own).
        if let Some(tx) = self.shutdown_tx.take() {
            let _ = tx.send(()).await;
        }

        self.cleanup().await?;

        self.state = ServerState::Stopped;
        log::info!("Server stopped successfully");

        Ok(())
    }

    /// Get the current server state.
    pub fn state(&self) -> ServerState {
        self.state
    }

    /// Check if the server is running.
    pub fn is_running(&self) -> bool {
        self.running.load(Ordering::Acquire)
    }

    /// Get a JSON snapshot of server statistics (state, dispatcher, GPU
    /// memory, and selected configuration values).
    pub fn get_stats(&self) -> serde_json::Value {
        let dispatcher_stats = self.dispatcher.get_stats();
        let gpu_stats = self.gpu_manager.get_memory_stats();

        serde_json::json!({
            "server_state": format!("{:?}", self.state),
            "is_running": self.is_running(),
            "dispatcher": dispatcher_stats,
            "gpu_memory": gpu_stats,
            "config": {
                "shmem_buffer_size": self.config.shmem.buffer_size,
                "max_frame_size": self.config.framing.max_frame_size,
                "request_timeout_ms": self.config.performance.request_timeout_ms,
            }
        })
    }

    /// Run the main event loop until a shutdown signal arrives or the
    /// `running` flag is cleared.
    async fn run_event_loop(&mut self) -> ServerResult<()> {
        log::info!("Starting main event loop...");

        // The transports stay owned by `self` so that `cleanup()` is the
        // single place where they are released. (Previously they were
        // moved out here and silently dropped when the loop returned,
        // making `cleanup()` a no-op.)
        if self.c2s_transport.is_none() {
            return Err(ServerError::lifecycle("C2S transport not initialized".to_string()));
        }
        if self.s2c_transport.is_none() {
            return Err(ServerError::lifecycle("S2C transport not initialized".to_string()));
        }

        // The receiver is moved out so the select! below does not hold a
        // borrow of `self` across await points.
        let mut shutdown_rx = self.shutdown_rx.take()
            .ok_or_else(|| ServerError::lifecycle("Shutdown receiver not available".to_string()))?;

        let running = self.running.clone();

        loop {
            tokio::select! {
                // Graceful shutdown requested via `stop()`.
                _ = shutdown_rx.recv() => {
                    log::info!("Shutdown signal received");
                    break;
                }

                // Poll tick — placeholder for the actual request
                // processing. A real implementation would:
                //   1. Read data from the C2S transport
                //   2. Decode frames using the framer
                //   3. Parse requests
                //   4. Dispatch to handlers
                //   5. Encode responses
                //   6. Write to the S2C transport
                _ = tokio::time::sleep(Duration::from_millis(100)) => {
                    if !running.load(Ordering::Acquire) {
                        break;
                    }

                    log::trace!("Event loop iteration");
                }
            }
        }

        log::info!("Event loop terminated");
        Ok(())
    }

    /// Clean up server resources by dropping the shared-memory transports.
    async fn cleanup(&mut self) -> ServerResult<()> {
        log::debug!("Cleaning up server resources...");

        // Dropping the transports releases the shared-memory mappings.
        self.c2s_transport = None;
        self.s2c_transport = None;

        log::debug!("Server cleanup completed");
        Ok(())
    }

    /// Handle a fatal error: record the `Error` state and clear the
    /// running flag so the event loop terminates on its next tick.
    pub fn handle_fatal_error(&mut self, error: ServerError) {
        log::error!("Fatal server error: {}", error);
        self.state = ServerState::Error;
        self.running.store(false, Ordering::Release);
    }
}

/// High-level server interface for external use.
/// High-level server interface for external use.
///
/// Thin wrapper over [`ServerCore`] exposing only the lifecycle and
/// inspection methods callers need.
pub struct XGPUServer {
    // Owns the full server state machine; all methods delegate to it.
    core: ServerCore,
}

impl XGPUServer {
    /// Build a server from an explicit configuration.
    pub fn new(config: ServerConfig) -> ServerResult<Self> {
        Ok(Self {
            core: ServerCore::new(config)?,
        })
    }

    /// Build a server using the default configuration.
    pub fn with_default_config() -> ServerResult<Self> {
        Self::new(ServerConfig::default())
    }

    /// Build a server from configuration read out of the environment.
    pub fn from_env() -> ServerResult<Self> {
        Self::new(ServerConfig::from_env()?)
    }

    /// Initialize the underlying core and run it.
    pub async fn start(&mut self) -> ServerResult<()> {
        self.core.initialize().await?;
        self.core.start().await
    }

    /// Shut the server down gracefully.
    pub async fn stop(&mut self) -> ServerResult<()> {
        self.core.stop().await
    }

    /// Whether the server is currently running.
    pub fn is_running(&self) -> bool {
        self.core.is_running()
    }

    /// Current lifecycle state of the server.
    pub fn state(&self) -> ServerState {
        self.core.state()
    }

    /// JSON snapshot of server statistics.
    pub fn get_stats(&self) -> serde_json::Value {
        self.core.get_stats()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_server_creation() {
        // Construction with the default configuration must succeed.
        assert!(XGPUServer::new(ServerConfig::default()).is_ok());
    }

    #[test]
    fn test_server_state_transitions() {
        // No `mut` needed: only `&self` accessors are called.
        let core = ServerCore::new(ServerConfig::default()).unwrap();

        // A freshly-built core starts in `Initializing` and is not running.
        assert_eq!(core.state(), ServerState::Initializing);
        assert!(!core.is_running());
    }

    #[tokio::test]
    async fn test_server_lifecycle() {
        // No `mut` needed: only `&self` accessors are called.
        let server = XGPUServer::new(ServerConfig::default()).unwrap();

        assert_eq!(server.state(), ServerState::Initializing);

        // Note: a full lifecycle test would require proper shared memory
        // setup; this is a basic state check only.
        assert!(!server.is_running());
    }
}