use crate::error::ClusterError;
use crate::types::{NodeInfo, Request, Response};
use anyhow::Result;
use duckdb::{params, Connection};
use openraft::{Entry, EntryPayload, LogId, RaftLogStorage, RaftStateMachine, StorageError};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::RwLock;

/// Storage manager that handles DuckDB storage backend.
///
/// Owns the single DuckDB connection and the in-memory cluster state
/// machine; both are shared via `Arc<RwLock<_>>` with the Raft storage
/// adapter produced by [`StorageManager::create_raft_storage`].
pub struct StorageManager {
    // One DuckDB connection guarded by an async RwLock; all reads and
    // writes in this file go through this handle.
    connection: Arc<RwLock<Connection>>,
    // Authoritative in-memory copy of the replicated cluster state,
    // periodically persisted to the `cluster_state` table.
    state_machine: Arc<RwLock<ClusterStateMachine>>,
}

impl StorageManager {
    /// Create a DuckDB-backed storage manager.
    ///
    /// `database_path`: `Some(path)` opens (or creates) an on-disk
    /// database file; `None` uses a transient in-memory database.
    ///
    /// The schema is initialized (idempotently) and any previously
    /// persisted state-machine snapshot is loaded before returning.
    pub async fn new(database_path: Option<&str>) -> Result<Self> {
        let connection = if let Some(path) = database_path {
            Connection::open(path)?
        } else {
            Connection::open_in_memory()?
        };

        // Initialize database schema
        Self::initialize_schema(&connection).await?;

        // Load existing state machine data
        let state_machine = Self::load_state_machine(&connection).await?;

        Ok(Self {
            connection: Arc::new(RwLock::new(connection)),
            state_machine: Arc::new(RwLock::new(state_machine)),
        })
    }

    /// Create the backing tables (Raft log entries, Raft metadata, and the
    /// state-machine snapshot) if they do not already exist. Idempotent,
    /// so it is safe to call on every startup.
    async fn initialize_schema(conn: &Connection) -> Result<()> {
        // Create table for Raft logs; entries are stored bincode-encoded.
        conn.execute(
            "CREATE TABLE IF NOT EXISTS raft_logs (
                log_index BIGINT PRIMARY KEY,
                log_term BIGINT NOT NULL,
                entry_data BLOB NOT NULL
            )",
            [],
        )?;

        // Create table for Raft metadata (keys such as 'last_applied'
        // and 'last_log', each holding a (log_index, log_term) pair).
        conn.execute(
            "CREATE TABLE IF NOT EXISTS raft_meta (
                key_name VARCHAR PRIMARY KEY,
                log_index BIGINT,
                log_term BIGINT
            )",
            [],
        )?;

        // Create table for the cluster-state snapshot; only row id = 1 is
        // ever used (see save_state_machine / load_state_machine).
        conn.execute(
            "CREATE TABLE IF NOT EXISTS cluster_state (
                id INTEGER PRIMARY KEY,
                state_data BLOB NOT NULL,
                updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )",
            [],
        )?;

        // Create index for term-based lookups on the Raft log.
        conn.execute(
            "CREATE INDEX IF NOT EXISTS idx_raft_logs_term ON raft_logs(log_term)",
            [],
        )?;

        Ok(())
    }

    /// Load the persisted state machine from row id = 1 of
    /// `cluster_state`; returns an empty state machine when no snapshot
    /// has been saved yet.
    async fn load_state_machine(conn: &Connection) -> Result<ClusterStateMachine> {
        let mut stmt = conn.prepare("SELECT state_data FROM cluster_state WHERE id = 1")?;
        let mut rows = stmt.query([])?;

        if let Some(row) = rows.next()? {
            let data: Vec<u8> = row.get(0)?;
            let state_machine: ClusterStateMachine = bincode::deserialize(&data)?;
            Ok(state_machine)
        } else {
            Ok(ClusterStateMachine::new())
        }
    }

    /// Serialize the state machine with bincode and persist it,
    /// overwriting the single snapshot row (id = 1).
    async fn save_state_machine(
        conn: &Connection,
        state_machine: &ClusterStateMachine,
    ) -> Result<()> {
        let data = bincode::serialize(state_machine)?;
        conn.execute(
            "INSERT OR REPLACE INTO cluster_state (id, state_data) VALUES (1, ?)",
            params![data],
        )?;
        Ok(())
    }

    /// Return a shared handle (`Arc<RwLock<...>>`) to the in-memory
    /// state machine.
    pub async fn get_state_machine(&self) -> Arc<RwLock<ClusterStateMachine>> {
        self.state_machine.clone()
    }

    /// Create a Raft storage adapter that shares this manager's
    /// connection and state machine.
    pub async fn create_raft_storage(&self) -> DuckDBRaftStorage {
        DuckDBRaftStorage {
            connection: self.connection.clone(),
            state_machine: self.state_machine.clone(),
        }
    }
}

/// DuckDB implementation of Raft storage.
///
/// Shares the connection and state machine with the owning
/// [`StorageManager`]; constructed by `StorageManager::create_raft_storage`.
pub struct DuckDBRaftStorage {
    // Shared DuckDB connection (same handle as the StorageManager's).
    connection: Arc<RwLock<Connection>>,
    // Shared in-memory cluster state machine.
    state_machine: Arc<RwLock<ClusterStateMachine>>,
}

#[async_trait::async_trait]
impl RaftLogStorage<u64, Request> for DuckDBRaftStorage {
    type Error = StorageError<u64>;

    /// Fetch log entries with `start.index <= index <= end.index`
    /// (inclusive bounds), ordered by index and decoded from their
    /// bincode representation.
    async fn get_log_entries(
        &mut self,
        start: LogId<u64>,
        end: LogId<u64>,
    ) -> Result<Vec<Entry<Request>>, Self::Error> {
        let conn = self.connection.read().await;
        let mut stmt = conn
            .prepare("SELECT entry_data FROM raft_logs WHERE log_index >= ? AND log_index <= ? ORDER BY log_index")
            .map_err(|e| StorageError::IO { source: anyhow::anyhow!(e) })?;

        let rows = stmt
            .query_map(params![start.index as i64, end.index as i64], |row| {
                let data: Vec<u8> = row.get(0)?;
                Ok(data)
            })
            .map_err(|e| StorageError::IO {
                source: anyhow::anyhow!(e),
            })?;

        let mut entries = Vec::new();
        for row in rows {
            let data = row.map_err(|e| StorageError::IO {
                source: anyhow::anyhow!(e),
            })?;
            let entry: Entry<Request> =
                bincode::deserialize(&data).map_err(|e| StorageError::IO {
                    source: anyhow::anyhow!(e),
                })?;
            entries.push(entry);
        }

        Ok(entries)
    }

    /// Delete log entries in the inclusive index range
    /// `[start.index, end.index]`.
    async fn delete_log_entries(
        &mut self,
        start: LogId<u64>,
        end: LogId<u64>,
    ) -> Result<(), Self::Error> {
        let conn = self.connection.write().await;
        conn.execute(
            "DELETE FROM raft_logs WHERE log_index >= ? AND log_index <= ?",
            params![start.index as i64, end.index as i64],
        )
        .map_err(|e| StorageError::IO {
            source: anyhow::anyhow!(e),
        })?;

        Ok(())
    }

    /// Append log entries, serialized with bincode, in a single
    /// transaction.
    ///
    /// The batch must be atomic: a crash after a partial append would
    /// otherwise leave a hole in the Raft log, which violates the log
    /// contiguity invariant. Wrapping the inserts in one transaction also
    /// avoids paying an implicit-transaction commit per row.
    async fn append_log_entries(&mut self, entries: &[Entry<Request>]) -> Result<(), Self::Error> {
        let conn = self.connection.write().await;

        conn.execute_batch("BEGIN TRANSACTION")
            .map_err(|e| StorageError::IO {
                source: anyhow::anyhow!(e),
            })?;

        for entry in entries {
            // Serialize then insert; on any failure, roll back the whole
            // batch (best effort) and report the original error.
            let result = bincode::serialize(entry)
                .map_err(|e| anyhow::anyhow!(e))
                .and_then(|data| {
                    conn.execute(
                        "INSERT OR REPLACE INTO raft_logs (log_index, log_term, entry_data) VALUES (?, ?, ?)",
                        params![entry.log_id.index as i64, entry.log_id.term as i64, data],
                    )
                    .map_err(|e| anyhow::anyhow!(e))
                });

            if let Err(e) = result {
                let _ = conn.execute_batch("ROLLBACK");
                return Err(StorageError::IO { source: e });
            }
        }

        conn.execute_batch("COMMIT").map_err(|e| StorageError::IO {
            source: anyhow::anyhow!(e),
        })?;

        Ok(())
    }

    /// Read the `last_applied` log id from `raft_meta`; `None` if never
    /// set.
    async fn get_last_applied(&self) -> Result<Option<LogId<u64>>, Self::Error> {
        let conn = self.connection.read().await;
        let mut stmt = conn
            .prepare("SELECT log_index, log_term FROM raft_meta WHERE key_name = 'last_applied'")
            .map_err(|e| StorageError::IO {
                source: anyhow::anyhow!(e),
            })?;

        let mut rows = stmt.query([]).map_err(|e| StorageError::IO {
            source: anyhow::anyhow!(e),
        })?;

        if let Some(row) = rows.next().map_err(|e| StorageError::IO {
            source: anyhow::anyhow!(e),
        })? {
            let index: i64 = row.get(0).map_err(|e| StorageError::IO {
                source: anyhow::anyhow!(e),
            })?;
            let term: i64 = row.get(1).map_err(|e| StorageError::IO {
                source: anyhow::anyhow!(e),
            })?;
            Ok(Some(LogId {
                term: term as u64,
                index: index as u64,
            }))
        } else {
            Ok(None)
        }
    }

    /// Upsert the `last_applied` log id into `raft_meta`.
    async fn set_last_applied(&mut self, log_id: LogId<u64>) -> Result<(), Self::Error> {
        let conn = self.connection.write().await;
        conn.execute(
            "INSERT OR REPLACE INTO raft_meta (key_name, log_index, log_term) VALUES ('last_applied', ?, ?)",
            params![log_id.index as i64, log_id.term as i64],
        ).map_err(|e| StorageError::IO { source: anyhow::anyhow!(e) })?;

        Ok(())
    }

    /// Read the `last_log` id from `raft_meta`; `None` if never set.
    async fn get_last_log_id(&self) -> Result<Option<LogId<u64>>, Self::Error> {
        let conn = self.connection.read().await;
        let mut stmt = conn
            .prepare("SELECT log_index, log_term FROM raft_meta WHERE key_name = 'last_log'")
            .map_err(|e| StorageError::IO {
                source: anyhow::anyhow!(e),
            })?;

        let mut rows = stmt.query([]).map_err(|e| StorageError::IO {
            source: anyhow::anyhow!(e),
        })?;

        if let Some(row) = rows.next().map_err(|e| StorageError::IO {
            source: anyhow::anyhow!(e),
        })? {
            let index: i64 = row.get(0).map_err(|e| StorageError::IO {
                source: anyhow::anyhow!(e),
            })?;
            let term: i64 = row.get(1).map_err(|e| StorageError::IO {
                source: anyhow::anyhow!(e),
            })?;
            Ok(Some(LogId {
                term: term as u64,
                index: index as u64,
            }))
        } else {
            Ok(None)
        }
    }

    /// Upsert the `last_log` id into `raft_meta`.
    async fn set_last_log_id(&mut self, log_id: LogId<u64>) -> Result<(), Self::Error> {
        let conn = self.connection.write().await;
        conn.execute(
            "INSERT OR REPLACE INTO raft_meta (key_name, log_index, log_term) VALUES ('last_log', ?, ?)",
            params![log_id.index as i64, log_id.term as i64],
        ).map_err(|e| StorageError::IO { source: anyhow::anyhow!(e) })?;

        Ok(())
    }
}

#[async_trait::async_trait]
impl RaftStateMachine<Request, Response> for DuckDBRaftStorage {
    type Error = StorageError<u64>;

    /// Apply log entries to the state machine and persist the new state.
    ///
    /// Fixes relative to the original:
    /// - `last_applied` is now tracked on the state machine (the field
    ///   existed but was never updated).
    /// - The snapshot write takes the connection **write** lock, matching
    ///   the locking discipline of every other DB mutation in this file
    ///   (it previously used the read lock for a write).
    async fn apply(&mut self, entries: &[Entry<Request>]) -> Result<Vec<Response>, Self::Error> {
        let mut responses = Vec::with_capacity(entries.len());
        let mut state_machine = self.state_machine.write().await;

        for entry in entries {
            if let EntryPayload::Normal(request) = &entry.payload {
                match state_machine.apply_request(request.clone()).await {
                    Ok(response) => responses.push(response),
                    Err(e) => {
                        responses.push(Response::error(request.id, e.to_string()));
                    }
                }
            }
            // Record progress for every entry, including non-Normal
            // payloads (e.g. membership / blank entries).
            state_machine.last_applied = Some(entry.log_id.index);
        }

        // Persist state machine state to database under the write lock.
        let conn = self.connection.write().await;
        StorageManager::save_state_machine(&conn, &*state_machine)
            .await
            .map_err(|e| StorageError::IO { source: e })?;

        Ok(responses)
    }

    /// Produce a snapshot of the state machine as bincode bytes.
    async fn get_snapshot(&self) -> Result<Option<Vec<u8>>, Self::Error> {
        let state_machine = self.state_machine.read().await;
        let data = bincode::serialize(&*state_machine).map_err(|e| StorageError::IO {
            source: anyhow::anyhow!(e),
        })?;
        Ok(Some(data))
    }

    /// Install a snapshot: persist it, then replace the in-memory state.
    ///
    /// Persisting first means a failed database write never leaves memory
    /// ahead of disk; it also removes the needless full clone the original
    /// made of the deserialized state machine.
    async fn install_snapshot(&mut self, snapshot: Vec<u8>) -> Result<(), Self::Error> {
        let state_machine: ClusterStateMachine =
            bincode::deserialize(&snapshot).map_err(|e| StorageError::IO {
                source: anyhow::anyhow!(e),
            })?;

        // Persist to database (write lock: this is a DB mutation).
        {
            let conn = self.connection.write().await;
            StorageManager::save_state_machine(&conn, &state_machine)
                .await
                .map_err(|e| StorageError::IO { source: e })?;
        }

        *self.state_machine.write().await = state_machine;

        Ok(())
    }
}

/// Cluster state machine that maintains the cluster state.
///
/// This is the replicated application state: it is mutated by
/// `apply_request` and persisted as a bincode blob in the
/// `cluster_state` table.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ClusterStateMachine {
    /// Map of node ID to node information
    pub nodes: HashMap<String, NodeInfo>,
    /// Cluster configuration (free-form JSON values keyed by name)
    pub config: HashMap<String, serde_json::Value>,
    /// Last applied log index
    pub last_applied: Option<u64>,
}

impl ClusterStateMachine {
    /// 创建空状态机（内存结构）
    pub fn new() -> Self {
        Self {
            nodes: HashMap::new(),
            config: HashMap::new(),
            last_applied: None,
        }
    }

    /// 应用请求（根据 Operation 更新节点 / 配置）
    pub async fn apply_request(&mut self, request: Request) -> Result<Response> {
        use crate::types::Operation;

        match request.operation {
            Operation::AddNode {
                id,
                address,
                metadata,
            } => {
                let node = NodeInfo::new(id.clone(), address, metadata);
                self.nodes.insert(id.clone(), node);
                Ok(Response::success(
                    request.id,
                    Some(serde_json::json!({ "node_id": id })),
                ))
            }
            Operation::RemoveNode { id } => {
                if self.nodes.remove(&id).is_some() {
                    Ok(Response::success(
                        request.id,
                        Some(serde_json::json!({ "removed_node_id": id })),
                    ))
                } else {
                    Ok(Response::error(
                        request.id,
                        format!("Node {} not found", id),
                    ))
                }
            }
            Operation::UpdateNode {
                id,
                address,
                metadata,
            } => {
                if let Some(node) = self.nodes.get_mut(&id) {
                    if let Some(new_address) = address {
                        node.address = new_address;
                    }
                    if let Some(new_metadata) = metadata {
                        node.metadata = new_metadata;
                    }
                    node.update_last_seen();

                    Ok(Response::success(
                        request.id,
                        Some(serde_json::json!({ "updated_node_id": id })),
                    ))
                } else {
                    Ok(Response::error(
                        request.id,
                        format!("Node {} not found", id),
                    ))
                }
            }
            Operation::UpdateConfig { key, value } => {
                self.config.insert(key.clone(), value);
                Ok(Response::success(
                    request.id,
                    Some(serde_json::json!({ "updated_config_key": key })),
                ))
            }
            Operation::Custom {
                operation_type,
                data,
            } => {
                // Handle custom operations based on operation_type
                Ok(Response::success(
                    request.id,
                    Some(serde_json::json!({
                        "operation_type": operation_type,
                        "processed": true,
                        "data": data
                    })),
                ))
            }
        }
    }

    /// Get all nodes in the cluster
    pub fn get_nodes(&self) -> &HashMap<String, NodeInfo> {
        &self.nodes
    }

    /// Get a specific node by ID
    pub fn get_node(&self, id: &str) -> Option<&NodeInfo> {
        self.nodes.get(id)
    }

    /// Get cluster configuration
    pub fn get_config(&self) -> &HashMap<String, serde_json::Value> {
        &self.config
    }

    /// Get the number of active nodes
    pub fn active_node_count(&self) -> usize {
        self.nodes
            .values()
            .filter(|node| node.status.is_active())
            .count()
    }

    /// Get the number of healthy nodes
    pub fn healthy_node_count(&self) -> usize {
        self.nodes
            .values()
            .filter(|node| node.status.is_available())
            .count()
    }
}

impl Default for ClusterStateMachine {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::types::Operation;
    use tempfile::tempdir;

    /// A fresh in-memory storage starts with an empty state machine.
    #[tokio::test]
    async fn test_duckdb_storage_manager_creation() {
        let storage = StorageManager::new(None).await.unwrap();
        let state_machine = storage.get_state_machine().await;
        let sm = state_machine.read().await;
        assert_eq!(sm.nodes.len(), 0);
    }

    /// AddNode / RemoveNode round-trip on a bare state machine.
    #[tokio::test]
    async fn test_cluster_state_machine_operations() {
        let mut sm = ClusterStateMachine::new();

        // Test add node
        let operation = Operation::AddNode {
            id: "node1".to_string(),
            address: "127.0.0.1:8080".to_string(),
            metadata: HashMap::new(),
        };
        let request = Request::new(operation);
        let response = sm.apply_request(request).await.unwrap();
        assert!(response.success);
        assert!(sm.nodes.contains_key("node1"));

        // Test remove node
        let operation = Operation::RemoveNode {
            id: "node1".to_string(),
        };
        let request = Request::new(operation);
        let response = sm.apply_request(request).await.unwrap();
        assert!(response.success);
        assert!(!sm.nodes.contains_key("node1"));
    }

    /// State saved to an on-disk database survives reopening it.
    #[tokio::test]
    async fn test_duckdb_persistence() {
        let temp_dir = tempdir().unwrap();
        let db_path = temp_dir.path().join("test.db");

        // Create storage, mutate the state machine, and persist it.
        {
            let storage = StorageManager::new(Some(db_path.to_str().unwrap()))
                .await
                .unwrap();
            let state_machine = storage.get_state_machine().await;
            let mut sm = state_machine.write().await;

            let operation = Operation::AddNode {
                id: "persistent_node".to_string(),
                address: "127.0.0.1:9999".to_string(),
                metadata: HashMap::new(),
            };
            let request = Request::new(operation);
            sm.apply_request(request).await.unwrap();

            // BUG FIX: apply_request only mutates the in-memory state and
            // never touches the database; without an explicit save the
            // reopened storage below loads an empty snapshot and the
            // final assertion fails. Persist through the same helper the
            // Raft state machine uses.
            let conn = storage.connection.read().await;
            StorageManager::save_state_machine(&conn, &*sm)
                .await
                .unwrap();
        }

        // Recreate storage and verify data persists
        {
            let storage = StorageManager::new(Some(db_path.to_str().unwrap()))
                .await
                .unwrap();
            let state_machine = storage.get_state_machine().await;
            let sm = state_machine.read().await;
            assert!(sm.nodes.contains_key("persistent_node"));
        }
    }
}
