pub mod config;
mod cli;

use tonic::{Request, Response, Status, transport::Server};
use std::collections::HashSet;
use std::hash::{Hash, Hasher};
use bincode::{Decode, Encode};
use sled::transaction::ConflictableTransactionError;
use std::time::{SystemTime, UNIX_EPOCH, Duration};
use std::sync::{Arc, RwLock};
use rand::prelude::IndexedRandom;
use tokio::time;

// Pull in the gRPC code that tonic generates from the `lab_distributed`
// proto package at build time (service/client stubs and message types).
pub mod lab_distributed {
    tonic::include_proto!("lab_distributed");
}
use lab_distributed::lab_distributed_service_server::{LabDistributedService, LabDistributedServiceServer};
use lab_distributed::{
    ServiceInfo, GossipPushRequest, GossipPushResponse,
    GossipPullRequest, GossipPullResponse,
};
use crate::config::Settings;

// A serializable, hashable mirror of the gRPC-generated `ServiceInfo`.
// Identity (Eq/Hash) is keyed on `address` only — see the manual impls
// below — so a `HashSet<StoredServiceInfo>` behaves like a map from
// service address to its latest known record.
#[derive(Clone, Debug, Encode, Decode, Eq)]
struct StoredServiceInfo {
    address: String,   // network address of the service; the identity key
    timestamp: u64,    // seconds since the UNIX epoch when the record was produced
    status: String,    // free-form status string, e.g. "ACTIVE"
    role: String,      // free-form role string, e.g. "Follower"
}

// Equality considers only the address, so a HashSet holds at most one record
// per service address regardless of timestamp/status/role differences.
impl PartialEq for StoredServiceInfo {
    fn eq(&self, other: &Self) -> bool {
        self.address == other.address
    }
}

// Hash only the address, keeping Hash consistent with PartialEq above:
// records that compare equal must hash equally for HashSet to work correctly.
impl Hash for StoredServiceInfo {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.address.hash(state);
    }
}

// Convert from the gRPC wire type into the storable/hashable type
// (straight field-for-field move, no allocation beyond the moves).
impl From<ServiceInfo> for StoredServiceInfo {
    fn from(s: ServiceInfo) -> Self {
        Self {
            address: s.address,
            timestamp: s.timestamp,
            status: s.status,
            role: s.role,
        }
    }
}

// Convert a stored record back into the gRPC wire type for responses
// (mirror of the conversion above).
impl From<StoredServiceInfo> for ServiceInfo {
    fn from(s: StoredServiceInfo) -> Self {
        Self {
            address: s.address,
            timestamp: s.timestamp,
            status: s.status,
            role: s.role,
        }
    }
}

// Shared state for the gossip service. Derives `Clone` so the same state can
// be handed to both the gRPC server and the background sync task.
#[derive(Clone)]
pub struct LabDistributedServiceImpl {
    // sled::Db is internally reference-counted, so cloning this handle shares
    // the same on-disk database rather than opening a second one.
    db: sled::Db,
    peers: Arc<RwLock<Vec<String>>>,  // known peer addresses; RwLock because gossip_push may add new peers
    sync_interval: Duration,
}

const LAST_SYNC_KEY: &[u8] = b"last_sync_timestamp";   // db key holding the bincode-encoded u64 of the last sync time

const GROUP_NAME: &[u8] = b"lab-distributed";          // db key holding the encoded HashSet<StoredServiceInfo> for this group

impl LabDistributedServiceImpl {
    /// Builds the service from `settings`: opens (or creates) the sled
    /// database and seeds the peer list and sync interval from configuration.
    ///
    /// Panics if the database cannot be opened — startup cannot proceed
    /// without persistent storage.
    fn new(settings: &Settings) -> Self {
        let db = sled::open(settings.get_db_path())
            .expect("Failed to open sled database");
        Self {
            db,
            peers: Arc::new(RwLock::new(settings.get_peer_addrs())),
            sync_interval: settings.get_sync_interval(),
        }
    }

    // Register this node's own record into the group's service set.
    //
    // The set is keyed by address (see PartialEq/Hash on StoredServiceInfo),
    // so `replace` is used instead of `insert`: a restarted node refreshes
    // the timestamp of its existing entry instead of silently keeping the
    // stale one (`insert` is a no-op when an equal element already exists).
    fn register_self(&self, addr: String) -> Result<(), Box<dyn std::error::Error>> {
        let stored_info = StoredServiceInfo {
            address: addr.clone(),
            timestamp: Self::current_timestamp(),
            status: "ACTIVE".to_string(),
            role: "Follower".to_string(),  // every node starts as a Follower
        };

        println!("Registering self: {} at timestamp {}", addr, stored_info.timestamp);

        // NOTE: sled may re-run this closure on conflict, so it must stay
        // free of external side effects (no printing inside).
        let transaction_result = self.db.transaction(|tx_db| -> Result<(), ConflictableTransactionError> {
            let current_services_bytes = tx_db.get(GROUP_NAME)?;
            let mut services: HashSet<StoredServiceInfo> = if let Some(bytes) = current_services_bytes {
                bincode::decode_from_slice(bytes.as_ref(), bincode::config::standard())
                    .map_err(Self::make_transaction_error)?
                    .0
            } else {
                HashSet::new()
            };

            // `replace` both inserts a new entry and overwrites an equal
            // (same-address) one, refreshing timestamp/status/role.
            services.replace(stored_info.clone());

            let updated_services_bytes = bincode::encode_to_vec(&services, bincode::config::standard())
                .map_err(Self::make_transaction_error)?;
            tx_db.insert(GROUP_NAME, updated_services_bytes)?;

            Ok(())
        });

        if transaction_result.is_ok() {
            println!("Successfully registered self to database: {}", stored_info.address);
        }

        transaction_result.map_err(|e| Box::new(e) as Box<dyn std::error::Error>)
    }

    // 改进辅助函数命名，返回 ConflictableTransactionError
    fn make_transaction_error<E: std::fmt::Display>(e: E) -> ConflictableTransactionError {
        ConflictableTransactionError::Storage(sled::Error::Io(std::io::Error::new(
            std::io::ErrorKind::Other,
            e.to_string()
        )))
    }

    // Returns every service record in the group whose timestamp is strictly
    // newer than `since_timestamp`, converted to the gRPC `ServiceInfo` type.
    //
    // Decode failures are logged and yield an empty result rather than an
    // error: gossip is best-effort, and one corrupt value should not take
    // the endpoint down. A database read failure is surfaced as
    // `Status::internal`. (The old iterate-all-keys variant that lived here
    // as commented-out code has been removed.)
    async fn get_services_since(&self, since_timestamp: u64) -> Result<Vec<ServiceInfo>, Status> {
        let mut services = Vec::new();

        if let Some(value) = self.db.get(GROUP_NAME).map_err(|e| Status::internal(format!("Database error: {}", e)))? {
            match bincode::decode_from_slice::<HashSet<StoredServiceInfo>, _>(&value, bincode::config::standard()) {
                Ok((stored_services, _)) => {
                    services.extend(
                        stored_services
                            .into_iter()
                            .filter(|s| s.timestamp > since_timestamp)
                            .map(ServiceInfo::from)
                    );
                }
                Err(e) => {
                    eprintln!("Failed to decode services for key {:?}: {}", String::from_utf8_lossy(GROUP_NAME), e);
                }
            }
        }

        Ok(services)
    }

    // Current wall-clock time in whole seconds since the UNIX epoch; this is
    // the logical clock stamped onto service records.
    //
    // `duration_since(UNIX_EPOCH)` only fails if the system clock is set
    // before 1970 — treated as a broken-invariant bug, hence `expect`.
    fn current_timestamp() -> u64 {
        SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .expect("system clock is set before the UNIX epoch")
            .as_secs()
    }

    // Read the persisted last-sync timestamp. A missing key, a storage
    // error, or a decode failure all fall back to 0 ("never synced").
    fn get_last_sync_timestamp(&self) -> u64 {
        let raw = match self.db.get(LAST_SYNC_KEY) {
            Ok(Some(bytes)) => bytes,
            _ => return 0,
        };
        match bincode::decode_from_slice::<u64, _>(&raw, bincode::config::standard()) {
            Ok((ts, _)) => ts,
            Err(_) => 0,
        }
    }

    // 更新最后同步时间戳
    fn update_last_sync_timestamp(&self, timestamp: u64) -> Result<(), sled::Error> {
        let encoded = bincode::encode_to_vec(&timestamp, bincode::config::standard())
            .map_err(|e| sled::Error::Io(std::io::Error::new(std::io::ErrorKind::Other, e)))?;
        self.db.insert(LAST_SYNC_KEY, encoded)?;
        self.db.flush()?;
        Ok(())
    }

    // Background loop: every `sync_interval`, log the locally known service
    // set and gossip with up to 3 randomly chosen peers.
    //
    // NOTE(review): the last-sync timestamp is read but never advanced here
    // (the `update_last_sync_timestamp` call was previously commented out),
    // so with a fresh database every round is a full sync since timestamp 0.
    // Confirm whether incremental sync should be re-enabled before relying
    // on `since_timestamp` filtering for efficiency.
    async fn start_sync_task(self) {
        let mut interval = time::interval(self.sync_interval);

        loop {
            interval.tick().await;

            let last_sync = self.get_last_sync_timestamp();
            let current_time = Self::current_timestamp();

            // Debug dump of everything currently known about the group;
            // decode failures degrade to an empty set.
            let states = self.db.get(GROUP_NAME)
                .ok()
                .flatten()
                .and_then(|bytes| bincode::decode_from_slice::<HashSet<StoredServiceInfo>, _>(&bytes, bincode::config::standard()).ok().map(|(s, _)| s))
                .unwrap_or_default();
            println!("Starting sync task. Last sync at {}, current time {}", last_sync, current_time);
            for state in states {
                println!("Known service: {:?}", state);
            }

            // Pick up to 3 random peers rather than syncing with everyone.
            // The read lock is released before the slow network calls below.
            let peers_to_sync: Vec<String> = {
                let peers = self.peers.read().unwrap();
                println!("Current peers: {:?}", *peers);
                peers
                    .choose_multiple(&mut rand::rng(), 3)
                    .cloned()
                    .collect()
            };

            // Best-effort: a failure with one peer must not stop the rest.
            for peer in peers_to_sync.iter() {
                if let Err(e) = self.sync_with_peer(peer, last_sync).await {
                    eprintln!("Failed to sync with {}: {}", peer, e);
                }
            }
        }
    }

    // One gossip round with a single peer: pull the peer's updates into the
    // local store, then push our local updates back to the peer.
    async fn sync_with_peer(&self, peer: &str, since_timestamp: u64) -> Result<(), Box<dyn std::error::Error>> {
        let endpoint = format!("http://{}", peer);
        let mut client =
            lab_distributed::lab_distributed_service_client::LabDistributedServiceClient::connect(endpoint).await?;

        // Pull: fetch everything the peer has seen since `since_timestamp`.
        let pull_response = client.gossip_pull(GossipPullRequest { since_timestamp }).await?;
        let pulled_services = pull_response.into_inner().services;

        if !pulled_services.is_empty() {
            println!("Pulled {} services from peer {}", pulled_services.len(), peer);
            // Merge the pulled records by invoking our own push handler locally.
            let merge_request = Request::new(GossipPushRequest { services: pulled_services });
            self.gossip_push(merge_request).await?;
        }

        // Push: send our own recent updates to the peer.
        let local_updates = self.get_services_since(since_timestamp).await?;
        if !local_updates.is_empty() {
            println!("Synced {} services with peer {}", local_updates.len(), peer);
            client.gossip_push(GossipPushRequest { services: local_updates }).await?;
        }

        Ok(())
    }
}

#[tonic::async_trait]
impl LabDistributedService for LabDistributedServiceImpl {
    /// Handle a push from a peer (or from our own sync loop): merge the
    /// incoming service records into the local store, keeping whichever
    /// record per address carries the newest timestamp, and learn any
    /// previously unknown peer addresses.
    ///
    /// Improvement: all records are merged in a SINGLE sled transaction
    /// instead of one transaction per record, so the batch is applied
    /// atomically and the stored set is decoded/encoded exactly once.
    async fn gossip_push(
        &self,
        request: Request<GossipPushRequest>,
    ) -> Result<Response<GossipPushResponse>, Status> {
        let req = request.into_inner();

        println!("Received gossip push services:  {:?}", req.services);

        let incoming: Vec<StoredServiceInfo> =
            req.services.into_iter().map(StoredServiceInfo::from).collect();

        // Learn new peers. Check under the read lock first so the common
        // case (already known) never takes the write lock, then re-check
        // under the write lock to avoid duplicate inserts.
        for info in &incoming {
            let already_known = self.peers.read().unwrap().contains(&info.address);
            if !already_known {
                let mut peers = self.peers.write().unwrap();
                if !peers.contains(&info.address) {
                    println!("Adding new peer to peer list: {}", info.address);
                    peers.push(info.address.clone());
                }
            }
        }

        // Merge every incoming record in one transaction. NOTE: sled may
        // re-run this closure on conflict, so it must stay side-effect free.
        let transaction_result = self.db.transaction(|tx_db| -> Result<(), ConflictableTransactionError> {
            let mut services: HashSet<StoredServiceInfo> = match tx_db.get(GROUP_NAME)? {
                Some(bytes) => bincode::decode_from_slice(bytes.as_ref(), bincode::config::standard())
                    .map_err(Self::make_transaction_error)?
                    .0,
                None => HashSet::new(),
            };

            for info in &incoming {
                // Last-writer-wins per address: keep the existing record only
                // if it is at least as new as the incoming one.
                let keep_existing = services
                    .get(info)
                    .map_or(false, |existing| info.timestamp <= existing.timestamp);
                if !keep_existing {
                    services.replace(info.clone());
                }
            }

            let updated_services_bytes = bincode::encode_to_vec(&services, bincode::config::standard())
                .map_err(Self::make_transaction_error)?;
            tx_db.insert(GROUP_NAME, updated_services_bytes)?;

            Ok(())
        });

        if let Err(e) = transaction_result {
            return Err(Status::internal(format!("Database transaction failed: {}", e)));
        }

        Ok(Response::new(GossipPushResponse { success: true }))
    }

    // Handle a pull: validate the requested timestamp, then return every
    // record updated strictly after it.
    async fn gossip_pull(
        &self,
        request: Request<GossipPullRequest>,
    ) -> Result<Response<GossipPullResponse>, Status> {
        let since = request.into_inner().since_timestamp;
        println!("Received gossip pull request since timestamp {}", since);

        // Reject timestamps from the far future (allow up to 1h clock skew).
        let skew_limit = Self::current_timestamp() + 3600;
        if since > skew_limit {
            return Err(Status::invalid_argument("Invalid timestamp"));
        }

        let services = self.get_services_since(since).await?;
        println!("Returning services {:?}", services);
        Ok(Response::new(GossipPullResponse { services }))
    }
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Load configuration and resolve the listen address.
    let settings = Settings::new()?;
    let addr = settings.get_server_addr().parse()?;
    println!("Server listening on {}", addr);

    // Build the shared service state and record this node in the database.
    let service = LabDistributedServiceImpl::new(&settings);
    service.register_self(settings.get_server_addr())?;

    // Run the periodic gossip loop on a background task; the clone shares
    // the same sled handle and peer list.
    let sync_service = service.clone();
    tokio::spawn(async move {
        sync_service.start_sync_task().await;
    });

    // Serve the gRPC API until shutdown.
    Server::builder()
        .add_service(LabDistributedServiceServer::new(service))
        .serve(addr)
        .await?;

    Ok(())
}