use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};

// no file/WAL responsibilities here; queue is purely in-memory for sending
use crate::command::{Command, CommandBroadcaster};
use tokio::sync::{Mutex, Notify, broadcast, mpsc};
use tracing::info;

/// Marker trait for records that may flow through an `ExporterQueue`.
///
/// The bounds only require that a record can be moved and shared across
/// tasks; the trait itself declares no methods.
///
/// A blanket implementation is provided for `prost::Message` types.
pub trait QueueRecord: Send + Sync + 'static {}

/// Blanket implementation: every protobuf message qualifies as a queue record.
impl<T> QueueRecord for T where T: prost::Message + Send + Sync + 'static {}

/// Errors produced by exporter-queue operations.
///
/// The type is `Copy` and field-less, so callers can match, compare, and
/// propagate it freely.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum QueueError {
    /// The queue has been shut down (or the channel closed); no further
    /// items are accepted.
    Closed,
}

impl std::fmt::Display for QueueError {
    /// Renders the user-facing message for each error variant.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let message = match self {
            QueueError::Closed => "queue closed",
        };
        f.write_str(message)
    }
}

// `QueueError` carries no source error, so the default `Error` methods suffice.
impl std::error::Error for QueueError {}

// WAL is purposefully not handled by this queue implementation.

/// Internal envelope for a record while it sits in the channel.
struct QueuedItem<T> {
    /// The caller-supplied record; currently the only payload.
    record: T,
}

/// Bounded, in-memory, single-consumer export queue.
///
/// Producers push via `enqueue`; a single worker task (see `start_worker`)
/// drains items and hands them to a caller-supplied handler. The queue can
/// be paused/purged and shut down gracefully.
pub struct ExporterQueue<T: QueueRecord> {
    /// Producer side of the bounded channel.
    tx: mpsc::Sender<QueuedItem<T>>,
    /// Consumer side; taken (left `None`) by the first `start_worker` call.
    rx: Mutex<Option<mpsc::Receiver<QueuedItem<T>>>>,
    /// Set once shutdown begins; `enqueue` rejects new items afterwards.
    is_shutdown: AtomicBool,
    /// Signals the worker to drain remaining items and exit.
    shutdown_request: Notify,
    /// Signaled by the worker once it has finished and is about to exit.
    drained: Notify,
    /// request to purge all in-memory queued items immediately
    purge_request: Notify,
    /// Optional source of `Command` broadcasts the worker subscribes to
    /// (link up/down toggles `network_available`).
    command_broadcaster: Option<Arc<CommandBroadcaster>>,
    /// Soft flag read via `is_network_available`; toggled by commands and
    /// by `pause`/`resume`. The worker itself does not gate on it.
    network_available: AtomicBool,
}

impl<T: QueueRecord> ExporterQueue<T> {
    /// Create a new queue bounded to `capacity_items` in-flight records.
    ///
    /// A capacity of 0 is clamped to 1 because `tokio::sync::mpsc::channel`
    /// panics on zero capacity.
    ///
    /// # Errors
    ///
    /// Currently infallible; the `Result` return is kept for interface
    /// stability.
    pub async fn new(
        capacity_items: usize,
        command_broadcaster: Option<Arc<CommandBroadcaster>>,
    ) -> Result<Arc<Self>, QueueError> {
        let (tx, rx) = mpsc::channel::<QueuedItem<T>>(capacity_items.max(1));
        Ok(Arc::new(Self {
            tx,
            rx: Mutex::new(Some(rx)),
            is_shutdown: AtomicBool::new(false),
            shutdown_request: Notify::new(),
            drained: Notify::new(),
            purge_request: Notify::new(),
            command_broadcaster,
            network_available: AtomicBool::new(true),
        }))
    }

    /// Enqueue a record into the in-memory channel (bounded by item count).
    ///
    /// Awaits while the channel is full (backpressure).
    ///
    /// # Errors
    ///
    /// Returns [`QueueError::Closed`] once shutdown has begun or the
    /// receiver has been dropped.
    pub async fn enqueue(&self, record: T) -> Result<(), QueueError> {
        if self.is_shutdown.load(Ordering::Relaxed) {
            return Err(QueueError::Closed);
        }
        self.tx
            .send(QueuedItem { record })
            .await
            .map_err(|_| QueueError::Closed)
    }

    /// Start a single worker task that drains the queue and invokes
    /// `handler(record).await` for each item.
    ///
    /// Only one worker may be started; subsequent calls will return false.
    pub async fn start_worker<F, Fut>(self: &Arc<Self>, mut handler: F) -> bool
    where
        F: FnMut(T) -> Fut + Send + 'static,
        Fut: std::future::Future<Output = ()> + Send + 'static,
    {
        // Take exclusive ownership of the receiver; `None` means a worker
        // has already claimed it.
        let rx = match self.rx.lock().await.take() {
            Some(r) => r,
            None => return false,
        };
        let queue = Arc::clone(self);
        tokio::spawn(async move {
            let mut rx = rx;
            let mut command_rx = queue.command_broadcaster.as_ref().map(|cb| cb.subscribe());

            loop {
                tokio::select! {
                    // `biased` makes control signals win over data, so a
                    // pending shutdown/purge is seen before the next record.
                    biased;
                    _ = queue.shutdown_request.notified() => {
                        // Drop remaining items without processing, then exit.
                        while rx.try_recv().is_ok() {}
                        break;
                    }
                    _ = queue.purge_request.notified() => {
                        // Discard everything currently buffered, keep running.
                        while rx.try_recv().is_ok() {}
                    }
                    command_result = async {
                        match &mut command_rx {
                            Some(cmd_rx) => cmd_rx.recv().await,
                            None => std::future::pending().await,
                        }
                    } => {
                        match command_result {
                            Ok(Command::LinkUp) => {
                                info!("Network link is up - enabling export");
                                queue.network_available.store(true, Ordering::Relaxed);
                            }
                            Ok(Command::LinkDown) => {
                                info!("Network link is down - pausing export");
                                queue.network_available.store(false, Ordering::Relaxed);
                            }
                            Err(broadcast::error::RecvError::Lagged(skipped)) => {
                                info!("Command receiver lagged, skipped {} commands", skipped);
                            }
                            Err(broadcast::error::RecvError::Closed) => {
                                info!("Command broadcaster closed");
                                // Stop polling a closed broadcaster.
                                command_rx = None;
                            }
                        }
                    }
                    maybe_item = rx.recv() => {
                        if let Some(QueuedItem { record }) = maybe_item {
                            handler(record).await;
                        } else {
                            // All senders dropped: nothing more can arrive.
                            break;
                        }
                    }
                }
            }
            // Wake every shutdown waiter. Waiters register interest before
            // sending the shutdown request, so this wakeup cannot be lost.
            queue.drained.notify_waiters();
        });
        true
    }

    /// Whether the network is currently believed to be available.
    pub fn is_network_available(&self) -> bool {
        self.network_available.load(Ordering::Relaxed)
    }

    /// Manually override the network-availability flag.
    pub fn set_network_available(&self, available: bool) {
        self.network_available.store(available, Ordering::Relaxed);
    }

    /// Pause sending and drop all currently queued items (does not shut down the queue)
    pub fn pause(&self) {
        self.network_available.store(false, Ordering::Relaxed);
        // `notify_one` stores a permit, so the purge is honored even if the
        // worker is currently busy inside the handler rather than parked in
        // `select!` (with `notify_waiters` it would be silently lost).
        self.purge_request.notify_one();
    }

    /// Resume sending
    pub fn resume(&self) {
        self.network_available.store(true, Ordering::Relaxed);
    }

    /// Access the optional command broadcaster this queue was built with.
    pub fn get_command_broadcaster(&self) -> Option<&Arc<CommandBroadcaster>> {
        self.command_broadcaster.as_ref()
    }

    /// Graceful shutdown: stop accepting new items and wait for the consumer to drain.
    ///
    /// Idempotent: only the first caller waits for the drain; subsequent
    /// calls return immediately.
    pub async fn graceful_shutdown(&self) {
        // Claim shutdown exactly once. Without this guard, a second call
        // would wait for a `drained` signal that already fired and hang.
        if self.is_shutdown.swap(true, Ordering::SeqCst) {
            return;
        }
        // Register interest in `drained` BEFORE requesting shutdown.
        // `Notify::notify_waiters` only wakes already-registered waiters, so
        // registering afterwards (as the previous code did) loses the wakeup
        // whenever the worker drains first — a permanent hang.
        let drained = self.drained.notified();
        tokio::pin!(drained);
        drained.as_mut().enable();
        // `notify_one` stores a permit, so the request is delivered even if
        // the worker is busy inside the handler at this moment.
        self.shutdown_request.notify_one();
        {
            // If no worker ever claimed the receiver there is nothing to
            // drain; waiting for `drained` would hang forever.
            let rx_guard = self.rx.lock().await;
            if rx_guard.is_some() {
                return;
            }
        }
        // Wait for the worker task to signal that it has drained and exited.
        drained.await;
    }
}

/// Ensure a queue exists in the given slot. If absent, create one with provided capacity.
/// Ensure a queue exists in the given slot. If absent, create one with provided capacity.
pub async fn ensure_queue<T: QueueRecord>(
    slot: &tokio::sync::Mutex<Option<Arc<ExporterQueue<T>>>>,
    capacity_items: usize,
    command_broadcaster: Option<Arc<CommandBroadcaster>>,
) -> Option<Arc<ExporterQueue<T>>> {
    let mut guard = slot.lock().await;
    match guard.as_ref() {
        // Slot already populated: hand back a clone of the existing queue.
        Some(existing) => Some(Arc::clone(existing)),
        // Slot empty: try to build a queue, cache it on success.
        None => match ExporterQueue::<T>::new(capacity_items, command_broadcaster).await {
            Ok(queue) => {
                *guard = Some(Arc::clone(&queue));
                Some(queue)
            }
            Err(_) => None,
        },
    }
}
