//! OTLP/HTTP exporter.
//!
//! Sends OTLP protobuf payloads over HTTP POST to a configured endpoint.
//! - Supports optional gzip compression
//! - Best-effort delivery with timeout and exponential backoff retries
//! - Used for both Metrics and Traces via the `Consume*` traits
use crate::exporter::utils::encode_body;
use reqwest::Client;
use std::path::PathBuf;
use std::sync::{Arc, atomic::AtomicUsize};
use std::time::Duration;
use tokio::sync::Mutex;
use tokio::time::interval;
use tracing::{info, warn};

use pdata::internal::data::protogen::collector::{
    logs::v1::ExportLogsServiceRequest, metrics::v1::ExportMetricsServiceRequest,
    trace::v1::ExportTraceServiceRequest,
};

use crate::command::{Command, CommandBroadcaster};
use crate::config::Config;
use crate::consumer::{ConsumeLogs, ConsumeMetrics, ConsumeTraces};
use crate::exporter::queue::{ExporterQueue, QueueError, ensure_queue as ensure_queue_in_slot};
use crate::exporter::wal::{WalDataType, WalWriter};
use std::sync::atomic::{AtomicBool, Ordering};

/// Tunables for a single HTTP export attempt (compression, timeout, retry policy).
#[derive(Clone, Debug)]
pub struct HttpSendConfig {
    /// Gzip-compress request bodies (sets `Content-Encoding: gzip`).
    pub use_gzip: bool,
    /// Per-request timeout for the POST to the collector endpoint.
    pub timeout: Duration,
    /// Maximum number of retries after the first attempt (5xx / transport errors only).
    pub max_retries: usize,
    /// Initial backoff delay; doubled after each failed attempt.
    pub base_backoff: Duration,
}

/// OTLP/HTTP exporter with optional WAL-backed persistent queues.
///
/// Incoming telemetry is appended to the WAL (see the `Consume*` impls);
/// delivery happens via reader/sender pipelines started by `trigger_replay`.
pub struct OtlpHttpExporter {
    // Base collector URL; the OTLP signal path (/v1/traces etc.) is appended per request.
    endpoint: String,
    client: Client,
    cfg: HttpSendConfig,
    // One queue slot per signal; `None` when the queue has been dropped (link down).
    metrics_q: Mutex<Option<Arc<ExporterQueue<ExportMetricsServiceRequest>>>>,
    traces_q: Mutex<Option<Arc<ExporterQueue<ExportTraceServiceRequest>>>>,
    logs_q: Mutex<Option<Arc<ExporterQueue<ExportLogsServiceRequest>>>>,
    // Root directory of the WAL; `None` disables persistence entirely.
    wal_dir: Option<PathBuf>,
    wal_writer: Option<WalWriter>,
    // Single-flight guard so only one replay batch runs at a time.
    replay_in_progress: AtomicBool,
    // Parent cancellation token for the current replay batch; rebuilt per replay.
    cancel_token: Mutex<tokio_util::sync::CancellationToken>,
    // Handles of spawned reader tasks, awaited during `stop_workers`.
    active_tasks: Mutex<Vec<tokio::task::JoinHandle<()>>>,
    queue_capacity_items: usize,
}

impl OtlpHttpExporter {
    /// Start one telemetry pipeline: ensure the queue exists in `slot`, then
    /// spawn the sender worker and the WAL reader task for `data_type`.
    async fn start_pipeline<T: prost::Message + Default + Send + Sync + 'static>(
        self: &Arc<Self>,
        slot: &tokio::sync::Mutex<Option<Arc<ExporterQueue<T>>>>,
        data_type: WalDataType,
        root_dir: PathBuf,
        parent_cancel: tokio_util::sync::CancellationToken,
    ) {
        let Some(queue) = ensure_queue_in_slot::<T>(slot, self.queue_capacity_items, None).await
        else {
            warn!(target: "exporter::otlp_http", "Queue for {:?} not available; skip starting pipeline", data_type);
            return;
        };
        // The sender is started first so frames enqueued by the reader drain immediately.
        self.spawn_sender(&queue, data_type).await;
        // The reader gets a child token so cancelling the parent stops it too.
        let reader_cancel = parent_cancel.child_token();
        self.spawn_reader::<T>(queue.clone(), data_type, root_dir, reader_cancel)
            .await;
    }
    /// Build the exporter from `cfg`.
    ///
    /// Creates the shared HTTP client, the in-memory queue for each signal,
    /// and — when a persistent queue directory is configured — the WAL writer.
    /// Pipelines are not started here; see [`Self::trigger_replay`].
    ///
    /// # Errors
    /// Returns `QueueError` if any of the three exporter queues fails to build.
    ///
    /// # Panics
    /// Panics if the reqwest client cannot be constructed (a process-level
    /// misconfiguration, not a recoverable condition).
    pub async fn new(cfg: &Config) -> Result<Arc<Self>, QueueError> {
        // Decompression support for responses; request compression is handled
        // manually in `encode_body`.
        let client = Client::builder()
            .gzip(true)
            .brotli(true)
            .deflate(true)
            .build()
            .expect("build reqwest client");

        let endpoint = cfg.exporters.http.endpoint.clone();
        let http_cfg = HttpSendConfig {
            use_gzip: true,
            timeout: Duration::from_secs(10),
            max_retries: 3,
            base_backoff: Duration::from_millis(200),
        };

        // An empty/blank `persistent` path disables the WAL entirely.
        let wal_root = {
            let p = cfg.exporters.http.queue.persistent.trim();
            if p.is_empty() {
                None
            } else {
                Some(PathBuf::from(p))
            }
        };

        let wal_writer = wal_root.as_ref().map(|root| WalWriter::new(root.clone()));

        let capacity_items = cfg.exporters.http.queue.capacity_items as usize;
        let metrics_q =
            ExporterQueue::<ExportMetricsServiceRequest>::new(capacity_items, None).await?;
        let traces_q =
            ExporterQueue::<ExportTraceServiceRequest>::new(capacity_items, None).await?;
        let logs_q = ExporterQueue::<ExportLogsServiceRequest>::new(capacity_items, None).await?;

        // The queues and `wal_root` are moved in directly: they are not used
        // again in this scope, so the previous `.clone()` calls were redundant.
        Ok(Arc::new(Self {
            endpoint,
            client,
            cfg: http_cfg,
            metrics_q: Mutex::new(Some(metrics_q)),
            traces_q: Mutex::new(Some(traces_q)),
            logs_q: Mutex::new(Some(logs_q)),
            wal_dir: wal_root,
            wal_writer,
            replay_in_progress: AtomicBool::new(false),
            cancel_token: Mutex::new(tokio_util::sync::CancellationToken::new()),
            active_tasks: Mutex::new(Vec::new()),
            queue_capacity_items: capacity_items,
        }))
    }

    /// Start (or restart) the WAL replay pipelines for all three signals.
    ///
    /// No-op when a replay is already in progress or when no WAL directory is
    /// configured. The in-progress guard is cleared by `stop_workers`.
    pub async fn trigger_replay(self: &Arc<Self>) {
        // Single-flight guard: only one replay batch at a time.
        if self.replay_in_progress.swap(true, Ordering::Relaxed) {
            return;
        }
        let Some(root) = self.wal_dir.clone() else {
            // Fix: release the guard before returning. Previously the flag
            // stayed `true` forever when no WAL dir was configured, so every
            // later call short-circuited even though nothing was running.
            self.replay_in_progress.store(false, Ordering::Relaxed);
            return;
        };

        // Rebuild a fresh cancel token for this replay batch so a token
        // cancelled by a previous `stop_workers` is not reused.
        let parent = {
            let mut guard = self.cancel_token.lock().await;
            *guard = tokio_util::sync::CancellationToken::new();
            guard.clone()
        };

        self.start_pipeline::<ExportMetricsServiceRequest>(&self.metrics_q, WalDataType::Metrics, root.clone(), parent.clone()).await;
        self.start_pipeline::<ExportTraceServiceRequest>(&self.traces_q, WalDataType::Traces, root.clone(), parent.clone()).await;
        self.start_pipeline::<ExportLogsServiceRequest>(&self.logs_q, WalDataType::Logs, root, parent).await;
    }

    /// Stop all workers (readers and senders) gracefully.
    ///
    /// Cancels the shared token (stopping readers), then waits up to 5 seconds
    /// per spawned task before declaring it timed out. Queues are deliberately
    /// not drained here — the caller decides whether to drop them (see
    /// `subscribe_to_commands`). Finally clears the replay-in-progress guard.
    pub async fn stop_workers(&self) {
        info!(target: "exporter::otlp_http", "Stopping all WAL workers gracefully");

        // Stop only readers; drop queues to release memory but do NOT graceful_shutdown to avoid processing backlog

        // Cancel reader tasks. The previous try_lock-then-lock fallback was
        // redundant: awaiting the lock covers both the contended and the
        // uncontended case.
        self.cancel_token.lock().await.cancel();

        // Take ownership of the handles so the lock is not held while joining.
        let handles = {
            let mut tasks = self.active_tasks.lock().await;
            std::mem::take(&mut *tasks)
        };

        info!(target: "exporter::otlp_http", "Waiting for {} reader tasks to complete", handles.len());

        // Bound each join with a 5s timeout so a stuck task cannot block shutdown.
        let join_futures: Vec<_> = handles
            .into_iter()
            .map(|handle| async move { tokio::time::timeout(Duration::from_secs(5), handle).await })
            .collect();

        let results = futures::future::join_all(join_futures).await;

        let mut completed = 0;
        let mut timed_out = 0;
        for result in results {
            match result {
                // A task that finished — including one that panicked or was
                // aborted — counts as completed for shutdown purposes.
                Ok(_) => completed += 1,
                Err(_) => timed_out += 1,
            }
        }

        info!(target: "exporter::otlp_http", "Worker shutdown complete: {} completed, {} timed out", completed, timed_out);
        // Allow a future LinkUp to start a new replay batch.
        self.replay_in_progress.store(false, Ordering::Relaxed);
    }

    /// Spawn the queue worker that POSTs each dequeued message and advances
    /// the persisted WAL "sent" offset after every successful delivery.
    async fn spawn_sender<T>(self: &Arc<Self>, q: &Arc<ExporterQueue<T>>, data_type: WalDataType)
    where
        T: prost::Message + Send + Sync + 'static,
    {
        info!(target: "exporter::otlp_http", "Spawning sender for data type: {}", data_type);
        // Clippy `map_clone`: `Option::clone` replaces `.as_ref().map(|w| w.clone())`.
        let wal_writer = self.wal_writer.clone();
        // Resume from the last persisted sent offset; without a WAL start at 0.
        let start_index = if let Some(ref writer) = wal_writer {
            writer
                .read_state(data_type)
                .await
                .unwrap_or_default()
                .sent_index
        } else {
            0
        };

        // Shared between worker invocations so the offset accumulates in order.
        let shared_index = Arc::new(Mutex::new(start_index));
        let exp = self.clone();

        let _ = q
            .start_worker(move |data: T| {
                let exp = exp.clone();
                let shared_index = shared_index.clone();
                let wal_writer = wal_writer.clone();

                async move {
                    Self::handle_export_data(exp, data, data_type, shared_index, wal_writer).await
                }
            })
            .await;
    }

    /// Export one message and, on success, advance the shared sent offset
    /// (and the persisted WAL state, when a writer is present).
    async fn handle_export_data<T>(
        exp: Arc<Self>,
        data: T,
        data_type: WalDataType,
        shared_index: Arc<Mutex<u64>>,
        wal_writer: Option<WalWriter>,
    ) where
        T: prost::Message,
    {
        let (payload, encoding) = exp.encode_body(&data);
        if !exp.post_bytes(payload, encoding, data_type).await {
            // Delivery failed: leave the offset untouched so the frame can be
            // replayed later.
            return;
        }

        // The 4-byte addition assumes a length-prefixed frame layout on disk
        // — matches the `4u64 +` accounting used throughout; confirm against
        // the WAL writer if the format changes.
        let frame_size = 4u64 + data.encoded_len() as u64;
        let mut sent_index = shared_index.lock().await;
        *sent_index = sent_index.saturating_add(frame_size);

        if let Some(ref writer) = wal_writer {
            let _ = writer.update_sent_state(data_type, *sent_index).await;
        }
    }

    /// Spawn a background task that tails the WAL for `data_type` once per
    /// second and enqueues decoded frames for the sender, until `cancel_token`
    /// fires. The task handle is tracked for `stop_workers`.
    async fn spawn_reader<T: prost::Message + Default + Send + Sync + 'static>(
        self: &Arc<Self>,
        q: Arc<ExporterQueue<T>>,
        data_type: WalDataType,
        root_dir: PathBuf,
        cancel_token: tokio_util::sync::CancellationToken,
    ) {
        info!(target: "exporter::otlp_http", "Spawning persistent reader for data type: {}", data_type);

        let handle = tokio::spawn(async move {
            let wal_writer = WalWriter::new(root_dir.clone());
            // Resume from the last persisted "sent" offset so frames delivered
            // before a restart are not re-enqueued.
            let mut last_read_index = wal_writer
                .read_state(data_type)
                .await
                .unwrap_or_default()
                .sent_index;
            let mut poll_interval = interval(Duration::from_secs(1));

            loop {
                tokio::select! {
                    _ = cancel_token.cancelled() => {
                        info!(target: "exporter::otlp_http", "Reader for {} received cancellation signal, shutting down gracefully", data_type);
                        break;
                    }
                    _ = poll_interval.tick() => {
                        let read_count = Arc::new(AtomicUsize::new(0));
                        let wal_path = data_type.wal_path(&root_dir);
                        // Snapshot the file size before reading; on a metadata
                        // error fall back to the current index so no progress
                        // is recorded this tick.
                        let end = match tokio::fs::metadata(&wal_path).await { Ok(m) => m.len(), Err(_) => last_read_index };

                        // Stream frames starting at the last offset. Frames
                        // that fail to decode are silently skipped (the counter
                        // only advances on successful decode).
                        let _ = wal_writer.read_frames_streaming(data_type, last_read_index, |payload_bytes| {
                            let q = q.clone();
                            let counter = read_count.clone();
                            async move {
                                if let Ok(msg) = T::decode(payload_bytes.as_slice()) {
                                    counter.fetch_add(1, Ordering::Relaxed);
                                    let _ = q.enqueue(msg).await;
                                }
                            }
                        }).await;

                        let frames_read = read_count.load(Ordering::Relaxed);
                        if frames_read > 0 {
                            // NOTE(review): the index advances to the pre-read
                            // size snapshot. If `read_frames_streaming` reads
                            // past `end` (frames appended mid-read), those
                            // frames would be re-enqueued next tick — confirm
                            // the streaming read stops at the snapshot.
                            last_read_index = end;
                            info!(target: "exporter::otlp_http", "Read {} frames for {}, advanced index to {} (snapshot)", frames_read, data_type, last_read_index);
                        }
                    }
                }
            }

            info!(target: "exporter::otlp_http", "Reader for {} has shut down", data_type);
        });

        {
            // Track the handle so `stop_workers` can join it on shutdown.
            let mut tasks = self.active_tasks.lock().await;
            tasks.push(handle);
        }
    }

    /// Subscribe to an external command broadcaster to trigger replay when link resumes.
    ///
    /// `LinkUp` starts the replay pipelines; `LinkDown` stops the workers,
    /// drops the queues to reclaim memory, and compacts the WAL files.
    pub fn subscribe_to_commands(self: &Arc<Self>, broadcaster: Arc<CommandBroadcaster>) {
        info!(target: "exporter::otlp_http", "Subscribing to commands");
        let exp = self.clone();
        let mut rx = broadcaster.subscribe();
        tokio::spawn(async move {
            // Runs until the broadcaster is gone (recv yields Err).
            while let Ok(command) = rx.recv().await {
                match command {
                    Command::LinkUp => {
                        info!(target: "exporter::otlp_http", "received command: Link up, triggering replay");
                        exp.trigger_replay().await;
                    }
                    Command::LinkDown => {
                        info!(target: "exporter::otlp_http", "received command: Link down, stopping workers");
                        // cancel readers first
                        exp.stop_workers().await;
                        // drop queues to reclaim memory; each guard is a
                        // temporary released at the end of its statement
                        *exp.metrics_q.lock().await = None;
                        *exp.traces_q.lock().await = None;
                        *exp.logs_q.lock().await = None;
                        if let Some(writer) = &exp.wal_writer {
                            let _ = writer.compact(WalDataType::Traces).await;
                            let _ = writer.compact(WalDataType::Metrics).await;
                            let _ = writer.compact(WalDataType::Logs).await;
                        }
                    }
                }
            }
        });
    }

    /// Encode an OTLP protobuf message into a request body, optionally gzip-compressed.
    ///
    /// Returns the encoded bytes together with the `Content-Encoding` header
    /// value to send with them (`None` when no compression was applied).
    fn encode_body<T: prost::Message>(&self, data: &T) -> (Vec<u8>, Option<&'static str>) {
        encode_body(self.cfg.use_gzip, data)
    }

    /// Perform a POST with retries and exponential backoff for transient failures.
    ///
    /// payload_bytes: protobuf-encoded request body
    /// content_encoding_header: e.g. Some("gzip") when compression is enabled
    /// data_type: The type of data being sent (for endpoint path)
    async fn post_bytes(
        &self,
        payload_bytes: Vec<u8>,
        content_encoding_header: Option<&'static str>,
        data_type: WalDataType,
    ) -> bool {
        let mut attempt = 0usize;
        let mut backoff = self.cfg.base_backoff;

        // Build the full URL with OTLP-specific path
        let full_url = match data_type {
            WalDataType::Traces => format!("{}/v1/traces", self.endpoint),
            WalDataType::Metrics => format!("{}/v1/metrics", self.endpoint),
            WalDataType::Logs => format!("{}/v1/logs", self.endpoint),
        };

        loop {
            let mut req_builder = self
                .client
                .post(&full_url)
                .header(reqwest::header::CONTENT_TYPE, "application/x-protobuf")
                .header(reqwest::header::ACCEPT, "application/x-protobuf");
            if let Some(enc) = content_encoding_header {
                req_builder = req_builder.header(reqwest::header::CONTENT_ENCODING, enc);
            }

            let resp = req_builder.body(payload_bytes.clone()).send().await;
            match resp {
                Ok(r) => {
                    let status = r.status();
                    if status.is_success() {
                        return true;
                    } else if status.is_server_error() && attempt < self.cfg.max_retries {
                        // HTTP error
                        warn!(target: "exporter::otlp_http", "HTTP 5xx, retrying attempt={} status={} endpoint={}", attempt + 1, status, full_url);
                    } else {
                        warn!(target: "exporter::otlp_http", "HTTP export failed: status={} endpoint={}", status, full_url);
                        return false;
                    }
                }
                // Not sent out error
                Err(err) => {
                    if attempt < self.cfg.max_retries {
                        warn!(target: "exporter::otlp_http", "HTTP error, retrying attempt={} endpoint={} error={}", attempt + 1, full_url, err);
                    } else {
                        warn!(target: "exporter::otlp_http", "HTTP export error (giving up) endpoint={} error={}", full_url, err);
                        return false;
                    }
                }
            }

            tokio::time::sleep(backoff).await;
            backoff = std::cmp::min(backoff * 2, std::time::Duration::from_secs(120));
            attempt += 1;
        }
    }
}

#[tonic::async_trait]
impl ConsumeMetrics for OtlpHttpExporter {
    /// Append the incoming metrics batch to the WAL; delivery happens later
    /// via the replay pipeline. A no-op when no WAL is configured.
    async fn consume(&self, data: ExportMetricsServiceRequest) {
        if let Some(writer) = self.wal_writer.as_ref() {
            writer.append(&data).await;
        }
    }
}

#[tonic::async_trait]
impl ConsumeTraces for OtlpHttpExporter {
    /// Append the incoming trace batch to the WAL; delivery happens later
    /// via the replay pipeline. A no-op when no WAL is configured.
    async fn consume(&self, data: ExportTraceServiceRequest) {
        if let Some(writer) = self.wal_writer.as_ref() {
            writer.append(&data).await;
        }
    }
}

#[tonic::async_trait]
impl ConsumeLogs for OtlpHttpExporter {
    /// Append the incoming log batch to the WAL; delivery happens later
    /// via the replay pipeline. A no-op when no WAL is configured.
    async fn consume(&self, data: ExportLogsServiceRequest) {
        if let Some(writer) = self.wal_writer.as_ref() {
            writer.append(&data).await;
        }
    }
}
