use serde::Deserialize;
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::Mutex as TokioMutex;
use tracing::info;

use crate::consumer::{ConsumeLogs, ConsumeMetrics, ConsumeTraces};
use crate::pipeline::{LogsProcessor, MetricsProcessor, TracesProcessor};
use pdata::internal::data::protogen::collector::logs::v1::ExportLogsServiceRequest;
use pdata::internal::data::protogen::collector::metrics::v1::ExportMetricsServiceRequest;
use pdata::internal::data::protogen::collector::trace::v1::ExportTraceServiceRequest;
use pdata::internal::data::protogen::metrics::v1::metric::Data as MetricData;

/// Configuration for batch processors.
#[derive(Debug, Clone, Deserialize)]
pub struct BatchProcessorConfig {
    /// Number of telemetry items (spans, metric data points, or log records)
    /// to accumulate before a batch is sent downstream.
    pub send_batch_size: usize,
    /// Flush interval in seconds: a partially-filled batch is emitted at
    /// least this often by the background task spawned in `wrap`.
    pub timeout: u64,
}

impl Default for BatchProcessorConfig {
    fn default() -> Self {
        Self {
            send_batch_size: 8192,
            timeout: 10,
        }
    }
}

// Traces
/// Batch processor for traces that accumulates spans before sending downstream.
#[derive(Debug, Clone)]
pub struct TracesBatchProcessor {
    // Size/timeout settings shared by all batch processor kinds.
    config: BatchProcessorConfig,
    // Spans accumulated since the last emitted batch.
    buffer: ExportTraceServiceRequest,
    // Number of spans currently in `buffer`; compared against
    // `config.send_batch_size` to decide when to emit.
    current_item_amount: usize,
}

impl TracesBatchProcessor {
    /// Create a new traces batch processor with the given configuration.
    pub fn new(config: BatchProcessorConfig) -> Self {
        Self {
            config,
            buffer: ExportTraceServiceRequest {
                resource_spans: Vec::new(),
            },
            current_item_amount: 0,
        }
    }

    /// Append the telemetry request to the buffer and check if the buffer is full
    pub fn append(
        &mut self,
        mut data: ExportTraceServiceRequest,
    ) -> Option<ExportTraceServiceRequest> {
        self.current_item_amount += data.count_items();
        self.buffer.resource_spans.append(&mut data.resource_spans);
        if self.current_item_amount >= self.config.send_batch_size {
            let batch = std::mem::take(&mut self.buffer);
            self.current_item_amount = 0;
            Some(batch)
        } else {
            None
        }
    }

    /// Flush the current buffer if it contains any items
    pub fn flush(&mut self) -> Option<ExportTraceServiceRequest> {
        if self.current_item_amount == 0 {
            return None;
        }
        let batch = std::mem::take(&mut self.buffer);
        self.current_item_amount = 0;
        Some(batch)
    }
}

impl Default for TracesBatchProcessor {
    fn default() -> Self {
        Self::new(BatchProcessorConfig::default())
    }
}

// Consumer that buffers incoming trace requests and forwards size-triggered
// batches to the next consumer in the pipeline.
struct TracesBatchConsumer {
    // Shared with the timeout-flush task spawned in `TracesProcessor::wrap`.
    batcher: Arc<TokioMutex<TracesBatchProcessor>>,
    next: Arc<dyn ConsumeTraces + Send + Sync + 'static>,
}

#[tonic::async_trait]
impl ConsumeTraces for TracesBatchConsumer {
    /// Buffer incoming spans; forward the accumulated batch downstream when
    /// the size threshold is crossed.
    async fn consume(&self, data: ExportTraceServiceRequest) {
        let mut guard = self.batcher.lock().await;
        let ready = guard.append(data);
        info!(
            "traces current_item_amount: {}/{}",
            guard.current_item_amount, guard.config.send_batch_size
        );
        // Release the lock before awaiting downstream so producers and the
        // timeout task are not blocked by slow consumers.
        drop(guard);

        if let Some(batch) = ready {
            self.next.consume(batch).await;
        }
    }
}

impl TracesProcessor for TracesBatchProcessor {
    /// Wrap `next` in a batching consumer. A background task periodically
    /// flushes any partially-filled batch so data is never held longer than
    /// the configured timeout.
    fn wrap(
        &self,
        next: Arc<dyn ConsumeTraces + Send + Sync + 'static>,
    ) -> Arc<dyn ConsumeTraces + Send + Sync + 'static> {
        let batcher = Arc::new(TokioMutex::new(TracesBatchProcessor::new(
            self.config.clone(),
        )));
        // Clamp to at least one second: `tokio::time::interval` panics when
        // given a zero period, which a `timeout: 0` config would produce.
        let timeout_secs = self.config.timeout.max(1);
        let batcher_for_task = batcher.clone();
        let next_for_task = next.clone();
        // NOTE(review): this task lives for the lifetime of the runtime; it
        // is never aborted when the returned consumer is dropped.
        tokio::spawn(async move {
            let mut ticker = tokio::time::interval(Duration::from_secs(timeout_secs));
            loop {
                ticker.tick().await;
                info!("traces batch timeout: {} seconds", timeout_secs);
                // Take the pending batch while holding the lock, but consume
                // it after releasing so producers are not blocked downstream.
                let batch = {
                    let mut guard = batcher_for_task.lock().await;
                    guard.flush()
                };
                if let Some(batch) = batch {
                    next_for_task.consume(batch).await;
                }
            }
        });
        Arc::new(TracesBatchConsumer { batcher, next })
    }
}

// Metrics
/// Batch processor for metrics that accumulates metrics before sending downstream.
#[derive(Debug, Clone)]
pub struct MetricsBatchProcessor {
    // Size/timeout settings shared by all batch processor kinds.
    config: BatchProcessorConfig,
    // Metrics accumulated since the last emitted batch.
    buffer: ExportMetricsServiceRequest,
    // Number of data points currently in `buffer`; compared against
    // `config.send_batch_size` to decide when to emit.
    current_item_amount: usize,
}

impl MetricsBatchProcessor {
    pub fn new(config: BatchProcessorConfig) -> Self {
        Self {
            config,
            buffer: ExportMetricsServiceRequest::default(),
            current_item_amount: 0,
        }
    }
    /// Append the telemetry request to the buffer and check if the buffer is full
    pub fn append(
        &mut self,
        mut data: ExportMetricsServiceRequest,
    ) -> Option<ExportMetricsServiceRequest> {
        self.current_item_amount += data.count_items();
        self.buffer
            .resource_metrics
            .append(&mut data.resource_metrics);
        if self.current_item_amount >= self.config.send_batch_size {
            let batch = std::mem::take(&mut self.buffer);
            self.current_item_amount = 0;
            Some(batch)
        } else {
            None
        }
    }

    /// Flush the current buffer if it contains any items
    pub fn flush(&mut self) -> Option<ExportMetricsServiceRequest> {
        if self.current_item_amount == 0 {
            return None;
        }
        let batch = std::mem::take(&mut self.buffer);
        self.current_item_amount = 0;
        Some(batch)
    }
}

impl Default for MetricsBatchProcessor {
    fn default() -> Self {
        Self::new(BatchProcessorConfig::default())
    }
}

// Consumer that buffers incoming metric requests and forwards size-triggered
// batches to the next consumer in the pipeline.
struct MetricsBatchConsumer {
    // Shared with the timeout-flush task spawned in `MetricsProcessor::wrap`.
    batcher: Arc<TokioMutex<MetricsBatchProcessor>>,
    next: Arc<dyn ConsumeMetrics + Send + Sync + 'static>,
}

#[tonic::async_trait]
impl ConsumeMetrics for MetricsBatchConsumer {
    /// Buffer incoming metrics; forward the accumulated batch downstream when
    /// the size threshold is crossed.
    async fn consume(&self, data: ExportMetricsServiceRequest) {
        let mut guard = self.batcher.lock().await;
        let ready = guard.append(data);
        info!(
            "metrics current_item_amount: {}/{}",
            guard.current_item_amount, guard.config.send_batch_size
        );
        // Release the lock before awaiting downstream so producers and the
        // timeout task are not blocked by slow consumers.
        drop(guard);

        if let Some(batch) = ready {
            self.next.consume(batch).await;
        }
    }
}

impl MetricsProcessor for MetricsBatchProcessor {
    /// Wrap `next` in a batching consumer. A background task periodically
    /// flushes any partially-filled batch so data is never held longer than
    /// the configured timeout.
    fn wrap(
        &self,
        next: Arc<dyn ConsumeMetrics + Send + Sync + 'static>,
    ) -> Arc<dyn ConsumeMetrics + Send + Sync + 'static> {
        let batcher = Arc::new(TokioMutex::new(MetricsBatchProcessor::new(
            self.config.clone(),
        )));
        // Clamp to at least one second: `tokio::time::interval` panics when
        // given a zero period, which a `timeout: 0` config would produce.
        let timeout_secs = self.config.timeout.max(1);
        let batcher_for_task = batcher.clone();
        let next_for_task = next.clone();
        // NOTE(review): this task lives for the lifetime of the runtime; it
        // is never aborted when the returned consumer is dropped.
        tokio::spawn(async move {
            let mut ticker = tokio::time::interval(Duration::from_secs(timeout_secs));
            loop {
                ticker.tick().await;
                info!("metrics batch timeout: {} seconds", timeout_secs);
                // Take the pending batch while holding the lock, but consume
                // it after releasing so producers are not blocked downstream.
                let batch = {
                    let mut guard = batcher_for_task.lock().await;
                    guard.flush()
                };
                if let Some(batch) = batch {
                    next_for_task.consume(batch).await;
                }
            }
        });
        Arc::new(MetricsBatchConsumer { batcher, next })
    }
}

// Logs
/// Batch processor for logs that accumulates log records before sending downstream.
#[derive(Debug, Clone)]
pub struct LogsBatchProcessor {
    // Size/timeout settings shared by all batch processor kinds.
    config: BatchProcessorConfig,
    // Log records accumulated since the last emitted batch.
    buffer: ExportLogsServiceRequest,
    // Number of log records currently in `buffer`; compared against
    // `config.send_batch_size` to decide when to emit.
    current_item_amount: usize,
}

impl LogsBatchProcessor {
    pub fn new(config: BatchProcessorConfig) -> Self {
        Self {
            config,
            buffer: ExportLogsServiceRequest::default(),
            current_item_amount: 0,
        }
    }
    /// Append the telemetry request to the buffer and check if the buffer is full
    pub fn append(
        &mut self,
        mut data: ExportLogsServiceRequest,
    ) -> Option<ExportLogsServiceRequest> {
        self.current_item_amount += data.count_items();
        self.buffer.resource_logs.append(&mut data.resource_logs);
        if self.current_item_amount >= self.config.send_batch_size {
            let batch = std::mem::take(&mut self.buffer);
            self.current_item_amount = 0;
            Some(batch)
        } else {
            None
        }
    }

    /// Flush the current buffer if it contains any items
    pub fn flush(&mut self) -> Option<ExportLogsServiceRequest> {
        if self.current_item_amount == 0 {
            return None;
        }
        let batch = std::mem::take(&mut self.buffer);
        self.current_item_amount = 0;
        Some(batch)
    }
}

impl Default for LogsBatchProcessor {
    fn default() -> Self {
        Self::new(BatchProcessorConfig::default())
    }
}

// Consumer that buffers incoming log requests and forwards size-triggered
// batches to the next consumer in the pipeline.
struct LogsBatchConsumer {
    // Shared with the timeout-flush task spawned in `LogsProcessor::wrap`.
    batcher: Arc<TokioMutex<LogsBatchProcessor>>,
    next: Arc<dyn ConsumeLogs + Send + Sync + 'static>,
}

#[tonic::async_trait]
impl ConsumeLogs for LogsBatchConsumer {
    /// Buffer incoming log records; forward the accumulated batch downstream
    /// when the size threshold is crossed.
    async fn consume(&self, data: ExportLogsServiceRequest) {
        let mut guard = self.batcher.lock().await;
        let ready = guard.append(data);
        info!(
            "logs current_item_amount: {}/{}",
            guard.current_item_amount, guard.config.send_batch_size
        );
        // Release the lock before awaiting downstream so producers and the
        // timeout task are not blocked by slow consumers.
        drop(guard);

        if let Some(batch) = ready {
            self.next.consume(batch).await;
        }
    }
}

impl LogsProcessor for LogsBatchProcessor {
    /// Wrap `next` in a batching consumer. A background task periodically
    /// flushes any partially-filled batch so data is never held longer than
    /// the configured timeout.
    fn wrap(
        &self,
        next: Arc<dyn ConsumeLogs + Send + Sync + 'static>,
    ) -> Arc<dyn ConsumeLogs + Send + Sync + 'static> {
        let batcher = Arc::new(TokioMutex::new(LogsBatchProcessor::new(
            self.config.clone(),
        )));
        // Clamp to at least one second: `tokio::time::interval` panics when
        // given a zero period, which a `timeout: 0` config would produce.
        let timeout_secs = self.config.timeout.max(1);
        let batcher_for_task = batcher.clone();
        let next_for_task = next.clone();
        // NOTE(review): this task lives for the lifetime of the runtime; it
        // is never aborted when the returned consumer is dropped.
        tokio::spawn(async move {
            let mut ticker = tokio::time::interval(Duration::from_secs(timeout_secs));
            loop {
                ticker.tick().await;
                info!("logs batch timeout: {} seconds", timeout_secs);
                // Take the pending batch while holding the lock, but consume
                // it after releasing so producers are not blocked downstream.
                let batch = {
                    let mut guard = batcher_for_task.lock().await;
                    guard.flush()
                };
                if let Some(batch) = batch {
                    next_for_task.consume(batch).await;
                }
            }
        });
        Arc::new(LogsBatchConsumer { batcher, next })
    }
}

/// Count the number of items in the OTLP request
pub trait CountItems {
    /// Number of leaf telemetry items (spans, metric data points, or log
    /// records) contained in the request, summed across all resource and
    /// scope groupings.
    fn count_items(&self) -> usize;
}

impl CountItems for ExportTraceServiceRequest {
    /// Total number of spans across all resource/scope groupings.
    fn count_items(&self) -> usize {
        let mut total = 0;
        for resource in &self.resource_spans {
            for scope in &resource.scope_spans {
                total += scope.spans.len();
            }
        }
        total
    }
}

impl CountItems for ExportMetricsServiceRequest {
    /// Total number of metric data points across all resource/scope
    /// groupings; a metric with no data payload contributes zero.
    fn count_items(&self) -> usize {
        let mut total = 0;
        for resource in &self.resource_metrics {
            for scope in &resource.scope_metrics {
                for metric in &scope.metrics {
                    total += match &metric.data {
                        Some(MetricData::Gauge(g)) => g.data_points.len(),
                        Some(MetricData::Sum(s)) => s.data_points.len(),
                        Some(MetricData::Histogram(h)) => h.data_points.len(),
                        Some(MetricData::ExponentialHistogram(h)) => h.data_points.len(),
                        Some(MetricData::Summary(s)) => s.data_points.len(),
                        None => 0,
                    };
                }
            }
        }
        total
    }
}

impl CountItems for ExportLogsServiceRequest {
    /// Total number of log records across all resource/scope groupings.
    fn count_items(&self) -> usize {
        let mut total = 0;
        for resource in &self.resource_logs {
            for scope in &resource.scope_logs {
                total += scope.log_records.len();
            }
        }
        total
    }
}
