use std::any::Any;
use std::cmp::min;
use std::fmt::Formatter;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};

use datafusion::arrow::array::RecordBatch;
use datafusion::arrow::datatypes::SchemaRef;
use datafusion::error::DataFusionError;
use datafusion::execution::{RecordBatchStream, SendableRecordBatchStream, TaskContext};
use datafusion::physical_expr::{Partitioning, PhysicalSortExpr};
use datafusion::physical_plan::{DisplayAs, DisplayFormatType, ExecutionPlan};
use futures_core::Stream;

use engine::{CHUNK_SIZE, TableScanOptionRef};

use crate::mem_table::MemTableRef;

/// Physical plan node that scans an in-memory table as a single partition.
///
/// Created via [`MemTableScan::create`]; `execute` hands out a
/// [`MemTableReadStream`] that chunks the rows into record batches.
#[derive(Debug)]
pub struct MemTableScan {
    // Shared handle to the table being scanned.
    mem_table: MemTableRef,
    // Arrow schema of the batches this scan produces.
    schema: SchemaRef,
    // Scan options (e.g. a row `limit`) forwarded to the read stream.
    option: TableScanOptionRef,
}

/// Stream of record batches read from a [`MemTableRef`].
///
/// Emits rows `[0, read_len)` in chunks of `CHUNK_SIZE`, tracking progress
/// with `send_index`. The stream is synchronous: `poll_next` never pends.
#[derive(Debug)]
struct MemTableReadStream {
    // Table the rows are read from.
    mem_table: MemTableRef,
    // Schema reported to consumers and passed to each `read` call.
    schema: SchemaRef,
    // Index of the first row of the next batch to emit.
    send_index: usize,
    // Total number of rows to emit (table length, capped by `option.limit`).
    read_len: usize,
    // Scan options forwarded to the table's `read` method.
    option: TableScanOptionRef,
}

impl MemTableScan {
    #[inline]
    pub(super) fn create(mem_table: MemTableRef, schema: SchemaRef, option: TableScanOptionRef) -> Self {
        Self {
            mem_table,
            schema,
            option,
        }
    }
}

impl DisplayAs for MemTableScan {
    /// Renders this node in `EXPLAIN` output; the format type is ignored
    /// because there is nothing verbose to add.
    fn fmt_as(&self, _t: DisplayFormatType, f: &mut Formatter) -> std::fmt::Result {
        f.write_str("MemTableScan")
    }
}

impl ExecutionPlan for MemTableScan {
    fn as_any(&self) -> &dyn Any {
        self
    }

    fn schema(&self) -> SchemaRef {
        self.schema.clone()
    }

    fn output_partitioning(&self) -> Partitioning {
        Partitioning::RoundRobinBatch(1)
    }

    fn output_ordering(&self) -> Option<&[PhysicalSortExpr]> {
        None
    }

    fn children(&self) -> Vec<Arc<dyn ExecutionPlan>> {
        // debug!(target: "engine:mem", "call children");
        vec![]
    }

    fn with_new_children(self: Arc<Self>, _children: Vec<Arc<dyn ExecutionPlan>>) -> datafusion::common::Result<Arc<dyn ExecutionPlan>> {
        // debug!(target: "engine:mem", "call with_new_children, children len: {}", children.len());
        Ok(self.clone())
    }

    fn execute(&self, partition: usize, _context: Arc<TaskContext>) -> datafusion::common::Result<SendableRecordBatchStream> {
        assert_eq!(0, partition);
        Ok(Box::pin(MemTableReadStream::create(self.mem_table.clone(), self.option.clone())))
    }
}

impl MemTableReadStream {
    /// Builds a stream over `mem_table`, capping the total number of rows at
    /// `option.limit` when a limit is set.
    fn create(mem_table: MemTableRef, option: TableScanOptionRef) -> Self {
        // Take the read lock once for both the length and the schema,
        // instead of locking twice as two separate `mem_table.read()` calls.
        let (table_len, schema) = {
            let guard = mem_table.read();
            (guard.len(), Arc::new(guard.schema.as_schema()))
        };
        let read_len = match option.limit {
            Some(limit) => min(table_len, limit),
            None => table_len,
        };
        Self {
            mem_table,
            schema,
            send_index: 0,
            read_len,
            option,
        }
    }

    /// Returns the next batch of at most `CHUNK_SIZE` rows, or `None` once
    /// `read_len` rows have been emitted.
    fn next_batch(&mut self) -> Option<RecordBatch> {
        if self.send_index >= self.read_len {
            return None;
        }
        let end_index = min(self.send_index + CHUNK_SIZE, self.read_len);
        // NOTE(review): this takes the *write* lock on a read path —
        // presumably `read` requires `&mut`; confirm against MemTable.
        // The `unwrap` turns a failed read into a panic rather than an
        // `Err` item on the stream; propagating it would require changing
        // this method's signature and the `Stream` impl together.
        let batch = self
            .mem_table
            .write()
            .read(self.send_index..end_index, &self.schema, &*self.option)
            .unwrap();
        self.send_index = end_index;
        Some(batch)
    }
}

impl Stream for MemTableReadStream {
    type Item = Result<RecordBatch, DataFusionError>;

    /// Fully synchronous source: every poll is `Ready`, the waker is never
    /// registered. `get_mut` is fine because all fields are `Unpin`.
    fn poll_next(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let stream = self.get_mut();
        match stream.next_batch() {
            Some(batch) => Poll::Ready(Some(Ok(batch))),
            None => Poll::Ready(None),
        }
    }
}

impl RecordBatchStream for MemTableReadStream {
    fn schema(&self) -> SchemaRef {
        self.schema.clone()
    }
}