// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

//! Defines physical expressions that can be evaluated at runtime during query execution

use std::cell::RefCell;
use std::fmt;
use std::rc::Rc;
use std::sync::Arc;

use crate::error::{ExecutionError, Result};
use crate::logical_plan::{Operator, ScalarValue};
use crate::physical_plan::common::get_scalar_value;
use crate::physical_plan::{Accumulator, AggregateExpr, PhysicalExpr};
use arrow::array::{
    ArrayRef, BooleanArray, Float32Array, Float64Array, Int16Array, Int32Array,
    Int64Array, Int8Array, StringArray, TimestampNanosecondArray, UInt16Array,
    UInt32Array, UInt64Array, UInt8Array,
};
use arrow::array::{
    Float32Builder, Float64Builder, Int16Builder, Int32Builder, Int64Builder,
    Int8Builder, StringBuilder, UInt16Builder, UInt32Builder, UInt64Builder,
    UInt8Builder,
};
use arrow::compute;
use arrow::compute::kernels;
use arrow::compute::kernels::arithmetic::{add, divide, multiply, subtract};
use arrow::compute::kernels::boolean::{and, or};
use arrow::compute::kernels::comparison::{eq, gt, gt_eq, lt, lt_eq, neq};
use arrow::compute::kernels::comparison::{
    eq_utf8, gt_eq_utf8, gt_utf8, like_utf8, lt_eq_utf8, lt_utf8, neq_utf8, nlike_utf8,
};
use arrow::compute::kernels::sort::{SortColumn, SortOptions};
use arrow::datatypes::{DataType, Schema, TimeUnit};
use arrow::record_batch::RecordBatch;

/// Represents the column at a given index in a RecordBatch
#[derive(Debug)]
pub struct Column {
    // Name of the column as it appears in the input schema
    name: String,
}

impl Column {
    /// Create a new column expression referencing a column by name
    pub fn new(name: &str) -> Self {
        Column {
            name: String::from(name),
        }
    }
}

impl fmt::Display for Column {
    /// A column displays as its bare name
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str(&self.name)
    }
}

impl PhysicalExpr for Column {
    /// Get the data type of this expression, given the schema of the input
    fn data_type(&self, input_schema: &Schema) -> Result<DataType> {
        let field = input_schema.field_with_name(&self.name)?;
        Ok(field.data_type().clone())
    }

    /// Decide whether this expression is nullable, given the schema of the input
    fn nullable(&self, input_schema: &Schema) -> Result<bool> {
        let field = input_schema.field_with_name(&self.name)?;
        Ok(field.is_nullable())
    }

    /// Evaluate the expression by looking up the named column in the batch
    fn evaluate(&self, batch: &RecordBatch) -> Result<ArrayRef> {
        let index = batch.schema().index_of(&self.name)?;
        Ok(batch.column(index).clone())
    }
}

/// Create a column expression
pub fn col(name: &str) -> Arc<dyn PhysicalExpr> {
    let column = Column::new(name);
    Arc::new(column)
}

/// SUM aggregate expression
#[derive(Debug)]
pub struct Sum {
    // The input expression being summed
    expr: Arc<dyn PhysicalExpr>,
}

impl Sum {
    /// Create a new SUM aggregate function
    pub fn new(expr: Arc<dyn PhysicalExpr>) -> Self {
        Sum { expr }
    }
}

impl AggregateExpr for Sum {
    /// SUM widens signed integers to Int64 and unsigned to UInt64;
    /// floats keep their width.
    fn data_type(&self, input_schema: &Schema) -> Result<DataType> {
        let input_type = self.expr.data_type(input_schema)?;
        match input_type {
            DataType::Int8
            | DataType::Int16
            | DataType::Int32
            | DataType::Int64 => Ok(DataType::Int64),
            DataType::UInt8
            | DataType::UInt16
            | DataType::UInt32
            | DataType::UInt64 => Ok(DataType::UInt64),
            DataType::Float32 => Ok(DataType::Float32),
            DataType::Float64 => Ok(DataType::Float64),
            other => Err(ExecutionError::General(format!(
                "SUM does not support {:?}",
                other
            ))),
        }
    }

    fn nullable(&self, _input_schema: &Schema) -> Result<bool> {
        // null should be returned if no rows are aggregated
        Ok(true)
    }

    fn evaluate_input(&self, batch: &RecordBatch) -> Result<ArrayRef> {
        self.expr.evaluate(batch)
    }

    fn create_accumulator(&self) -> Rc<RefCell<dyn Accumulator>> {
        let accumulator = SumAccumulator { sum: None };
        Rc::new(RefCell::new(accumulator))
    }

    fn create_reducer(&self, column_name: &str) -> Arc<dyn AggregateExpr> {
        // Partial sums are combined by summing again
        let input = Arc::new(Column::new(column_name));
        Arc::new(Sum::new(input))
    }
}

// Folds one typed value into the running sum held in `$SELF.sum`, casting the
// value to the accumulator type `$TY` and storing it as the
// `ScalarValue::$SCALAR_VARIANT` variant. Finding a different variant already
// stored is an internal error (the variant is fixed by the first value seen).
//
// NOTE(review): `$ARRAY_TYPE` is not used by the expansion; it is kept so
// existing call sites that pass it keep compiling.
macro_rules! sum_accumulate {
    ($SELF:ident, $VALUE:expr, $ARRAY_TYPE:ident, $SCALAR_VARIANT:ident, $TY:ty) => {{
        $SELF.sum = match $SELF.sum {
            Some(ScalarValue::$SCALAR_VARIANT(n)) => {
                Some(ScalarValue::$SCALAR_VARIANT(n + $VALUE as $TY))
            }
            Some(_) => {
                return Err(ExecutionError::InternalError(
                    "Unexpected ScalarValue variant".to_string(),
                ))
            }
            None => Some(ScalarValue::$SCALAR_VARIANT($VALUE as $TY)),
        };
    }};
}

#[derive(Debug)]
struct SumAccumulator {
    // Running sum, or None until the first value is accumulated; the
    // ScalarValue variant is fixed by the first value seen
    sum: Option<ScalarValue>,
}

impl Accumulator for SumAccumulator {
    fn accumulate_scalar(&mut self, value: Option<ScalarValue>) -> Result<()> {
        if let Some(value) = value {
            match value {
                ScalarValue::Int8(value) => {
                    sum_accumulate!(self, value, Int8Array, Int64, i64);
                }
                ScalarValue::Int16(value) => {
                    sum_accumulate!(self, value, Int16Array, Int64, i64);
                }
                ScalarValue::Int32(value) => {
                    sum_accumulate!(self, value, Int32Array, Int64, i64);
                }
                ScalarValue::Int64(value) => {
                    sum_accumulate!(self, value, Int64Array, Int64, i64);
                }
                ScalarValue::UInt8(value) => {
                    sum_accumulate!(self, value, UInt8Array, UInt64, u64);
                }
                ScalarValue::UInt16(value) => {
                    sum_accumulate!(self, value, UInt16Array, UInt64, u64);
                }
                ScalarValue::UInt32(value) => {
                    sum_accumulate!(self, value, UInt32Array, UInt64, u64);
                }
                ScalarValue::UInt64(value) => {
                    sum_accumulate!(self, value, UInt64Array, UInt64, u64);
                }
                ScalarValue::Float32(value) => {
                    sum_accumulate!(self, value, Float32Array, Float32, f32);
                }
                ScalarValue::Float64(value) => {
                    sum_accumulate!(self, value, Float64Array, Float64, f64);
                }
                other => {
                    return Err(ExecutionError::General(format!(
                        "SUM does not support {:?}",
                        other
                    )))
                }
            }
        }
        Ok(())
    }

    fn accumulate_batch(&mut self, array: &ArrayRef) -> Result<()> {
        let sum = match array.data_type() {
            DataType::UInt8 => {
                match compute::sum(array.as_any().downcast_ref::<UInt8Array>().unwrap()) {
                    Some(n) => Ok(Some(ScalarValue::UInt8(n))),
                    None => Ok(None),
                }
            }
            DataType::UInt16 => {
                match compute::sum(array.as_any().downcast_ref::<UInt16Array>().unwrap())
                {
                    Some(n) => Ok(Some(ScalarValue::UInt16(n))),
                    None => Ok(None),
                }
            }
            DataType::UInt32 => {
                match compute::sum(array.as_any().downcast_ref::<UInt32Array>().unwrap())
                {
                    Some(n) => Ok(Some(ScalarValue::UInt32(n))),
                    None => Ok(None),
                }
            }
            DataType::UInt64 => {
                match compute::sum(array.as_any().downcast_ref::<UInt64Array>().unwrap())
                {
                    Some(n) => Ok(Some(ScalarValue::UInt64(n))),
                    None => Ok(None),
                }
            }
            DataType::Int8 => {
                match compute::sum(array.as_any().downcast_ref::<Int8Array>().unwrap()) {
                    Some(n) => Ok(Some(ScalarValue::Int8(n))),
                    None => Ok(None),
                }
            }
            DataType::Int16 => {
                match compute::sum(array.as_any().downcast_ref::<Int16Array>().unwrap()) {
                    Some(n) => Ok(Some(ScalarValue::Int16(n))),
                    None => Ok(None),
                }
            }
            DataType::Int32 => {
                match compute::sum(array.as_any().downcast_ref::<Int32Array>().unwrap()) {
                    Some(n) => Ok(Some(ScalarValue::Int32(n))),
                    None => Ok(None),
                }
            }
            DataType::Int64 => {
                match compute::sum(array.as_any().downcast_ref::<Int64Array>().unwrap()) {
                    Some(n) => Ok(Some(ScalarValue::Int64(n))),
                    None => Ok(None),
                }
            }
            DataType::Float32 => {
                match compute::sum(array.as_any().downcast_ref::<Float32Array>().unwrap())
                {
                    Some(n) => Ok(Some(ScalarValue::Float32(n))),
                    None => Ok(None),
                }
            }
            DataType::Float64 => {
                match compute::sum(array.as_any().downcast_ref::<Float64Array>().unwrap())
                {
                    Some(n) => Ok(Some(ScalarValue::Float64(n))),
                    None => Ok(None),
                }
            }
            _ => Err(ExecutionError::ExecutionError(
                "Unsupported data type for SUM".to_string(),
            )),
        }?;
        self.accumulate_scalar(sum)
    }

    fn get_value(&self) -> Result<Option<ScalarValue>> {
        Ok(self.sum.clone())
    }
}

/// Create a sum expression
pub fn sum(expr: Arc<dyn PhysicalExpr>) -> Arc<dyn AggregateExpr> {
    let aggregate = Sum::new(expr);
    Arc::new(aggregate)
}

/// AVG aggregate expression
#[derive(Debug)]
pub struct Avg {
    // The input expression being averaged
    expr: Arc<dyn PhysicalExpr>,
}

impl Avg {
    /// Create a new AVG aggregate function
    pub fn new(expr: Arc<dyn PhysicalExpr>) -> Self {
        Avg { expr }
    }
}

impl AggregateExpr for Avg {
    /// AVG always produces Float64 for any numeric input
    fn data_type(&self, input_schema: &Schema) -> Result<DataType> {
        let input_type = self.expr.data_type(input_schema)?;
        let is_numeric = match input_type {
            DataType::Int8
            | DataType::Int16
            | DataType::Int32
            | DataType::Int64
            | DataType::UInt8
            | DataType::UInt16
            | DataType::UInt32
            | DataType::UInt64
            | DataType::Float32
            | DataType::Float64 => true,
            _ => false,
        };
        if is_numeric {
            Ok(DataType::Float64)
        } else {
            Err(ExecutionError::General(format!(
                "AVG does not support {:?}",
                input_type
            )))
        }
    }

    fn nullable(&self, _input_schema: &Schema) -> Result<bool> {
        // null should be returned if no rows are aggregated
        Ok(true)
    }

    fn evaluate_input(&self, batch: &RecordBatch) -> Result<ArrayRef> {
        self.expr.evaluate(batch)
    }

    fn create_accumulator(&self) -> Rc<RefCell<dyn Accumulator>> {
        let accumulator = AvgAccumulator {
            sum: None,
            count: None,
        };
        Rc::new(RefCell::new(accumulator))
    }

    fn create_reducer(&self, column_name: &str) -> Arc<dyn AggregateExpr> {
        let input = Arc::new(Column::new(column_name));
        Arc::new(Avg::new(input))
    }
}

// Folds one value into the running (sum, count) pair held by the AVG
// accumulator, casting the value to f64. Both fields are initialised together
// on the first value seen.
//
// NOTE(review): `$ARRAY_TYPE` is not used by the expansion; it is kept so
// existing call sites that pass it keep compiling.
macro_rules! avg_accumulate {
    ($SELF:ident, $VALUE:expr, $ARRAY_TYPE:ident) => {{
        match ($SELF.sum, $SELF.count) {
            (Some(sum), Some(count)) => {
                $SELF.sum = Some(sum + $VALUE as f64);
                $SELF.count = Some(count + 1);
            }
            _ => {
                $SELF.sum = Some($VALUE as f64);
                $SELF.count = Some(1);
            }
        };
    }};
}
#[derive(Debug)]
struct AvgAccumulator {
    // Running sum of all accumulated values, cast to f64
    sum: Option<f64>,
    // Number of values accumulated so far; None until the first value
    count: Option<i64>,
}

impl Accumulator for AvgAccumulator {
    /// Fold one numeric scalar into the running (sum, count) pair.
    /// `None` is a no-op; non-numeric values are an error.
    fn accumulate_scalar(&mut self, value: Option<ScalarValue>) -> Result<()> {
        let value = match value {
            Some(value) => value,
            None => return Ok(()),
        };
        match value {
            ScalarValue::Int8(v) => avg_accumulate!(self, v, Int8Array),
            ScalarValue::Int16(v) => avg_accumulate!(self, v, Int16Array),
            ScalarValue::Int32(v) => avg_accumulate!(self, v, Int32Array),
            ScalarValue::Int64(v) => avg_accumulate!(self, v, Int64Array),
            ScalarValue::UInt8(v) => avg_accumulate!(self, v, UInt8Array),
            ScalarValue::UInt16(v) => avg_accumulate!(self, v, UInt16Array),
            ScalarValue::UInt32(v) => avg_accumulate!(self, v, UInt32Array),
            ScalarValue::UInt64(v) => avg_accumulate!(self, v, UInt64Array),
            ScalarValue::Float32(v) => avg_accumulate!(self, v, Float32Array),
            ScalarValue::Float64(v) => avg_accumulate!(self, v, Float64Array),
            other => {
                return Err(ExecutionError::General(format!(
                    "AVG does not support {:?}",
                    other
                )))
            }
        }
        Ok(())
    }

    /// Accumulate every row of the array, one scalar at a time.
    fn accumulate_batch(&mut self, array: &ArrayRef) -> Result<()> {
        for row in 0..array.len() {
            let value = get_scalar_value(array, row)?;
            self.accumulate_scalar(value)?;
        }
        Ok(())
    }

    /// Return sum/count as Float64, or `None` when nothing was accumulated.
    fn get_value(&self) -> Result<Option<ScalarValue>> {
        if let (Some(sum), Some(count)) = (self.sum, self.count) {
            Ok(Some(ScalarValue::Float64(sum / count as f64)))
        } else {
            Ok(None)
        }
    }
}

/// Create an avg expression
pub fn avg(expr: Arc<dyn PhysicalExpr>) -> Arc<dyn AggregateExpr> {
    let aggregate = Avg::new(expr);
    Arc::new(aggregate)
}

/// MAX aggregate expression
#[derive(Debug)]
pub struct Max {
    // The input expression whose maximum is computed
    expr: Arc<dyn PhysicalExpr>,
}

impl Max {
    /// Create a new MAX aggregate function
    pub fn new(expr: Arc<dyn PhysicalExpr>) -> Self {
        Max { expr }
    }
}

impl AggregateExpr for Max {
    /// MAX preserves the data type of its input
    fn data_type(&self, input_schema: &Schema) -> Result<DataType> {
        self.expr.data_type(input_schema)
    }

    fn nullable(&self, _input_schema: &Schema) -> Result<bool> {
        // null should be returned if no rows are aggregated
        Ok(true)
    }

    fn evaluate_input(&self, batch: &RecordBatch) -> Result<ArrayRef> {
        self.expr.evaluate(batch)
    }

    fn create_accumulator(&self) -> Rc<RefCell<dyn Accumulator>> {
        let accumulator = MaxAccumulator { max: None };
        Rc::new(RefCell::new(accumulator))
    }

    fn create_reducer(&self, column_name: &str) -> Arc<dyn AggregateExpr> {
        // The max of partial maxima is the overall max
        let input = Arc::new(Column::new(column_name));
        Arc::new(Max::new(input))
    }
}

// Updates `$SELF.max` if `$VALUE` is strictly greater than the stored
// maximum (or if no maximum has been stored yet). Finding a different
// ScalarValue variant already stored is an internal error.
//
// NOTE(review): `$ARRAY_TYPE` is not used by the expansion; it is kept so
// existing call sites that pass it keep compiling.
macro_rules! max_accumulate {
    ($SELF:ident, $VALUE:expr, $ARRAY_TYPE:ident, $SCALAR_VARIANT:ident) => {{
        match &$SELF.max {
            Some(ScalarValue::$SCALAR_VARIANT(n)) => {
                if ($VALUE) > *n {
                    $SELF.max = Some(ScalarValue::$SCALAR_VARIANT($VALUE))
                }
            }
            Some(_) => {
                return Err(ExecutionError::InternalError(
                    "Unexpected ScalarValue variant".to_string(),
                ))
            }
            None => $SELF.max = Some(ScalarValue::$SCALAR_VARIANT($VALUE)),
        };
    }};
}
#[derive(Debug)]
struct MaxAccumulator {
    // Largest value seen so far, or None until the first value is accumulated
    max: Option<ScalarValue>,
}

impl Accumulator for MaxAccumulator {
    /// Fold one scalar into the running maximum. Numeric and Utf8 values are
    /// supported; `None` is a no-op.
    fn accumulate_scalar(&mut self, value: Option<ScalarValue>) -> Result<()> {
        let value = match value {
            Some(value) => value,
            None => return Ok(()),
        };
        match value {
            ScalarValue::Int8(v) => max_accumulate!(self, v, Int8Array, Int8),
            ScalarValue::Int16(v) => max_accumulate!(self, v, Int16Array, Int16),
            ScalarValue::Int32(v) => max_accumulate!(self, v, Int32Array, Int32),
            ScalarValue::Int64(v) => max_accumulate!(self, v, Int64Array, Int64),
            ScalarValue::UInt8(v) => max_accumulate!(self, v, UInt8Array, UInt8),
            ScalarValue::UInt16(v) => max_accumulate!(self, v, UInt16Array, UInt16),
            ScalarValue::UInt32(v) => max_accumulate!(self, v, UInt32Array, UInt32),
            ScalarValue::UInt64(v) => max_accumulate!(self, v, UInt64Array, UInt64),
            ScalarValue::Float32(v) => {
                max_accumulate!(self, v, Float32Array, Float32)
            }
            ScalarValue::Float64(v) => {
                max_accumulate!(self, v, Float64Array, Float64)
            }
            ScalarValue::Utf8(v) => max_accumulate!(self, v, StringArray, Utf8),
            other => {
                return Err(ExecutionError::General(format!(
                    "MAX does not support {:?}",
                    other
                )))
            }
        }
        Ok(())
    }

    /// Reduce a whole array with the Arrow `max` kernel, then fold the
    /// (possibly null) result into the running maximum.
    fn accumulate_batch(&mut self, array: &ArrayRef) -> Result<()> {
        let values = array.as_any();
        let batch_max = match array.data_type() {
            DataType::UInt8 => {
                Ok(compute::max(values.downcast_ref::<UInt8Array>().unwrap())
                    .map(ScalarValue::UInt8))
            }
            DataType::UInt16 => {
                Ok(compute::max(values.downcast_ref::<UInt16Array>().unwrap())
                    .map(ScalarValue::UInt16))
            }
            DataType::UInt32 => {
                Ok(compute::max(values.downcast_ref::<UInt32Array>().unwrap())
                    .map(ScalarValue::UInt32))
            }
            DataType::UInt64 => {
                Ok(compute::max(values.downcast_ref::<UInt64Array>().unwrap())
                    .map(ScalarValue::UInt64))
            }
            DataType::Int8 => Ok(compute::max(
                values.downcast_ref::<Int8Array>().unwrap(),
            )
            .map(ScalarValue::Int8)),
            DataType::Int16 => Ok(compute::max(
                values.downcast_ref::<Int16Array>().unwrap(),
            )
            .map(ScalarValue::Int16)),
            DataType::Int32 => Ok(compute::max(
                values.downcast_ref::<Int32Array>().unwrap(),
            )
            .map(ScalarValue::Int32)),
            DataType::Int64 => Ok(compute::max(
                values.downcast_ref::<Int64Array>().unwrap(),
            )
            .map(ScalarValue::Int64)),
            DataType::Float32 => {
                Ok(compute::max(values.downcast_ref::<Float32Array>().unwrap())
                    .map(ScalarValue::Float32))
            }
            DataType::Float64 => {
                Ok(compute::max(values.downcast_ref::<Float64Array>().unwrap())
                    .map(ScalarValue::Float64))
            }
            _ => Err(ExecutionError::ExecutionError(
                "Unsupported data type for MAX".to_string(),
            )),
        }?;
        self.accumulate_scalar(batch_max)
    }

    /// Return the running maximum, or `None` when nothing was accumulated.
    fn get_value(&self) -> Result<Option<ScalarValue>> {
        Ok(self.max.clone())
    }
}

/// Create a max expression
pub fn max(expr: Arc<dyn PhysicalExpr>) -> Arc<dyn AggregateExpr> {
    let aggregate = Max::new(expr);
    Arc::new(aggregate)
}

/// MIN aggregate expression
#[derive(Debug)]
pub struct Min {
    // The input expression whose minimum is computed
    expr: Arc<dyn PhysicalExpr>,
}

impl Min {
    /// Create a new MIN aggregate function
    pub fn new(expr: Arc<dyn PhysicalExpr>) -> Self {
        Min { expr }
    }
}

impl AggregateExpr for Min {
    /// MIN preserves the data type of its input
    fn data_type(&self, input_schema: &Schema) -> Result<DataType> {
        self.expr.data_type(input_schema)
    }

    fn nullable(&self, _input_schema: &Schema) -> Result<bool> {
        // null should be returned if no rows are aggregated
        Ok(true)
    }

    fn evaluate_input(&self, batch: &RecordBatch) -> Result<ArrayRef> {
        self.expr.evaluate(batch)
    }

    fn create_accumulator(&self) -> Rc<RefCell<dyn Accumulator>> {
        let accumulator = MinAccumulator { min: None };
        Rc::new(RefCell::new(accumulator))
    }

    fn create_reducer(&self, column_name: &str) -> Arc<dyn AggregateExpr> {
        // The min of partial minima is the overall min
        let input = Arc::new(Column::new(column_name));
        Arc::new(Min::new(input))
    }
}

// Updates `$SELF.min` if `$VALUE` is strictly less than the stored
// minimum (or if no minimum has been stored yet). Finding a different
// ScalarValue variant already stored is an internal error.
//
// NOTE(review): `$ARRAY_TYPE` is not used by the expansion; it is kept so
// existing call sites that pass it keep compiling.
macro_rules! min_accumulate {
    ($SELF:ident, $VALUE:expr, $ARRAY_TYPE:ident, $SCALAR_VARIANT:ident) => {{
        match &$SELF.min {
            Some(ScalarValue::$SCALAR_VARIANT(n)) => {
                if ($VALUE) < *n {
                    $SELF.min = Some(ScalarValue::$SCALAR_VARIANT($VALUE))
                }
            }
            Some(_) => {
                return Err(ExecutionError::InternalError(
                    "Unexpected ScalarValue variant".to_string(),
                ))
            }
            None => $SELF.min = Some(ScalarValue::$SCALAR_VARIANT($VALUE)),
        };
    }};
}
#[derive(Debug)]
struct MinAccumulator {
    // Smallest value seen so far, or None until the first value is accumulated
    min: Option<ScalarValue>,
}

impl Accumulator for MinAccumulator {
    /// Fold one scalar into the running minimum. Numeric and Utf8 values are
    /// supported; `None` is a no-op.
    fn accumulate_scalar(&mut self, value: Option<ScalarValue>) -> Result<()> {
        let value = match value {
            Some(value) => value,
            None => return Ok(()),
        };
        match value {
            ScalarValue::Int8(v) => min_accumulate!(self, v, Int8Array, Int8),
            ScalarValue::Int16(v) => min_accumulate!(self, v, Int16Array, Int16),
            ScalarValue::Int32(v) => min_accumulate!(self, v, Int32Array, Int32),
            ScalarValue::Int64(v) => min_accumulate!(self, v, Int64Array, Int64),
            ScalarValue::UInt8(v) => min_accumulate!(self, v, UInt8Array, UInt8),
            ScalarValue::UInt16(v) => min_accumulate!(self, v, UInt16Array, UInt16),
            ScalarValue::UInt32(v) => min_accumulate!(self, v, UInt32Array, UInt32),
            ScalarValue::UInt64(v) => min_accumulate!(self, v, UInt64Array, UInt64),
            ScalarValue::Float32(v) => {
                min_accumulate!(self, v, Float32Array, Float32)
            }
            ScalarValue::Float64(v) => {
                min_accumulate!(self, v, Float64Array, Float64)
            }
            ScalarValue::Utf8(v) => min_accumulate!(self, v, StringArray, Utf8),
            other => {
                return Err(ExecutionError::General(format!(
                    "MIN does not support {:?}",
                    other
                )))
            }
        }
        Ok(())
    }

    /// Reduce a whole array with the Arrow `min` kernel, then fold the
    /// (possibly null) result into the running minimum.
    fn accumulate_batch(&mut self, array: &ArrayRef) -> Result<()> {
        let values = array.as_any();
        let batch_min = match array.data_type() {
            DataType::UInt8 => {
                Ok(compute::min(values.downcast_ref::<UInt8Array>().unwrap())
                    .map(ScalarValue::UInt8))
            }
            DataType::UInt16 => {
                Ok(compute::min(values.downcast_ref::<UInt16Array>().unwrap())
                    .map(ScalarValue::UInt16))
            }
            DataType::UInt32 => {
                Ok(compute::min(values.downcast_ref::<UInt32Array>().unwrap())
                    .map(ScalarValue::UInt32))
            }
            DataType::UInt64 => {
                Ok(compute::min(values.downcast_ref::<UInt64Array>().unwrap())
                    .map(ScalarValue::UInt64))
            }
            DataType::Int8 => Ok(compute::min(
                values.downcast_ref::<Int8Array>().unwrap(),
            )
            .map(ScalarValue::Int8)),
            DataType::Int16 => Ok(compute::min(
                values.downcast_ref::<Int16Array>().unwrap(),
            )
            .map(ScalarValue::Int16)),
            DataType::Int32 => Ok(compute::min(
                values.downcast_ref::<Int32Array>().unwrap(),
            )
            .map(ScalarValue::Int32)),
            DataType::Int64 => Ok(compute::min(
                values.downcast_ref::<Int64Array>().unwrap(),
            )
            .map(ScalarValue::Int64)),
            DataType::Float32 => {
                Ok(compute::min(values.downcast_ref::<Float32Array>().unwrap())
                    .map(ScalarValue::Float32))
            }
            DataType::Float64 => {
                Ok(compute::min(values.downcast_ref::<Float64Array>().unwrap())
                    .map(ScalarValue::Float64))
            }
            _ => Err(ExecutionError::ExecutionError(
                "Unsupported data type for MIN".to_string(),
            )),
        }?;
        self.accumulate_scalar(batch_min)
    }

    /// Return the running minimum, or `None` when nothing was accumulated.
    fn get_value(&self) -> Result<Option<ScalarValue>> {
        Ok(self.min.clone())
    }
}

/// Create a min expression
pub fn min(expr: Arc<dyn PhysicalExpr>) -> Arc<dyn AggregateExpr> {
    let aggregate = Min::new(expr);
    Arc::new(aggregate)
}

/// COUNT aggregate expression
/// Returns the amount of non-null values of the given expression.
#[derive(Debug)]
pub struct Count {
    // The expression whose non-null values are counted
    expr: Arc<dyn PhysicalExpr>,
}

impl Count {
    /// Create a new COUNT aggregate function.
    pub fn new(expr: Arc<dyn PhysicalExpr>) -> Self {
        // Field-init shorthand replaces the redundant `expr: expr`
        // (clippy::redundant_field_names), matching the other aggregates
        // in this file.
        Self { expr }
    }
}

impl AggregateExpr for Count {
    /// COUNT always produces an unsigned 64-bit count regardless of input type
    fn data_type(&self, _input_schema: &Schema) -> Result<DataType> {
        Ok(DataType::UInt64)
    }

    fn nullable(&self, _input_schema: &Schema) -> Result<bool> {
        // null should be returned if no rows are aggregated
        Ok(true)
    }

    fn evaluate_input(&self, batch: &RecordBatch) -> Result<ArrayRef> {
        self.expr.evaluate(batch)
    }

    fn create_accumulator(&self) -> Rc<RefCell<dyn Accumulator>> {
        Rc::new(RefCell::new(CountAccumulator { count: 0 }))
    }

    fn create_reducer(&self, column_name: &str) -> Arc<dyn AggregateExpr> {
        // Partial counts are combined by SUMming them, not by counting again
        Arc::new(Sum::new(Arc::new(Column::new(column_name))))
    }
}

#[derive(Debug)]
struct CountAccumulator {
    // Number of non-null values accumulated so far
    count: u64,
}

impl Accumulator for CountAccumulator {
    /// Any non-null scalar increments the counter by one; nulls are ignored.
    fn accumulate_scalar(&mut self, value: Option<ScalarValue>) -> Result<()> {
        match value {
            Some(_) => {
                self.count += 1;
                Ok(())
            }
            None => Ok(()),
        }
    }

    /// Count a whole array at once: its length minus its null count.
    fn accumulate_batch(&mut self, array: &ArrayRef) -> Result<()> {
        let non_null = array.len() - array.null_count();
        self.count += non_null as u64;
        Ok(())
    }

    /// COUNT always yields a value, even for empty input (zero).
    fn get_value(&self) -> Result<Option<ScalarValue>> {
        Ok(Some(ScalarValue::UInt64(self.count)))
    }
}

/// Create a count expression
pub fn count(expr: Arc<dyn PhysicalExpr>) -> Arc<dyn AggregateExpr> {
    let aggregate = Count::new(expr);
    Arc::new(aggregate)
}

/// Invoke a compute kernel on a pair of binary data arrays
// Downcasts both arrays to `$DT` (panics on type mismatch — callers dispatch
// on data_type first) and calls the `_utf8`-suffixed variant of the kernel:
// `paste::expr!` concatenates `$OP` with `_utf8`, e.g. `eq` -> `eq_utf8`.
macro_rules! compute_utf8_op {
    ($LEFT:expr, $RIGHT:expr, $OP:ident, $DT:ident) => {{
        let ll = $LEFT
            .as_any()
            .downcast_ref::<$DT>()
            .expect("compute_op failed to downcast array");
        let rr = $RIGHT
            .as_any()
            .downcast_ref::<$DT>()
            .expect("compute_op failed to downcast array");
        Ok(Arc::new(paste::expr! {[<$OP _utf8>]}(&ll, &rr)?))
    }};
}

/// Invoke a compute kernel on a pair of arrays
// Downcasts both arrays to the concrete array type `$DT` (panics on type
// mismatch — callers dispatch on data_type first) and applies kernel `$OP`,
// wrapping the resulting array in an Arc.
macro_rules! compute_op {
    ($LEFT:expr, $RIGHT:expr, $OP:ident, $DT:ident) => {{
        let ll = $LEFT
            .as_any()
            .downcast_ref::<$DT>()
            .expect("compute_op failed to downcast array");
        let rr = $RIGHT
            .as_any()
            .downcast_ref::<$DT>()
            .expect("compute_op failed to downcast array");
        Ok(Arc::new($OP(&ll, &rr)?))
    }};
}

// Dispatch a binary kernel over string arrays only; any non-Utf8 input type
// is rejected with an error. Dispatch is based on the LEFT operand's type.
macro_rules! binary_string_array_op {
    ($LEFT:expr, $RIGHT:expr, $OP:ident) => {{
        match $LEFT.data_type() {
            DataType::Utf8 => compute_utf8_op!($LEFT, $RIGHT, $OP, StringArray),
            other => Err(ExecutionError::General(format!(
                "Unsupported data type {:?}",
                other
            ))),
        }
    }};
}

/// Invoke a compute kernel on a pair of arrays
/// The binary_primitive_array_op macro only evaluates for primitive types
/// like integers and floats.
// Dispatch is based on the LEFT operand's type; the right array is assumed
// to have the same type (the downcast panics otherwise).
macro_rules! binary_primitive_array_op {
    ($LEFT:expr, $RIGHT:expr, $OP:ident) => {{
        match $LEFT.data_type() {
            DataType::Int8 => compute_op!($LEFT, $RIGHT, $OP, Int8Array),
            DataType::Int16 => compute_op!($LEFT, $RIGHT, $OP, Int16Array),
            DataType::Int32 => compute_op!($LEFT, $RIGHT, $OP, Int32Array),
            DataType::Int64 => compute_op!($LEFT, $RIGHT, $OP, Int64Array),
            DataType::UInt8 => compute_op!($LEFT, $RIGHT, $OP, UInt8Array),
            DataType::UInt16 => compute_op!($LEFT, $RIGHT, $OP, UInt16Array),
            DataType::UInt32 => compute_op!($LEFT, $RIGHT, $OP, UInt32Array),
            DataType::UInt64 => compute_op!($LEFT, $RIGHT, $OP, UInt64Array),
            DataType::Float32 => compute_op!($LEFT, $RIGHT, $OP, Float32Array),
            DataType::Float64 => compute_op!($LEFT, $RIGHT, $OP, Float64Array),
            other => Err(ExecutionError::General(format!(
                "Unsupported data type {:?}",
                other
            ))),
        }
    }};
}

/// The binary_array_op macro includes types that extend beyond the primitive,
/// such as Utf8 strings.
///
/// Used by comparison operators (lt/gt/eq/...), which are defined for
/// primitives, strings, and nanosecond timestamps alike. Strings route
/// through `compute_utf8_op!`; everything else through `compute_op!`.
macro_rules! binary_array_op {
    ($LEFT:expr, $RIGHT:expr, $OP:ident) => {{
        match $LEFT.data_type() {
            DataType::Int8 => compute_op!($LEFT, $RIGHT, $OP, Int8Array),
            DataType::Int16 => compute_op!($LEFT, $RIGHT, $OP, Int16Array),
            DataType::Int32 => compute_op!($LEFT, $RIGHT, $OP, Int32Array),
            DataType::Int64 => compute_op!($LEFT, $RIGHT, $OP, Int64Array),
            DataType::UInt8 => compute_op!($LEFT, $RIGHT, $OP, UInt8Array),
            DataType::UInt16 => compute_op!($LEFT, $RIGHT, $OP, UInt16Array),
            DataType::UInt32 => compute_op!($LEFT, $RIGHT, $OP, UInt32Array),
            DataType::UInt64 => compute_op!($LEFT, $RIGHT, $OP, UInt64Array),
            DataType::Float32 => compute_op!($LEFT, $RIGHT, $OP, Float32Array),
            DataType::Float64 => compute_op!($LEFT, $RIGHT, $OP, Float64Array),
            DataType::Utf8 => compute_utf8_op!($LEFT, $RIGHT, $OP, StringArray),
            // only nanosecond-resolution timestamps without a timezone are supported
            DataType::Timestamp(TimeUnit::Nanosecond, None) => {
                compute_op!($LEFT, $RIGHT, $OP, TimestampNanosecondArray)
            }
            other => Err(ExecutionError::General(format!(
                "Unsupported data type {:?}",
                other
            ))),
        }
    }};
}

/// Invoke a boolean kernel on a pair of arrays
///
/// Like `compute_op!` but hard-wired to `BooleanArray`; used for the logical
/// AND/OR operators whose operands have already been verified as boolean.
macro_rules! boolean_op {
    ($LEFT:expr, $RIGHT:expr, $OP:ident) => {{
        let ll = $LEFT
            .as_any()
            .downcast_ref::<BooleanArray>()
            .expect("boolean_op failed to downcast array");
        let rr = $RIGHT
            .as_any()
            .downcast_ref::<BooleanArray>()
            .expect("boolean_op failed to downcast array");
        Ok(Arc::new($OP(&ll, &rr)?))
    }};
}
/// Binary expression
#[derive(Debug)]
pub struct BinaryExpr {
    /// Left-hand operand
    left: Arc<dyn PhysicalExpr>,
    /// Operator to apply (comparison, arithmetic, or logical)
    op: Operator,
    /// Right-hand operand
    right: Arc<dyn PhysicalExpr>,
}

impl BinaryExpr {
    /// Create new binary expression
    pub fn new(
        left: Arc<dyn PhysicalExpr>,
        op: Operator,
        right: Arc<dyn PhysicalExpr>,
    ) -> Self {
        Self { left, op, right }
    }
}

impl fmt::Display for BinaryExpr {
    /// Renders as "<left> <op> <right>", e.g. "a < b".
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{l} {o} {r}", l = self.left, o = self.op, r = self.right)
    }
}

// the type that both lhs and rhs can be casted to for the purpose of a string computation
fn string_coercion(lhs_type: &DataType, rhs_type: &DataType) -> Option<DataType> {
    use arrow::datatypes::DataType::*;
    match (lhs_type, rhs_type) {
        (Utf8, Utf8) => Some(Utf8),
        (LargeUtf8, Utf8) => Some(LargeUtf8),
        (Utf8, LargeUtf8) => Some(LargeUtf8),
        (LargeUtf8, LargeUtf8) => Some(LargeUtf8),
        _ => None,
    }
}

/// coercion rule for numerical types
pub fn numerical_coercion(lhs_type: &DataType, rhs_type: &DataType) -> Option<DataType> {
    use arrow::datatypes::DataType::*;

    // error on any non-numeric type
    if !is_numeric(lhs_type) || !is_numeric(rhs_type) {
        return None;
    };

    // same type => all good
    if lhs_type == rhs_type {
        return Some(lhs_type.clone());
    }

    // these are ordered from most informative to least informative so
    // that the coercion removes the least amount of information
    match (lhs_type, rhs_type) {
        (Float64, _) => Some(Float64),
        (_, Float64) => Some(Float64),

        (_, Float32) => Some(Float32),
        (Float32, _) => Some(Float32),

        (Int64, _) => Some(Int64),
        (_, Int64) => Some(Int64),

        (Int32, _) => Some(Int32),
        (_, Int32) => Some(Int32),

        (Int16, _) => Some(Int16),
        (_, Int16) => Some(Int16),

        (Int8, _) => Some(Int8),
        (_, Int8) => Some(Int8),

        (UInt64, _) => Some(UInt64),
        (_, UInt64) => Some(UInt64),

        (UInt32, _) => Some(UInt32),
        (_, UInt32) => Some(UInt32),

        (UInt16, _) => Some(UInt16),
        (_, UInt16) => Some(UInt16),

        (UInt8, _) => Some(UInt8),
        (_, UInt8) => Some(UInt8),

        _ => None,
    }
}

// coercion rules for equality operations. This is a superset of all numerical coercion rules.
fn eq_coercion(lhs_type: &DataType, rhs_type: &DataType) -> Option<DataType> {
    // any type can be compared for equality with itself; otherwise fall back
    // to the numeric coercion rules
    if lhs_type == rhs_type {
        Some(lhs_type.clone())
    } else {
        numerical_coercion(lhs_type, rhs_type)
    }
}

// coercion rules that assume an ordered set, such as "less than".
// These are the union of all numerical coercion rules and all string coercion rules
fn order_coercion(lhs_type: &DataType, rhs_type: &DataType) -> Option<DataType> {
    // identical types are trivially comparable
    if lhs_type == rhs_type {
        return Some(lhs_type.clone());
    }
    // try numeric coercion first; strings are naturally ordered, so string
    // coercion is an acceptable fallback
    numerical_coercion(lhs_type, rhs_type)
        .or_else(|| string_coercion(lhs_type, rhs_type))
}

/// coercion rules for all binary operators
///
/// Returns the common type both operands should be coerced to for `op`, or an
/// error when no such type exists (or the operator is unsupported).
fn common_binary_type(
    lhs_type: &DataType,
    op: &Operator,
    rhs_type: &DataType,
) -> Result<DataType> {
    // This result MUST be compatible with `binary_coerce`
    let result = match op {
        Operator::And | Operator::Or => match (lhs_type, rhs_type) {
            // logical binary boolean operators can only be evaluated in bools
            (DataType::Boolean, DataType::Boolean) => Some(DataType::Boolean),
            _ => None,
        },
        // logical equality operators have their own rules, and always return a boolean
        Operator::Eq | Operator::NotEq => eq_coercion(lhs_type, rhs_type),
        // "like" operators operate on strings and always return a boolean
        Operator::Like | Operator::NotLike => string_coercion(lhs_type, rhs_type),
        // order-comparison operators have their own rules
        Operator::Lt | Operator::Gt | Operator::GtEq | Operator::LtEq => {
            order_coercion(lhs_type, rhs_type)
        }
        // for math expressions, the final value of the coercion is also the return type
        // because coercion favours higher information types
        Operator::Plus | Operator::Minus | Operator::Divide | Operator::Multiply => {
            numerical_coercion(lhs_type, rhs_type)
        }
        Operator::Modulus => {
            return Err(ExecutionError::NotImplemented(
                "Modulus operator is still not supported".to_string(),
            ))
        }
        Operator::Not => {
            return Err(ExecutionError::InternalError(
                "Trying to coerce a unary operator".to_string(),
            ))
        }
    };

    // re-write the error message of failed coercions to include the operator's information
    // (ok_or_else avoids the manual match and the redundant `.to_string()` on
    // an already-owned `format!` result)
    result.ok_or_else(|| {
        ExecutionError::General(format!(
            "'{:?} {} {:?}' can't be evaluated because there isn't a common type to coerce the types to",
            lhs_type, op, rhs_type
        ))
    })
}

/// Returns the return type of a binary operator or an error when the binary operator cannot
/// perform the computation between the argument's types, even after type coercion.
///
/// This function makes some assumptions about the underlying available computations.
pub fn binary_operator_data_type(
    lhs_type: &DataType,
    op: &Operator,
    rhs_type: &DataType,
) -> Result<DataType> {
    // validate that it is possible to perform the operation on incoming types.
    // (or the return datatype cannot be infered)
    let common_type = common_binary_type(lhs_type, op, rhs_type)?;

    match op {
        // operators that return a boolean
        Operator::Eq
        | Operator::NotEq
        | Operator::And
        | Operator::Or
        | Operator::Like
        | Operator::NotLike
        | Operator::Lt
        | Operator::Gt
        | Operator::GtEq
        | Operator::LtEq => Ok(DataType::Boolean),
        // math operations return the same value as the common coerced type
        Operator::Plus | Operator::Minus | Operator::Divide | Operator::Multiply => {
            Ok(common_type)
        }
        // NOTE: the two arms below are defensive — `common_binary_type` above
        // already returns an error for Modulus and Not, so these cannot be
        // reached through this function's normal flow.
        Operator::Modulus => Err(ExecutionError::NotImplemented(
            "Modulus operator is still not supported".to_string(),
        )),
        Operator::Not => Err(ExecutionError::InternalError(
            "Trying to coerce a unary operator".to_string(),
        )),
    }
}

/// return two physical expressions that are optionally coerced to a
/// common type that the binary operator supports.
fn binary_cast(
    lhs: Arc<dyn PhysicalExpr>,
    op: &Operator,
    rhs: Arc<dyn PhysicalExpr>,
    input_schema: &Schema,
) -> Result<(Arc<dyn PhysicalExpr>, Arc<dyn PhysicalExpr>)> {
    // determine the common type both operands must be cast to for `op`
    let common_type = common_binary_type(
        &lhs.data_type(input_schema)?,
        op,
        &rhs.data_type(input_schema)?,
    )?;

    // `cast` is a no-op when the expression already has the target type
    let coerced_lhs = cast(lhs, input_schema, common_type.clone())?;
    let coerced_rhs = cast(rhs, input_schema, common_type)?;
    Ok((coerced_lhs, coerced_rhs))
}

impl PhysicalExpr for BinaryExpr {
    /// The output type of the operator after coercing both operand types.
    fn data_type(&self, input_schema: &Schema) -> Result<DataType> {
        binary_operator_data_type(
            &self.left.data_type(input_schema)?,
            &self.op,
            &self.right.data_type(input_schema)?,
        )
    }

    /// The result may be null if either operand may be null.
    fn nullable(&self, input_schema: &Schema) -> Result<bool> {
        Ok(self.left.nullable(input_schema)? || self.right.nullable(input_schema)?)
    }

    /// Evaluate both operands against `batch` and apply the operator's
    /// compute kernel. Both sides must already have identical data types;
    /// the planner inserts the necessary casts (see `binary`/`binary_cast`).
    fn evaluate(&self, batch: &RecordBatch) -> Result<ArrayRef> {
        let left = self.left.evaluate(batch)?;
        let right = self.right.evaluate(batch)?;
        if left.data_type() != right.data_type() {
            return Err(ExecutionError::General(format!(
                "Cannot evaluate binary expression {:?} with types {:?} and {:?}",
                self.op,
                left.data_type(),
                right.data_type()
            )));
        }
        match &self.op {
            // string operators: Utf8 only
            Operator::Like => binary_string_array_op!(left, right, like),
            Operator::NotLike => binary_string_array_op!(left, right, nlike),
            // comparison operators: primitives, strings, nanosecond timestamps
            Operator::Lt => binary_array_op!(left, right, lt),
            Operator::LtEq => binary_array_op!(left, right, lt_eq),
            Operator::Gt => binary_array_op!(left, right, gt),
            Operator::GtEq => binary_array_op!(left, right, gt_eq),
            Operator::Eq => binary_array_op!(left, right, eq),
            Operator::NotEq => binary_array_op!(left, right, neq),
            // arithmetic operators: numeric primitives only
            Operator::Plus => binary_primitive_array_op!(left, right, add),
            Operator::Minus => binary_primitive_array_op!(left, right, subtract),
            Operator::Multiply => binary_primitive_array_op!(left, right, multiply),
            Operator::Divide => binary_primitive_array_op!(left, right, divide),
            // logical operators: boolean arrays only; checking `left` suffices
            // because both sides were verified equal above
            Operator::And => {
                if left.data_type() == &DataType::Boolean {
                    boolean_op!(left, right, and)
                } else {
                    return Err(ExecutionError::General(format!(
                        "Cannot evaluate binary expression {:?} with types {:?} and {:?}",
                        self.op,
                        left.data_type(),
                        right.data_type()
                    )));
                }
            }
            Operator::Or => {
                if left.data_type() == &DataType::Boolean {
                    boolean_op!(left, right, or)
                } else {
                    return Err(ExecutionError::General(format!(
                        "Cannot evaluate binary expression {:?} with types {:?} and {:?}",
                        self.op,
                        left.data_type(),
                        right.data_type()
                    )));
                }
            }
            Operator::Modulus => Err(ExecutionError::NotImplemented(
                "Modulus operator is still not supported".to_string(),
            )),
            // unary operator: never valid inside a BinaryExpr
            Operator::Not => {
                Err(ExecutionError::General("Unsupported operator".to_string()))
            }
        }
    }
}

/// Create a binary expression whose arguments are correctly coerced.
/// This function errors if it is not possible to coerce the arguments
/// to computational types supported by the operator.
pub fn binary(
    lhs: Arc<dyn PhysicalExpr>,
    op: Operator,
    rhs: Arc<dyn PhysicalExpr>,
    input_schema: &Schema,
) -> Result<Arc<dyn PhysicalExpr>> {
    // coerce both operands to a common supported type, then build the expression
    let (coerced_lhs, coerced_rhs) = binary_cast(lhs, &op, rhs, input_schema)?;
    Ok(Arc::new(BinaryExpr::new(coerced_lhs, op, coerced_rhs)))
}

/// Not expression
#[derive(Debug)]
pub struct NotExpr {
    /// The boolean expression to negate
    arg: Arc<dyn PhysicalExpr>,
}

impl NotExpr {
    /// Create new not expression
    pub fn new(arg: Arc<dyn PhysicalExpr>) -> Self {
        Self { arg }
    }
}

impl fmt::Display for NotExpr {
    /// Renders as "NOT <arg>".
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "NOT {expr}", expr = self.arg)
    }
}
impl PhysicalExpr for NotExpr {
    /// NOT always produces a boolean result.
    fn data_type(&self, _input_schema: &Schema) -> Result<DataType> {
        // idiomatic expression form instead of a trailing `return ...;`
        Ok(DataType::Boolean)
    }

    fn nullable(&self, _input_schema: &Schema) -> Result<bool> {
        // !Null == true
        Ok(false)
    }

    /// Evaluate the inner expression and apply Arrow's boolean `not` kernel.
    ///
    /// Returns an error if the argument does not evaluate to a boolean array.
    fn evaluate(&self, batch: &RecordBatch) -> Result<ArrayRef> {
        let arg = self.arg.evaluate(batch)?;
        if arg.data_type() != &DataType::Boolean {
            return Err(ExecutionError::General(format!(
                "Cannot evaluate \"not\" expression with type {:?}",
                arg.data_type(),
            )));
        }
        // safe: the data type was verified as Boolean above
        // (message fixed: it previously said "boolean_op", copied from the macro)
        let arg = arg
            .as_any()
            .downcast_ref::<BooleanArray>()
            .expect("NotExpr failed to downcast array");
        Ok(Arc::new(arrow::compute::kernels::boolean::not(arg)?))
    }
}

/// Create a unary expression
pub fn not(arg: Arc<dyn PhysicalExpr>) -> Arc<dyn PhysicalExpr> {
    let expr = NotExpr::new(arg);
    Arc::new(expr)
}

/// CAST expression casts an expression to a specific data type
///
/// Construct instances through the `cast` function, which validates that the
/// source/target type combination is supported.
#[derive(Debug)]
pub struct CastExpr {
    /// The expression to cast
    expr: Arc<dyn PhysicalExpr>,
    /// The data type to cast to
    cast_type: DataType,
}

/// Determine if a DataType is numeric or not
pub fn is_numeric(dt: &DataType) -> bool {
    match dt {
        // all signed/unsigned integer widths and all float widths are numeric
        DataType::Int8
        | DataType::Int16
        | DataType::Int32
        | DataType::Int64
        | DataType::UInt8
        | DataType::UInt16
        | DataType::UInt32
        | DataType::UInt64
        | DataType::Float16
        | DataType::Float32
        | DataType::Float64 => true,
        _ => false,
    }
}

impl fmt::Display for CastExpr {
    /// Renders as "CAST(<expr> AS <Type>)".
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "CAST({e} AS {t:?})", e = self.expr, t = self.cast_type)
    }
}

impl PhysicalExpr for CastExpr {
    /// The output type is always the cast's target type.
    fn data_type(&self, _input_schema: &Schema) -> Result<DataType> {
        Ok(self.cast_type.clone())
    }

    /// A cast is nullable exactly when its input is nullable.
    fn nullable(&self, input_schema: &Schema) -> Result<bool> {
        self.expr.nullable(input_schema)
    }

    /// Evaluate the inner expression, then apply Arrow's cast kernel.
    fn evaluate(&self, batch: &RecordBatch) -> Result<ArrayRef> {
        let evaluated = self.expr.evaluate(batch)?;
        let casted = kernels::cast::cast(&evaluated, &self.cast_type)?;
        Ok(casted)
    }
}

/// Returns a cast operation, if casting needed.
///
/// Supported casts:
/// * numeric -> numeric or Utf8 or Timestamp(Nanosecond)
/// * Binary -> Utf8
///
/// When the expression already has the target type it is returned unchanged;
/// any unsupported combination yields an error.
pub fn cast(
    expr: Arc<dyn PhysicalExpr>,
    input_schema: &Schema,
    cast_type: DataType,
) -> Result<Arc<dyn PhysicalExpr>> {
    let expr_type = expr.data_type(input_schema)?;
    if expr_type == cast_type {
        // already the target type: pass the owned Arc through
        // (previously `expr.clone()`, a redundant refcount bump)
        return Ok(expr);
    }
    // single consolidated validity check instead of three duplicated
    // `Ok(Arc::new(CastExpr { .. }))` branches
    let can_cast = (is_numeric(&expr_type)
        && (is_numeric(&cast_type)
            || cast_type == DataType::Utf8
            || cast_type == DataType::Timestamp(TimeUnit::Nanosecond, None)))
        || (expr_type == DataType::Binary && cast_type == DataType::Utf8);
    if can_cast {
        Ok(Arc::new(CastExpr { expr, cast_type }))
    } else {
        Err(ExecutionError::General(format!(
            "Invalid CAST from {:?} to {:?}",
            expr_type, cast_type
        )))
    }
}

/// Represents a non-null literal value
#[derive(Debug)]
pub struct Literal {
    /// The constant value this expression evaluates to, repeated per row
    value: ScalarValue,
}

impl Literal {
    /// Create a literal value expression
    pub fn new(value: ScalarValue) -> Self {
        Self { value }
    }
}

/// Build array containing the same literal value repeated. This is necessary because the Arrow
/// memory model does not have the concept of a scalar value currently.
///
/// Builds one row per row in `$BATCH`, using builder type `$BUILDER`,
/// appending `$VALUE` each time. Evaluates to a `Result<ArrayRef>`-shaped value.
macro_rules! build_literal_array {
    ($BATCH:ident, $BUILDER:ident, $VALUE:expr) => {{
        // pre-size the builder with the exact row count to avoid reallocation
        let mut builder = $BUILDER::new($BATCH.num_rows());
        for _ in 0..$BATCH.num_rows() {
            builder.append_value($VALUE)?;
        }
        Ok(Arc::new(builder.finish()))
    }};
}

impl fmt::Display for Literal {
    /// Renders the wrapped scalar value directly.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{v}", v = self.value)
    }
}

impl PhysicalExpr for Literal {
    /// The literal's data type, independent of the input schema.
    fn data_type(&self, _input_schema: &Schema) -> Result<DataType> {
        self.value.get_datatype()
    }

    /// Only the explicit Null scalar is nullable; all other literals are not.
    fn nullable(&self, _input_schema: &Schema) -> Result<bool> {
        match &self.value {
            ScalarValue::Null => Ok(true),
            _ => Ok(false),
        }
    }

    /// Materialize the scalar as an array with one entry per batch row,
    /// since Arrow kernels operate on arrays, not scalars.
    fn evaluate(&self, batch: &RecordBatch) -> Result<ArrayRef> {
        match &self.value {
            ScalarValue::Int8(value) => build_literal_array!(batch, Int8Builder, *value),
            ScalarValue::Int16(value) => {
                build_literal_array!(batch, Int16Builder, *value)
            }
            ScalarValue::Int32(value) => {
                build_literal_array!(batch, Int32Builder, *value)
            }
            ScalarValue::Int64(value) => {
                build_literal_array!(batch, Int64Builder, *value)
            }
            ScalarValue::UInt8(value) => {
                build_literal_array!(batch, UInt8Builder, *value)
            }
            ScalarValue::UInt16(value) => {
                build_literal_array!(batch, UInt16Builder, *value)
            }
            ScalarValue::UInt32(value) => {
                build_literal_array!(batch, UInt32Builder, *value)
            }
            ScalarValue::UInt64(value) => {
                build_literal_array!(batch, UInt64Builder, *value)
            }
            ScalarValue::Float32(value) => {
                build_literal_array!(batch, Float32Builder, *value)
            }
            ScalarValue::Float64(value) => {
                build_literal_array!(batch, Float64Builder, *value)
            }
            // strings are appended by reference, so no deref here
            ScalarValue::Utf8(value) => build_literal_array!(batch, StringBuilder, value),
            other => Err(ExecutionError::General(format!(
                "Unsupported literal type {:?}",
                other
            ))),
        }
    }
}

/// Create a literal expression
pub fn lit(value: ScalarValue) -> Arc<dyn PhysicalExpr> {
    let literal = Literal::new(value);
    Arc::new(literal)
}

/// Represents Sort operation for a column in a RecordBatch
#[derive(Clone, Debug)]
pub struct PhysicalSortExpr {
    /// Physical expression representing the column to sort
    pub expr: Arc<dyn PhysicalExpr>,
    /// Option to specify how the given column should be sorted
    /// (ascending/descending, nulls first/last)
    pub options: SortOptions,
}

impl PhysicalSortExpr {
    /// evaluate the sort expression into SortColumn that can be passed into arrow sort kernel
    pub fn evaluate_to_sort_column(&self, batch: &RecordBatch) -> Result<SortColumn> {
        let values = self.expr.evaluate(batch)?;
        let sort_column = SortColumn {
            values,
            options: Some(self.options),
        };
        Ok(sort_column)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::error::Result;
    use crate::physical_plan::common::get_scalar_value;
    use arrow::array::{
        LargeStringArray, PrimitiveArray, StringArray, Time64NanosecondArray,
    };
    use arrow::datatypes::*;

    // Create a binary expression without coercion. Used here when we do not want to coerce the expressions
    // to valid types. Usage can result in an execution (after plan) error.
    //
    // Contrast with the public `binary` constructor, which inserts casts.
    fn binary_simple(
        l: Arc<dyn PhysicalExpr>,
        op: Operator,
        r: Arc<dyn PhysicalExpr>,
    ) -> Arc<dyn PhysicalExpr> {
        Arc::new(BinaryExpr::new(l, op, r))
    }

    /// Evaluates "a < b" over two Int32 columns and checks every output row.
    #[test]
    fn binary_comparison() -> Result<()> {
        let schema = Schema::new(vec![
            Field::new("a", DataType::Int32, false),
            Field::new("b", DataType::Int32, false),
        ]);
        let a = Int32Array::from(vec![1, 2, 3, 4, 5]);
        let b = Int32Array::from(vec![1, 2, 4, 8, 16]);
        let batch = RecordBatch::try_new(
            Arc::new(schema.clone()),
            vec![Arc::new(a), Arc::new(b)],
        )?;

        // expression: "a < b"
        let lt = binary_simple(col("a"), Operator::Lt, col("b"));
        let result = lt.evaluate(&batch)?;
        assert_eq!(result.len(), 5);

        let result = result
            .as_any()
            .downcast_ref::<BooleanArray>()
            .expect("failed to downcast to BooleanArray");
        // element-wise comparison against the expected truth values
        let expected = vec![false, false, true, true, true];
        for (i, want) in expected.iter().enumerate() {
            assert_eq!(result.value(i), *want);
        }

        Ok(())
    }

    /// Evaluates a nested boolean expression "a < b OR a == b" and verifies
    /// both its display form and its row-wise results.
    #[test]
    fn binary_nested() -> Result<()> {
        let schema = Schema::new(vec![
            Field::new("a", DataType::Int32, false),
            Field::new("b", DataType::Int32, false),
        ]);
        let a = Int32Array::from(vec![2, 4, 6, 8, 10]);
        let b = Int32Array::from(vec![2, 5, 4, 8, 8]);
        let batch = RecordBatch::try_new(
            Arc::new(schema.clone()),
            vec![Arc::new(a), Arc::new(b)],
        )?;

        // expression: "a < b OR a == b"
        let expr = binary_simple(
            binary_simple(col("a"), Operator::Lt, col("b")),
            Operator::Or,
            binary_simple(col("a"), Operator::Eq, col("b")),
        );
        assert_eq!("a < b OR a = b", format!("{}", expr));

        let result = expr.evaluate(&batch)?;
        assert_eq!(result.len(), 5);

        let expected = vec![true, true, false, true, false];
        let result = result
            .as_any()
            .downcast_ref::<BooleanArray>()
            .expect("failed to downcast to BooleanArray");
        // (removed a leftover debug `print!` that wrote to stdout on every row)
        for i in 0..5 {
            assert_eq!(result.value(i), expected[i]);
        }

        Ok(())
    }

    /// A literal expression must produce a constant array with one entry per
    /// batch row, regardless of the batch's contents.
    #[test]
    fn literal_i32() -> Result<()> {
        // create an arbitrary record batch
        let schema = Schema::new(vec![Field::new("a", DataType::Int32, false)]);
        let a = Int32Array::from(vec![Some(1), None, Some(3), Some(4), Some(5)]);
        let batch = RecordBatch::try_new(Arc::new(schema.clone()), vec![Arc::new(a)])?;

        // create and evaluate a literal expression
        let literal_expr = lit(ScalarValue::Int32(42));
        assert_eq!("42", format!("{}", literal_expr));

        let literal_array = literal_expr.evaluate(&batch)?;
        let literal_array = literal_array.as_any().downcast_ref::<Int32Array>().unwrap();

        // note that the contents of the literal array are unrelated to the batch contents except for the length of the array
        assert_eq!(literal_array.len(), 5); // 5 rows in the batch
        for i in 0..literal_array.len() {
            assert_eq!(literal_array.value(i), 42);
        }

        Ok(())
    }

    // runs an end-to-end test of physical type coercion:
    // 1. construct a record batch with two columns of type A and B
    //    (`$A_ARRAY` / `$B_ARRAY` are the array types, `$A_TYPE` / `$B_TYPE`
    //    the DataTypes, `$A_VEC` / `$B_VEC` the column values)
    // 2. construct a physical expression of A OP B
    // 3. evaluate the expression
    // 4. verify that the resulting expression is of type C (`$TYPE`), and its
    //    values equal `$VEC` when downcast to `$TYPEARRAY`
    macro_rules! test_coercion {
        ($A_ARRAY:ident, $A_TYPE:expr, $A_VEC:expr, $B_ARRAY:ident, $B_TYPE:expr, $B_VEC:expr, $OP:expr, $TYPEARRAY:ident, $TYPE:expr, $VEC:expr) => {{
            let schema = Schema::new(vec![
                Field::new("a", $A_TYPE, false),
                Field::new("b", $B_TYPE, false),
            ]);
            let a = $A_ARRAY::from($A_VEC);
            let b = $B_ARRAY::from($B_VEC);
            let batch = RecordBatch::try_new(
                Arc::new(schema.clone()),
                vec![Arc::new(a), Arc::new(b)],
            )?;

            // verify that we can construct the expression
            let expression = binary(col("a"), $OP, col("b"), &schema)?;

            // verify that the expression's type is correct
            assert_eq!(expression.data_type(&schema)?, $TYPE);

            // compute
            let result = expression.evaluate(&batch)?;

            // verify that the array's data_type is correct
            assert_eq!(*result.data_type(), $TYPE);

            // verify that the data itself is downcastable
            let result = result
                .as_any()
                .downcast_ref::<$TYPEARRAY>()
                .expect("failed to downcast");
            // verify that the result itself is correct
            for (i, x) in $VEC.iter().enumerate() {
                assert_eq!(result.value(i), *x);
            }
        }};
    }

    // NOTE(review): "coersion" is a long-standing misspelling of "coercion"
    // kept here to avoid churn in test names.
    #[test]
    fn test_type_coersion() -> Result<()> {
        // Int32 + UInt32 coerces to Int32
        test_coercion!(
            Int32Array,
            DataType::Int32,
            vec![1i32, 2i32],
            UInt32Array,
            DataType::UInt32,
            vec![1u32, 2u32],
            Operator::Plus,
            Int32Array,
            DataType::Int32,
            vec![2i32, 4i32]
        );
        // Int32 + UInt16 coerces to Int32
        test_coercion!(
            Int32Array,
            DataType::Int32,
            vec![1i32],
            UInt16Array,
            DataType::UInt16,
            vec![1u16],
            Operator::Plus,
            Int32Array,
            DataType::Int32,
            vec![2i32]
        );
        // Float32 + UInt16 coerces to Float32
        test_coercion!(
            Float32Array,
            DataType::Float32,
            vec![1f32],
            UInt16Array,
            DataType::UInt16,
            vec![1u16],
            Operator::Plus,
            Float32Array,
            DataType::Float32,
            vec![2f32]
        );
        // multiplication follows the same numeric coercion rules
        test_coercion!(
            Float32Array,
            DataType::Float32,
            vec![2f32],
            UInt16Array,
            DataType::UInt16,
            vec![1u16],
            Operator::Multiply,
            Float32Array,
            DataType::Float32,
            vec![2f32]
        );
        // LIKE on two Utf8 columns returns a boolean array
        test_coercion!(
            StringArray,
            DataType::Utf8,
            vec!["hello world", "world"],
            StringArray,
            DataType::Utf8,
            vec!["%hello%", "%hello%"],
            Operator::Like,
            BooleanArray,
            DataType::Boolean,
            vec![true, false]
        );
        Ok(())
    }

    /// An uncoercible pair (Float32 + Utf8) must produce a General error with
    /// a message naming both types and the operator.
    #[test]
    fn test_coersion_error() -> Result<()> {
        let expr =
            common_binary_type(&DataType::Float32, &Operator::Plus, &DataType::Utf8);

        if let Err(ExecutionError::General(e)) = expr {
            assert_eq!(e, "'Float32 + Utf8' can't be evaluated because there isn't a common type to coerce the types to");
            Ok(())
        } else {
            Err(ExecutionError::General(
                "Coercion should have returned an ExecutionError::General".to_string(),
            ))
        }
    }

    /// Coercing the unary Not operator is a programmer error and must yield
    /// an InternalError rather than a General one.
    #[test]
    fn test_coersion_invalid() -> Result<()> {
        let expr =
            common_binary_type(&DataType::Float32, &Operator::Not, &DataType::Utf8);
        if let Err(ExecutionError::InternalError(_)) = expr {
            Ok(())
        } else {
            Err(ExecutionError::General(
                "Coercion should have returned an ExecutionError::InternalError"
                    .to_string(),
            ))
        }
    }

    // runs an end-to-end test of physical type cast
    // 1. construct a record batch with a column "a" of type A
    // 2. construct a physical expression of CAST(a AS B)
    // 3. evaluate the expression
    // 4. verify that the resulting expression is of type B
    // 5. verify that the resulting values are downcastable and correct
    macro_rules! generic_test_cast {
        ($A_ARRAY:ident, $A_TYPE:expr, $A_VEC:expr, $TYPEARRAY:ident, $TYPE:expr, $VEC:expr) => {{
            let schema = Schema::new(vec![Field::new("a", $A_TYPE, false)]);
            let a = $A_ARRAY::from($A_VEC);
            let batch =
                RecordBatch::try_new(Arc::new(schema.clone()), vec![Arc::new(a)])?;

            // verify that we can construct the expression
            let expression = cast(col("a"), &schema, $TYPE)?;

            // verify that its display is correct
            assert_eq!(format!("CAST(a AS {:?})", $TYPE), format!("{}", expression));

            // verify that the expression's type is correct
            assert_eq!(expression.data_type(&schema)?, $TYPE);

            // compute
            let result = expression.evaluate(&batch)?;

            // verify that the array's data_type is correct
            assert_eq!(*result.data_type(), $TYPE);

            // verify that the len is correct
            assert_eq!(result.len(), $A_VEC.len());

            // verify that the data itself is downcastable
            let result = result
                .as_any()
                .downcast_ref::<$TYPEARRAY>()
                .expect("failed to downcast");

            // verify that the result itself is correct
            for (i, x) in $VEC.iter().enumerate() {
                assert_eq!(result.value(i), *x);
            }
        }};
    }

    /// CAST(Int32 AS UInt32) preserves in-range values exactly.
    #[test]
    fn test_cast_i32_u32() -> Result<()> {
        generic_test_cast!(
            Int32Array,
            DataType::Int32,
            vec![1, 2, 3, 4, 5],
            UInt32Array,
            DataType::UInt32,
            vec![1_u32, 2_u32, 3_u32, 4_u32, 5_u32]
        );
        Ok(())
    }

    /// CAST(Int32 AS Utf8) renders each integer as its decimal string.
    #[test]
    fn test_cast_i32_utf8() -> Result<()> {
        generic_test_cast!(
            Int32Array,
            DataType::Int32,
            vec![1, 2, 3, 4, 5],
            StringArray,
            DataType::Utf8,
            vec!["1", "2", "3", "4", "5"]
        );
        Ok(())
    }

    /// CAST(Int64 AS Timestamp(Nanosecond)) preserves the raw i64 values.
    #[test]
    fn test_cast_i64_t64() -> Result<()> {
        let original = vec![1, 2, 3, 4, 5];
        // derive the expected values through arrow itself rather than
        // hard-coding, so the test tracks arrow's timestamp representation
        let expected: Vec<i64> = original
            .iter()
            .map(|i| Time64NanosecondArray::from(vec![*i]).value(0))
            .collect();
        generic_test_cast!(
            Int64Array,
            DataType::Int64,
            original.clone(),
            TimestampNanosecondArray,
            DataType::Timestamp(TimeUnit::Nanosecond, None),
            expected
        );
        Ok(())
    }

    /// Utf8 -> Int32 is not in the supported cast list and must error at
    /// expression-construction time.
    #[test]
    fn invalid_cast() -> Result<()> {
        let schema = Schema::new(vec![Field::new("a", DataType::Utf8, false)]);
        let result = cast(col("a"), &schema, DataType::Int32);
        result.expect_err("Invalid CAST from Utf8 to Int32");
        Ok(())
    }

    /// SUM over Int32 widens its output to Int64, both for the aggregate
    /// expression and for its reducer.
    #[test]
    fn sum_contract() -> Result<()> {
        let schema = Schema::new(vec![Field::new("a", DataType::Int32, false)]);

        let sum = sum(col("a"));
        assert_eq!(DataType::Int64, sum.data_type(&schema)?);

        // after the aggr expression is applied, the schema changes to:
        let schema = Schema::new(vec![
            schema.field(0).clone(),
            Field::new("SUM(a)", sum.data_type(&schema)?, false),
        ]);

        let combiner = sum.create_reducer("SUM(a)");
        assert_eq!(DataType::Int64, combiner.data_type(&schema)?);

        Ok(())
    }

    /// MAX preserves its input type (Int32 in, Int32 out), both for the
    /// aggregate expression and for its reducer.
    #[test]
    fn max_contract() -> Result<()> {
        let schema = Schema::new(vec![Field::new("a", DataType::Int32, false)]);

        let max = max(col("a"));
        assert_eq!(DataType::Int32, max.data_type(&schema)?);

        // after the aggr expression is applied, the schema changes to:
        let schema = Schema::new(vec![
            schema.field(0).clone(),
            Field::new("Max(a)", max.data_type(&schema)?, false),
        ]);

        let combiner = max.create_reducer("Max(a)");
        assert_eq!(DataType::Int32, combiner.data_type(&schema)?);

        Ok(())
    }

    /// MIN over an Int32 column reports Int32 both for the aggregate
    /// expression and for its reducer over the post-aggregation schema.
    #[test]
    fn min_contract() -> Result<()> {
        let schema = Schema::new(vec![Field::new("a", DataType::Int32, false)]);
        let min = min(col("a"));
        let min_type = min.data_type(&schema)?;
        assert_eq!(DataType::Int32, min_type);

        // After the aggregate is applied, the schema gains the result column.
        let reduced_schema = Schema::new(vec![
            schema.field(0).clone(),
            Field::new("MIN(a)", min_type, false),
        ]);

        let combiner = min.create_reducer("MIN(a)");
        assert_eq!(DataType::Int32, combiner.data_type(&reduced_schema)?);
        Ok(())
    }
    /// AVG over an Int32 column reports Float64 both for the aggregate
    /// expression and for its reducer over the post-aggregation schema.
    #[test]
    fn avg_contract() -> Result<()> {
        let schema = Schema::new(vec![Field::new("a", DataType::Int32, false)]);

        let avg = avg(col("a"));
        assert_eq!(DataType::Float64, avg.data_type(&schema)?);

        // after the aggr expression is applied, the schema changes to:
        // (result column renamed "SUM(a)" -> "AVG(a)"; the SUM name was a
        // copy-paste leftover from the sum_contract test)
        let schema = Schema::new(vec![
            schema.field(0).clone(),
            Field::new("AVG(a)", avg.data_type(&schema)?, false),
        ]);

        let combiner = avg.create_reducer("AVG(a)");
        assert_eq!(DataType::Float64, combiner.data_type(&schema)?);

        Ok(())
    }

    /// SUM of 1..=5 over Int32 yields Int64(15).
    #[test]
    fn sum_i32() -> Result<()> {
        let values = Int32Array::from(vec![1, 2, 3, 4, 5]);
        let schema = Schema::new(vec![Field::new("a", DataType::Int32, false)]);
        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(values)])?;
        assert_eq!(do_sum(&batch)?, Some(ScalarValue::Int64(15)));
        Ok(())
    }

    /// AVG of 1..=5 over Int32 yields Float64(3.0).
    #[test]
    fn avg_i32() -> Result<()> {
        let values = Int32Array::from(vec![1, 2, 3, 4, 5]);
        let schema = Schema::new(vec![Field::new("a", DataType::Int32, false)]);
        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(values)])?;
        assert_eq!(do_avg(&batch)?, Some(ScalarValue::Float64(3_f64)));
        Ok(())
    }

    /// MAX of 1..=5 over Int32 yields Int32(5).
    #[test]
    fn max_i32() -> Result<()> {
        let values = Int32Array::from(vec![1, 2, 3, 4, 5]);
        let schema = Schema::new(vec![Field::new("a", DataType::Int32, false)]);
        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(values)])?;
        assert_eq!(do_max(&batch)?, Some(ScalarValue::Int32(5)));
        Ok(())
    }

    /// MAX over a Utf8 column picks the lexicographically greatest string.
    #[test]
    fn max_utf8() -> Result<()> {
        let values = StringArray::from(vec!["d", "a", "c", "b"]);
        let schema = Schema::new(vec![Field::new("a", DataType::Utf8, false)]);
        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(values)])?;
        assert_eq!(do_max(&batch)?, Some(ScalarValue::Utf8("d".to_string())));
        Ok(())
    }

    /// MAX over a LargeUtf8 column picks the lexicographically greatest string.
    #[test]
    fn max_large_utf8() -> Result<()> {
        let values = LargeStringArray::from(vec!["d", "a", "c", "b"]);
        let schema = Schema::new(vec![Field::new("a", DataType::LargeUtf8, false)]);
        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(values)])?;
        assert_eq!(do_max(&batch)?, Some(ScalarValue::Utf8("d".to_string())));
        Ok(())
    }

    /// MIN of 1..=5 over Int32 yields Int32(1).
    #[test]
    fn min_i32() -> Result<()> {
        let values = Int32Array::from(vec![1, 2, 3, 4, 5]);
        let schema = Schema::new(vec![Field::new("a", DataType::Int32, false)]);
        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(values)])?;
        assert_eq!(do_min(&batch)?, Some(ScalarValue::Int32(1)));
        Ok(())
    }

    /// MIN over a Utf8 column picks the lexicographically smallest string.
    #[test]
    fn min_utf8() -> Result<()> {
        let values = StringArray::from(vec!["d", "a", "c", "b"]);
        let schema = Schema::new(vec![Field::new("a", DataType::Utf8, false)]);
        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(values)])?;
        assert_eq!(do_min(&batch)?, Some(ScalarValue::Utf8("a".to_string())));
        Ok(())
    }

    /// MIN over a LargeUtf8 column picks the lexicographically smallest string.
    #[test]
    fn min_large_utf8() -> Result<()> {
        let values = LargeStringArray::from(vec!["d", "a", "c", "b"]);
        let schema = Schema::new(vec![Field::new("a", DataType::LargeUtf8, false)]);
        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(values)])?;
        assert_eq!(do_min(&batch)?, Some(ScalarValue::Utf8("a".to_string())));
        Ok(())
    }

    /// SUM skips null entries: 1 + 3 + 4 + 5 = 13.
    #[test]
    fn sum_i32_with_nulls() -> Result<()> {
        let values = Int32Array::from(vec![Some(1), None, Some(3), Some(4), Some(5)]);
        let schema = Schema::new(vec![Field::new("a", DataType::Int32, false)]);
        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(values)])?;
        assert_eq!(do_sum(&batch)?, Some(ScalarValue::Int64(13)));
        Ok(())
    }

    /// AVG skips null entries: (1 + 3 + 4 + 5) / 4 = 3.25.
    #[test]
    fn avg_i32_with_nulls() -> Result<()> {
        let values = Int32Array::from(vec![Some(1), None, Some(3), Some(4), Some(5)]);
        let schema = Schema::new(vec![Field::new("a", DataType::Int32, false)]);
        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(values)])?;
        assert_eq!(do_avg(&batch)?, Some(ScalarValue::Float64(3.25)));
        Ok(())
    }

    /// MAX ignores null entries and returns the greatest non-null value.
    #[test]
    fn max_i32_with_nulls() -> Result<()> {
        let values = Int32Array::from(vec![Some(1), None, Some(3), Some(4), Some(5)]);
        let schema = Schema::new(vec![Field::new("a", DataType::Int32, false)]);
        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(values)])?;
        assert_eq!(do_max(&batch)?, Some(ScalarValue::Int32(5)));
        Ok(())
    }

    /// MIN ignores null entries and returns the smallest non-null value.
    #[test]
    fn min_i32_with_nulls() -> Result<()> {
        let values = Int32Array::from(vec![Some(1), None, Some(3), Some(4), Some(5)]);
        let schema = Schema::new(vec![Field::new("a", DataType::Int32, false)]);
        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(values)])?;
        assert_eq!(do_min(&batch)?, Some(ScalarValue::Int32(1)));
        Ok(())
    }

    /// SUM over an all-null column yields None rather than zero.
    #[test]
    fn sum_i32_all_nulls() -> Result<()> {
        let values = Int32Array::from(vec![None, None]);
        let schema = Schema::new(vec![Field::new("a", DataType::Int32, false)]);
        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(values)])?;
        assert_eq!(do_sum(&batch)?, None);
        Ok(())
    }

    /// MAX over an all-null column yields None.
    #[test]
    fn max_i32_all_nulls() -> Result<()> {
        let values = Int32Array::from(vec![None, None]);
        let schema = Schema::new(vec![Field::new("a", DataType::Int32, false)]);
        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(values)])?;
        assert_eq!(do_max(&batch)?, None);
        Ok(())
    }

    /// MIN over an all-null column yields None.
    #[test]
    fn min_i32_all_nulls() -> Result<()> {
        let values = Int32Array::from(vec![None, None]);
        let schema = Schema::new(vec![Field::new("a", DataType::Int32, false)]);
        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(values)])?;
        assert_eq!(do_min(&batch)?, None);
        Ok(())
    }

    /// AVG over an all-null column yields None rather than NaN or zero.
    #[test]
    fn avg_i32_all_nulls() -> Result<()> {
        let values = Int32Array::from(vec![None, None]);
        let schema = Schema::new(vec![Field::new("a", DataType::Int32, false)]);
        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(values)])?;
        assert_eq!(do_avg(&batch)?, None);
        Ok(())
    }

    /// SUM of 1..=5 over UInt32 widens to UInt64(15).
    #[test]
    fn sum_u32() -> Result<()> {
        let values = UInt32Array::from(vec![1_u32, 2_u32, 3_u32, 4_u32, 5_u32]);
        let schema = Schema::new(vec![Field::new("a", DataType::UInt32, false)]);
        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(values)])?;
        assert_eq!(do_sum(&batch)?, Some(ScalarValue::UInt64(15_u64)));
        Ok(())
    }

    /// AVG of 1..=5 over UInt32 yields Float64(3.0).
    #[test]
    fn avg_u32() -> Result<()> {
        let values = UInt32Array::from(vec![1_u32, 2_u32, 3_u32, 4_u32, 5_u32]);
        let schema = Schema::new(vec![Field::new("a", DataType::UInt32, false)]);
        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(values)])?;
        assert_eq!(do_avg(&batch)?, Some(ScalarValue::Float64(3_f64)));
        Ok(())
    }

    /// MAX of 1..=5 over UInt32 yields UInt32(5).
    #[test]
    fn max_u32() -> Result<()> {
        let values = UInt32Array::from(vec![1_u32, 2_u32, 3_u32, 4_u32, 5_u32]);
        let schema = Schema::new(vec![Field::new("a", DataType::UInt32, false)]);
        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(values)])?;
        assert_eq!(do_max(&batch)?, Some(ScalarValue::UInt32(5_u32)));
        Ok(())
    }

    /// MIN of 1..=5 over UInt32 yields UInt32(1).
    #[test]
    fn min_u32() -> Result<()> {
        let values = UInt32Array::from(vec![1_u32, 2_u32, 3_u32, 4_u32, 5_u32]);
        let schema = Schema::new(vec![Field::new("a", DataType::UInt32, false)]);
        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(values)])?;
        assert_eq!(do_min(&batch)?, Some(ScalarValue::UInt32(1_u32)));
        Ok(())
    }

    /// SUM of 1..=5 over Float32 stays Float32(15.0).
    #[test]
    fn sum_f32() -> Result<()> {
        let values = Float32Array::from(vec![1_f32, 2_f32, 3_f32, 4_f32, 5_f32]);
        let schema = Schema::new(vec![Field::new("a", DataType::Float32, false)]);
        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(values)])?;
        assert_eq!(do_sum(&batch)?, Some(ScalarValue::Float32(15_f32)));
        Ok(())
    }

    /// AVG of 1..=5 over Float32 widens to Float64(3.0).
    #[test]
    fn avg_f32() -> Result<()> {
        let values = Float32Array::from(vec![1_f32, 2_f32, 3_f32, 4_f32, 5_f32]);
        let schema = Schema::new(vec![Field::new("a", DataType::Float32, false)]);
        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(values)])?;
        assert_eq!(do_avg(&batch)?, Some(ScalarValue::Float64(3_f64)));
        Ok(())
    }

    /// MAX of 1..=5 over Float32 yields Float32(5.0).
    #[test]
    fn max_f32() -> Result<()> {
        let values = Float32Array::from(vec![1_f32, 2_f32, 3_f32, 4_f32, 5_f32]);
        let schema = Schema::new(vec![Field::new("a", DataType::Float32, false)]);
        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(values)])?;
        assert_eq!(do_max(&batch)?, Some(ScalarValue::Float32(5_f32)));
        Ok(())
    }

    /// MIN of 1..=5 over Float32 yields Float32(1.0).
    #[test]
    fn min_f32() -> Result<()> {
        let values = Float32Array::from(vec![1_f32, 2_f32, 3_f32, 4_f32, 5_f32]);
        let schema = Schema::new(vec![Field::new("a", DataType::Float32, false)]);
        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(values)])?;
        assert_eq!(do_min(&batch)?, Some(ScalarValue::Float32(1_f32)));
        Ok(())
    }

    /// SUM of 1..=5 over Float64 yields Float64(15.0).
    #[test]
    fn sum_f64() -> Result<()> {
        let values = Float64Array::from(vec![1_f64, 2_f64, 3_f64, 4_f64, 5_f64]);
        let schema = Schema::new(vec![Field::new("a", DataType::Float64, false)]);
        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(values)])?;
        assert_eq!(do_sum(&batch)?, Some(ScalarValue::Float64(15_f64)));
        Ok(())
    }

    /// AVG of 1..=5 over Float64 yields Float64(3.0).
    #[test]
    fn avg_f64() -> Result<()> {
        let values = Float64Array::from(vec![1_f64, 2_f64, 3_f64, 4_f64, 5_f64]);
        let schema = Schema::new(vec![Field::new("a", DataType::Float64, false)]);
        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(values)])?;
        assert_eq!(do_avg(&batch)?, Some(ScalarValue::Float64(3_f64)));
        Ok(())
    }

    /// MAX of 1..=5 over Float64 yields Float64(5.0).
    #[test]
    fn max_f64() -> Result<()> {
        let values = Float64Array::from(vec![1_f64, 2_f64, 3_f64, 4_f64, 5_f64]);
        let schema = Schema::new(vec![Field::new("a", DataType::Float64, false)]);
        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(values)])?;
        assert_eq!(do_max(&batch)?, Some(ScalarValue::Float64(5_f64)));
        Ok(())
    }

    /// MIN of 1..=5 over Float64 yields Float64(1.0).
    #[test]
    fn min_f64() -> Result<()> {
        let values = Float64Array::from(vec![1_f64, 2_f64, 3_f64, 4_f64, 5_f64]);
        let schema = Schema::new(vec![Field::new("a", DataType::Float64, false)]);
        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(values)])?;
        assert_eq!(do_min(&batch)?, Some(ScalarValue::Float64(1_f64)));
        Ok(())
    }

    /// COUNT over five non-null rows yields UInt64(5).
    #[test]
    fn count_elements() -> Result<()> {
        let values = Int32Array::from(vec![1, 2, 3, 4, 5]);
        let schema = Schema::new(vec![Field::new("a", DataType::Int32, false)]);
        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(values)])?;
        assert_eq!(do_count(&batch)?, Some(ScalarValue::UInt64(5)));
        Ok(())
    }

    /// COUNT only counts non-null rows: three of six here.
    #[test]
    fn count_with_nulls() -> Result<()> {
        let values = Int32Array::from(vec![Some(1), Some(2), None, None, Some(3), None]);
        let schema = Schema::new(vec![Field::new("a", DataType::Int32, false)]);
        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(values)])?;
        assert_eq!(do_count(&batch)?, Some(ScalarValue::UInt64(3)));
        Ok(())
    }

    /// COUNT over an all-null column yields UInt64(0), not None.
    #[test]
    fn count_all_nulls() -> Result<()> {
        let values =
            BooleanArray::from(vec![None, None, None, None, None, None, None, None]);
        let schema = Schema::new(vec![Field::new("a", DataType::Boolean, false)]);
        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(values)])?;
        assert_eq!(do_count(&batch)?, Some(ScalarValue::UInt64(0)));
        Ok(())
    }

    /// COUNT over a zero-row column yields UInt64(0).
    #[test]
    fn count_empty() -> Result<()> {
        let no_rows: Vec<bool> = vec![];
        let values = BooleanArray::from(no_rows);
        let schema = Schema::new(vec![Field::new("a", DataType::Boolean, false)]);
        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(values)])?;
        assert_eq!(do_count(&batch)?, Some(ScalarValue::UInt64(0)));
        Ok(())
    }

    /// COUNT works over a Utf8 column.
    #[test]
    fn count_utf8() -> Result<()> {
        let values = StringArray::from(vec!["a", "bb", "ccc", "dddd", "ad"]);
        let schema = Schema::new(vec![Field::new("a", DataType::Utf8, false)]);
        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(values)])?;
        assert_eq!(do_count(&batch)?, Some(ScalarValue::UInt64(5)));
        Ok(())
    }

    /// COUNT works over a LargeUtf8 column.
    #[test]
    fn count_large_utf8() -> Result<()> {
        let values = LargeStringArray::from(vec!["a", "bb", "ccc", "dddd", "ad"]);
        let schema = Schema::new(vec![Field::new("a", DataType::LargeUtf8, false)]);
        let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(values)])?;
        assert_eq!(do_count(&batch)?, Some(ScalarValue::UInt64(5)));
        Ok(())
    }

    fn do_sum(batch: &RecordBatch) -> Result<Option<ScalarValue>> {
        let sum = sum(col("a"));
        let accum = sum.create_accumulator();
        let input = sum.evaluate_input(batch)?;
        let mut accum = accum.borrow_mut();
        for i in 0..batch.num_rows() {
            accum.accumulate_scalar(get_scalar_value(&input, i)?)?;
        }
        accum.get_value()
    }

    fn do_max(batch: &RecordBatch) -> Result<Option<ScalarValue>> {
        let max = max(col("a"));
        let accum = max.create_accumulator();
        let input = max.evaluate_input(batch)?;
        let mut accum = accum.borrow_mut();
        for i in 0..batch.num_rows() {
            accum.accumulate_scalar(get_scalar_value(&input, i)?)?;
        }
        accum.get_value()
    }

    fn do_min(batch: &RecordBatch) -> Result<Option<ScalarValue>> {
        let min = min(col("a"));
        let accum = min.create_accumulator();
        let input = min.evaluate_input(batch)?;
        let mut accum = accum.borrow_mut();
        for i in 0..batch.num_rows() {
            accum.accumulate_scalar(get_scalar_value(&input, i)?)?;
        }
        accum.get_value()
    }

    fn do_count(batch: &RecordBatch) -> Result<Option<ScalarValue>> {
        let count = count(col("a"));
        let accum = count.create_accumulator();
        let input = count.evaluate_input(batch)?;
        let mut accum = accum.borrow_mut();
        for i in 0..batch.num_rows() {
            accum.accumulate_scalar(get_scalar_value(&input, i)?)?;
        }
        accum.get_value()
    }

    fn do_avg(batch: &RecordBatch) -> Result<Option<ScalarValue>> {
        let avg = avg(col("a"));
        let accum = avg.create_accumulator();
        let input = avg.evaluate_input(batch)?;
        let mut accum = accum.borrow_mut();
        for i in 0..batch.num_rows() {
            accum.accumulate_scalar(get_scalar_value(&input, i)?)?;
        }
        accum.get_value()
    }

    /// `a + b` over two Int32 columns produces the element-wise sum.
    #[test]
    fn plus_op() -> Result<()> {
        let schema = Schema::new(vec![
            Field::new("a", DataType::Int32, false),
            Field::new("b", DataType::Int32, false),
        ]);
        let lhs = Int32Array::from(vec![1, 2, 3, 4, 5]);
        let rhs = Int32Array::from(vec![1, 2, 4, 8, 16]);
        let expected = Int32Array::from(vec![2, 4, 7, 12, 21]);

        apply_arithmetic::<Int32Type>(
            Arc::new(schema),
            vec![Arc::new(lhs), Arc::new(rhs)],
            Operator::Plus,
            expected,
        )?;
        Ok(())
    }

    /// `a - b` over two Int32 columns, in both operand orders, so negative
    /// results are exercised for the signed type.
    #[test]
    fn minus_op() -> Result<()> {
        let schema = Arc::new(Schema::new(vec![
            Field::new("a", DataType::Int32, false),
            Field::new("b", DataType::Int32, false),
        ]));
        let big = Arc::new(Int32Array::from(vec![1, 2, 4, 8, 16]));
        let small = Arc::new(Int32Array::from(vec![1, 2, 3, 4, 5]));

        // big - small: all results non-negative
        apply_arithmetic::<Int32Type>(
            schema.clone(),
            vec![big.clone(), small.clone()],
            Operator::Minus,
            Int32Array::from(vec![0, 0, 1, 4, 11]),
        )?;

        // small - big: signed subtraction must handle negative results
        apply_arithmetic::<Int32Type>(
            schema,
            vec![small, big],
            Operator::Minus,
            Int32Array::from(vec![0, 0, -1, -4, -11]),
        )?;

        Ok(())
    }

    /// `a * b` over two Int32 columns produces the element-wise product.
    #[test]
    fn multiply_op() -> Result<()> {
        let schema = Arc::new(Schema::new(vec![
            Field::new("a", DataType::Int32, false),
            Field::new("b", DataType::Int32, false),
        ]));
        let lhs = Arc::new(Int32Array::from(vec![4, 8, 16, 32, 64]));
        let rhs = Arc::new(Int32Array::from(vec![2, 4, 8, 16, 32]));
        let expected = Int32Array::from(vec![8, 32, 128, 512, 2048]);

        apply_arithmetic::<Int32Type>(schema, vec![lhs, rhs], Operator::Multiply, expected)?;
        Ok(())
    }

    /// `a / b` over two Int32 columns produces the element-wise quotient.
    #[test]
    fn divide_op() -> Result<()> {
        let schema = Arc::new(Schema::new(vec![
            Field::new("a", DataType::Int32, false),
            Field::new("b", DataType::Int32, false),
        ]));
        let lhs = Arc::new(Int32Array::from(vec![8, 32, 128, 512, 2048]));
        let rhs = Arc::new(Int32Array::from(vec![2, 4, 8, 16, 32]));
        let expected = Int32Array::from(vec![4, 8, 16, 32, 64]);

        apply_arithmetic::<Int32Type>(schema, vec![lhs, rhs], Operator::Divide, expected)?;
        Ok(())
    }

    /// Builds a batch from `schema` + `data`, evaluates `a <op> b` over it,
    /// and asserts the result equals `expected` element-wise.
    fn apply_arithmetic<T: ArrowNumericType>(
        schema: SchemaRef,
        data: Vec<ArrayRef>,
        op: Operator,
        expected: PrimitiveArray<T>,
    ) -> Result<()> {
        let batch = RecordBatch::try_new(schema, data)?;
        let result = binary_simple(col("a"), op, col("b")).evaluate(&batch)?;
        assert_array_eq::<T>(expected, result);
        Ok(())
    }

    /// Asserts that `actual` downcasts to `PrimitiveArray<T>` and matches
    /// `expected` in both length and element values.
    fn assert_array_eq<T: ArrowNumericType>(
        expected: PrimitiveArray<T>,
        actual: ArrayRef,
    ) {
        let actual = actual
            .as_any()
            .downcast_ref::<PrimitiveArray<T>>()
            .expect("Actual array should unwrap to type of expected array");

        // Compare lengths first: a shorter `actual` would otherwise panic
        // inside `value(i)`, and a longer one would pass with extra,
        // unchecked elements.
        assert_eq!(expected.len(), actual.len(), "array lengths differ");

        for i in 0..expected.len() {
            assert_eq!(
                expected.value(i),
                actual.value(i),
                "arrays differ at index {}",
                i
            );
        }
    }

    /// NOT over a Boolean column inverts each value.
    #[test]
    fn neg_op() -> Result<()> {
        let schema = Schema::new(vec![Field::new("a", DataType::Boolean, true)]);
        let a = BooleanArray::from(vec![true, false]);
        let batch = RecordBatch::try_new(Arc::new(schema.clone()), vec![Arc::new(a)])?;

        // expression: "!a"
        // (local renamed from the misleading `lt` — a copy-paste leftover
        // from a less-than test — to `not_a`)
        let not_a = not(col("a"));
        let result = not_a.evaluate(&batch)?;
        assert_eq!(result.len(), 2);

        let result = result
            .as_any()
            .downcast_ref::<BooleanArray>()
            .expect("failed to downcast to BooleanArray");

        // Loop bound tied to `expected` rather than hard-coded.
        let expected = vec![false, true];
        for (i, want) in expected.iter().enumerate() {
            assert_eq!(result.value(i), *want);
        }

        Ok(())
    }
}
