use alloc::vec::Vec;
use core::str::FromStr;
use strum_macros::{AsRefStr, Display, EnumString};

use super::pass::{Pass, PassResult};
use crate::ir::{
    graph::Graph,
    node::NodeId,
    tensor::{DType, Tensor},
};

/// Elementwise binary ops this pass can evaluate at compile time.
///
/// `EnumString` gives a `FromStr` impl, which `Pass::run` uses to map a
/// node's `op_type` string (e.g. "Add") onto a variant — so the variant
/// names must match the graph's op names exactly.
/// NOTE(review): `AsRefStr`/`Display` are derived but not used in this
/// chunk — presumably used elsewhere; confirm before removing.
#[derive(Clone, Copy, Debug, PartialEq, Eq, AsRefStr, EnumString, Display)]
enum ElementwiseKind {
    Add,
    Sub,
    Mul,
    Div,
    Max,
    Min,
}

/// Optimization pass that evaluates elementwise binary nodes whose inputs
/// are both compile-time constants, storing the result as a constant on
/// the node's output value and logically detaching the node.
pub struct ConstantFolding {}

impl ConstantFolding {
    /// Number of elements implied by `shape`. An empty shape yields 1
    /// (rank-0 scalar). Saturating so a degenerate shape cannot overflow.
    #[inline]
    fn num_elements(shape: &[usize]) -> usize {
        shape.iter().fold(1usize, |acc, &d| acc.saturating_mul(d))
    }

    /// A tensor counts as a scalar when it holds exactly one element,
    /// regardless of rank (`[]`, `[1]`, `[1, 1]`, ...).
    #[inline]
    fn is_scalar(shape: &[usize]) -> bool {
        Self::num_elements(shape) == 1
    }

    /// Wrap `values` into a freshly allocated `Tensor` of `dtype`/`shape`.
    ///
    /// Invariant: `values.len()` equals the element count of `shape`
    /// (guaranteed by `fold_elementwise`, which sizes its output from the
    /// input slices); `copy_from_slice` enforces this at runtime.
    fn build_tensor_from_vec<T: Copy>(values: Vec<T>, dtype: DType, shape: Vec<usize>) -> Tensor {
        let mut tensor = Tensor::new(dtype, shape);
        let out_slice = tensor.as_mut_slice::<T>();
        debug_assert_eq!(out_slice.len(), values.len());
        out_slice.copy_from_slice(&values);
        tensor
    }

    /// Core folding kernel shared by the float and integer paths.
    ///
    /// Supports exactly three layouts: identical shapes, scalar `a`, or
    /// scalar `b`; anything else (general broadcasting) returns `None` and
    /// the node is left unfolded.
    ///
    /// `forbidden_divisor`: when `Some(zero)` and `kind` is `Div`, folding
    /// is refused if any divisor element equals `zero` — integer division
    /// by zero would panic inside the pass, so the node is left for the
    /// runtime to report. Float callers pass `None` (IEEE-754 defines
    /// division by zero as ±inf/NaN).
    fn fold_elementwise<T>(
        kind: ElementwiseKind,
        a_data: &[T],
        a_shape: &[usize],
        b_data: &[T],
        b_shape: &[usize],
        forbidden_divisor: Option<T>,
    ) -> Option<(Vec<T>, Vec<usize>)>
    where
        T: Copy
            + PartialOrd
            + core::ops::Add<Output = T>
            + core::ops::Sub<Output = T>
            + core::ops::Mul<Output = T>
            + core::ops::Div<Output = T>,
    {
        let same_shape = a_shape == b_shape;
        let a_is_scalar = Self::is_scalar(a_shape);
        let b_is_scalar = Self::is_scalar(b_shape);
        if !(same_shape || a_is_scalar || b_is_scalar) {
            return None;
        }

        // Divisor guard: every layout uses divisor values drawn from
        // `b_data` (when `b` is a scalar its slice has exactly one
        // element), so a single scan covers all three cases.
        // NOTE(review): signed-overflow division (e.g. i64::MIN / -1)
        // would still panic here; detecting it generically needs extra
        // trait bounds — TODO if such graphs can occur.
        if matches!(kind, ElementwiseKind::Div) {
            if let Some(zero) = forbidden_divisor {
                if b_data.iter().any(|&v| v == zero) {
                    return None;
                }
            }
        }

        let apply = |x: T, y: T| -> T {
            match kind {
                ElementwiseKind::Add => x + y,
                ElementwiseKind::Sub => x - y,
                ElementwiseKind::Mul => x * y,
                ElementwiseKind::Div => x / y,
                // PartialOrd only: for incomparable values (float NaN)
                // both comparisons are false and `y` is returned.
                ElementwiseKind::Max => {
                    if x > y {
                        x
                    } else {
                        y
                    }
                }
                ElementwiseKind::Min => {
                    if x < y {
                        x
                    } else {
                        y
                    }
                }
            }
        };

        // Same-shape wins when several conditions hold (e.g. two scalars
        // with identical shapes), matching the original precedence.
        let (out, out_shape): (Vec<T>, Vec<usize>) = if same_shape {
            (
                a_data.iter().zip(b_data).map(|(&x, &y)| apply(x, y)).collect(),
                a_shape.to_vec(),
            )
        } else if a_is_scalar {
            let s = a_data[0];
            (b_data.iter().map(|&y| apply(s, y)).collect(), b_shape.to_vec())
        } else {
            let s = b_data[0];
            (a_data.iter().map(|&x| apply(x, s)).collect(), a_shape.to_vec())
        };

        Some((out, out_shape))
    }

    /// Fold two floating-point constant tensors of element type `T`.
    /// Division by zero is permitted (IEEE-754 semantics), so no divisor
    /// guard is passed.
    fn fold_for_float<T>(
        kind: ElementwiseKind,
        a: &Tensor,
        b: &Tensor,
        dtype: DType,
    ) -> Option<Tensor>
    where
        T: Copy
            + PartialOrd
            + core::ops::Add<Output = T>
            + core::ops::Sub<Output = T>
            + core::ops::Mul<Output = T>
            + core::ops::Div<Output = T>,
    {
        let (out_vec, out_shape) = Self::fold_elementwise::<T>(
            kind,
            a.as_slice::<T>(),
            &a.shape,
            b.as_slice::<T>(),
            &b.shape,
            None,
        )?;
        Some(Self::build_tensor_from_vec::<T>(out_vec, dtype, out_shape))
    }

    /// Fold two integer constant tensors of element type `T`.
    /// `T::default()` (zero for all integer primitives) is the forbidden
    /// divisor, so a division by zero is never folded.
    fn fold_for_int<T>(
        kind: ElementwiseKind,
        a: &Tensor,
        b: &Tensor,
        dtype: DType,
    ) -> Option<Tensor>
    where
        T: Copy
            + PartialOrd
            + core::ops::Add<Output = T>
            + core::ops::Sub<Output = T>
            + core::ops::Mul<Output = T>
            + core::ops::Div<Output = T>
            + Default,
    {
        let (out_vec, out_shape) = Self::fold_elementwise::<T>(
            kind,
            a.as_slice::<T>(),
            &a.shape,
            b.as_slice::<T>(),
            &b.shape,
            Some(T::default()),
        )?;
        Some(Self::build_tensor_from_vec::<T>(out_vec, dtype, out_shape))
    }

    /// Detach `node_id` from the graph without physically removing it or
    /// reindexing other nodes: drop it from every input's consumer list,
    /// clear its outputs' producer entries, then empty the node so later
    /// sweeps recognize it as dead (empty `op_type`).
    #[inline]
    fn remove_node_logically(graph: &mut Graph, node_id: NodeId) {
        if node_id as usize >= graph.nodes.len() {
            return;
        }
        let node = &graph.nodes[node_id as usize];
        // Unregister this node as a consumer of each of its inputs.
        // swap_remove is fine: consumer-list order is not meaningful here.
        for &input_value in &node.inputs {
            if let Some(consumer_list) = graph.consumers.get_mut(input_value as usize) {
                if let Some(pos) = consumer_list.iter().position(|&x| x == node_id) {
                    consumer_list.swap_remove(pos);
                }
            }
        }

        // Clear producer links only if they still point at this node.
        for &output_value in &node.outputs {
            if let Some(producer) = graph.producers.get_mut(output_value as usize) {
                if producer == &Some(node_id) {
                    *producer = None;
                }
            }
        }

        // Empty the node in place; its slot stays allocated so NodeIds
        // elsewhere remain valid.
        let node_mut = &mut graph.nodes[node_id as usize];
        node_mut.inputs.clear();
        node_mut.outputs.clear();
        node_mut.attrs.clear();
        node_mut.domain.clear();
        node_mut.op_type.clear();
    }
}

impl Pass for ConstantFolding {
    fn name(&self) -> &'static str {
        "ConstFold"
    }

    /// One sweep over the graph in topological order: every elementwise
    /// node whose two inputs are constants of the same dtype is evaluated,
    /// its output value marked constant, and the node logically detached.
    /// Returns whether anything changed.
    fn run(&mut self, graph: &mut Graph) -> PassResult {
        let mut changed = false;

        for node_id in graph.topo_order() {
            let node = &graph.nodes[node_id as usize];
            // Logically removed nodes have an empty op_type; skip early.
            if node.op_type.is_empty() {
                continue;
            }

            // Only ops named exactly like an ElementwiseKind variant fold.
            let kind = match ElementwiseKind::from_str(&node.op_type) {
                Ok(k) => k,
                Err(_) => continue,
            };
            if node.inputs.len() != 2 || node.outputs.len() != 1 {
                continue;
            }

            let input_a_id = node.inputs[0];
            let input_b_id = node.inputs[1];
            let output_value_id = node.outputs[0];

            // Both operands must be compile-time constants of one dtype;
            // mixed-dtype folding would need implicit casts we don't do.
            let (const_a, const_b) = match (
                graph.values[input_a_id as usize].constant.as_ref(),
                graph.values[input_b_id as usize].constant.as_ref(),
            ) {
                (Some(a), Some(b)) if a.dtype == b.dtype => (a, b),
                _ => continue,
            };
            let dtype = const_a.dtype;

            // Shape compatibility (same shape / scalar operand) is checked
            // inside the fold helpers, which return None otherwise — no
            // separate precheck needed here.
            let folded: Option<Tensor> = match dtype {
                DType::F32 => Self::fold_for_float::<f32>(kind, const_a, const_b, dtype),
                DType::F64 => Self::fold_for_float::<f64>(kind, const_a, const_b, dtype),

                DType::I8 => Self::fold_for_int::<i8>(kind, const_a, const_b, dtype),
                DType::I16 => Self::fold_for_int::<i16>(kind, const_a, const_b, dtype),
                DType::I32 => Self::fold_for_int::<i32>(kind, const_a, const_b, dtype),
                DType::I64 => Self::fold_for_int::<i64>(kind, const_a, const_b, dtype),

                DType::U8 => Self::fold_for_int::<u8>(kind, const_a, const_b, dtype),
                DType::U16 => Self::fold_for_int::<u16>(kind, const_a, const_b, dtype),
                DType::U32 => Self::fold_for_int::<u32>(kind, const_a, const_b, dtype),
                DType::U64 => Self::fold_for_int::<u64>(kind, const_a, const_b, dtype),

                // bool / f16 / other dtypes: not folded by this pass.
                _ => None,
            };

            if let Some(tensor) = folded {
                if let Some(value) = graph.values.get_mut(output_value_id as usize) {
                    value.constant = Some(tensor);
                }
                // Detach the now-dead node; presumably a later DCE or
                // compaction pass physically removes it — confirm.
                Self::remove_node_logically(graph, node_id);
                changed = true;
            }
        }

        PassResult { changed }
    }
}
