use crate::{nn::{active::Activation, layer::Linear}, Var};

/// A fully-connected feed-forward network (multilayer perceptron) built from
/// `Linear` layers, each sharing the same activation function.
pub struct MLP {
    // Layer widths: archs[0] is the input size, archs.last() the output size.
    archs: Vec<usize>,
    // One Linear layer per adjacent pair in `archs` (so archs.len() - 1 layers).
    layers: Vec<Linear>,
}

impl MLP {
    /// Builds a multilayer perceptron from `archs`, where `archs[0]` is the
    /// input width, the last entry is the output width, and each adjacent pair
    /// `(archs[i], archs[i + 1])` defines one `Linear` layer. Every layer uses
    /// the same `activation`.
    ///
    /// # Panics
    /// Panics if `archs` has fewer than two entries (no layer can be formed).
    pub fn new(archs: Vec<usize>, activation: Activation) -> Self {
        assert!(archs.len() > 1, "MLP needs at least an input and an output width");
        // `windows(2)` yields each adjacent (input_size, output_size) pair.
        let layers = archs
            .windows(2)
            .map(|pair| Linear::new(pair[0], pair[1], Some(activation)))
            .collect();

        Self { archs, layers }
    }

    /// Number of inputs the network expects in `forward`.
    pub fn input_size(&self) -> usize {
        self.archs[0]
    }

    /// Number of outputs the network produces from `forward`.
    pub fn output_size(&self) -> usize {
        // `usize` is `Copy`: dereference instead of cloning.
        *self.archs.last().expect("archs is non-empty (checked in new)")
    }

    /// Runs a forward pass through every layer in order and returns the
    /// activations of the final layer.
    ///
    /// # Panics
    /// Panics if `xs.len()` differs from [`MLP::input_size`].
    pub fn forward(&self, xs: &[Var]) -> Vec<Var> {
        assert_eq!(xs.len(), self.input_size());
        let mut xs = xs.to_vec();
        for layer in &self.layers {
            xs = layer.forward(&xs);
        }

        // Sanity check: the last layer's width must match the declared output.
        assert_eq!(xs.len(), self.output_size());
        xs
    }

    /// Iterates over the trainable parameters of every layer, in layer order,
    /// for handing to an optimizer.
    pub fn parameters(&self) -> impl Iterator<Item = &Var> {
        self.layers.iter().flat_map(|layer| layer.parameters())
    }
}

#[cfg(test)]
mod test {
    use crate::nn::criterion::Criterion;
    #[allow(unused)]
    use crate::{
        nn::{
            optim::{Optim, Momentum, SDG},
            criterion::MSELoss,
        },
        var
    };
    use float_cmp::approx_eq;
    use super::*;

    #[test]
    fn test_train_and_gate() {
        let train_xs = [
            [var!(0), var!(0)],
            [var!(0), var!(1)],
            [var!(1), var!(0)],
            [var!(1), var!(1)],
        ];
        // AND truth table: only (1, 1) -> 1.
        let train_targets = [
            var!(0), var!(0), var!(0), var!(1)
        ];

        let mlp = MLP::new(vec![2, 4, 1], Activation::Tanh);
        let mut optimizer = Momentum::new(mlp.parameters(), 0.1, 0.1);
        let criterion = MSELoss::new();

        for _ in 0..5000 {
            for (xs, target) in train_xs.iter().zip(train_targets.iter()) {
                let pred = mlp.forward(xs);
                let loss = criterion.loss(&pred, &[target.clone()]);
                optimizer.zero_grad();
                loss.backward();
                optimizer.step();
            }
        }

        for (xs, target) in train_xs.iter().zip(train_targets.iter()) {
            let pred = mlp.forward(xs);
            println!("{} & {} => {}", xs[0].data(), xs[1].data(), pred[0].data());
            assert!(approx_eq!(f64, pred[0].data(), target.data(), epsilon = 1e-2));
        }
    }

    #[test]
    fn test_train_or_gate() {
        let train_xs = [
            [var!(0), var!(0)],
            [var!(0), var!(1)],
            [var!(1), var!(0)],
            [var!(1), var!(1)],
        ];
        // OR truth table: only (0, 0) -> 0.
        // (The previous targets [1, 1, 1, 0] described NAND, not OR.)
        let train_targets = [
            var!(0), var!(1), var!(1), var!(1)
        ];

        let mlp = MLP::new(vec![2, 4, 1], Activation::Tanh);
        let mut optimizer = SDG::new(mlp.parameters(), 0.1);
        let criterion = MSELoss::new();

        for _ in 0..20000 {
            for (xs, target) in train_xs.iter().zip(train_targets.iter()) {
                let pred = mlp.forward(xs);
                let loss = criterion.loss(&pred, &[target.clone()]);
                optimizer.zero_grad();
                loss.backward();
                optimizer.step();
            }
        }

        for (xs, target) in train_xs.iter().zip(train_targets.iter()) {
            let pred = mlp.forward(xs);
            println!("{} | {} => {}", xs[0].data(), xs[1].data(), pred[0].data());
            assert!(approx_eq!(f64, pred[0].data(), target.data(), epsilon = 1e-2));
        }
    }

    #[test]
    fn test_train_xor_gate() {
        let train_xs = [
            [var!(0), var!(0)],
            [var!(0), var!(1)],
            [var!(1), var!(0)],
            [var!(1), var!(1)],
        ];
        // XOR truth table: 1 exactly when the inputs differ.
        let train_targets = [
            var!(0), var!(1), var!(1), var!(0)
        ];

        let mlp = MLP::new(vec![2, 4, 1], Activation::Tanh);
        let mut optimizer = SDG::new(mlp.parameters(), 0.1);
        let criterion = MSELoss::new();

        for _ in 0..10000 {
            for (xs, target) in train_xs.iter().zip(train_targets.iter()) {
                let pred = mlp.forward(xs);
                let loss = criterion.loss(&pred, &[target.clone()]);
                optimizer.zero_grad();
                loss.backward();
                optimizer.step();
            }
        }

        for (xs, target) in train_xs.iter().zip(train_targets.iter()) {
            let pred = mlp.forward(xs);
            println!("{} ^ {} => {}", xs[0].data(), xs[1].data(), pred[0].data());
            assert!(approx_eq!(f64, pred[0].data(), target.data(), epsilon = 1e-2));
        }
    }
}