use crate::Var;
use super::Optim;

/// Adam optimizer (adaptive moment estimation).
///
/// Keeps per-parameter exponential moving averages of the gradient (first
/// moment) and the squared gradient (second moment), with bias correction
/// driven by the step counter.
pub struct Adam {
    // Parameters this optimizer updates on each `step()`.
    parameters: Vec<Var>,

    // Step size applied to the bias-corrected update.
    learn_rate: f64,
    // Decay rate for the first-moment (mean) estimate.
    beta1: f64,
    // Decay rate for the second-moment (uncentered variance) estimate.
    beta2: f64,
    // Small constant added to the denominator for numerical stability.
    epsilon: f64,

    // Per-parameter (first moment, second moment) state, indexed in
    // lockstep with `parameters`.
    mv: Vec<(f64, f64)>,

    // Step counter t, stored as f64 because it is only used as a float
    // exponent in the bias-correction terms beta^t.
    time: f64,
}

impl Adam {
    /// Builds an optimizer over the given parameters with explicit
    /// hyperparameters.
    ///
    /// The parameters are cloned out of the iterator; moment state starts
    /// at zero and the step counter at t = 0.
    pub fn new<'a, I: Iterator<Item = &'a Var>>(parameters: I, learn_rate: f64, beta1: f64, beta2: f64, epsilon: f64) -> Self {
        let params: Vec<Var> = parameters.cloned().collect();
        let state_len = params.len();
        Self {
            mv: vec![(0.0, 0.0); state_len],
            parameters: params,
            learn_rate,
            beta1,
            beta2,
            epsilon,
            time: 0.0,
        }
    }

    /// Convenience constructor using common defaults:
    /// lr = 0.01, beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8.
    pub fn default<'a, I: Iterator<Item = &'a Var>>(parameters: I) -> Self {
        Self::new(parameters, 0.01, 0.9, 0.999, 1e-8)
    }

    /// Configured learning rate.
    pub fn learn_rate(&self) -> f64 {
        self.learn_rate
    }

    /// Configured numerical-stability constant.
    pub fn epsilon(&self) -> f64 {
        self.epsilon
    }

    /// Configured first-moment decay rate.
    pub fn beta1(&self) -> f64 {
        self.beta1
    }

    /// Configured second-moment decay rate.
    pub fn beta2(&self) -> f64 {
        self.beta2
    }
}

impl Optim for Adam {
    /// Iterates over the parameters managed by this optimizer.
    fn parameters(&self) -> impl Iterator<Item = &Var> {
        self.parameters.iter()
    }

    /// Performs one Adam update on every parameter.
    ///
    /// For each parameter: update the moment EMAs from the current
    /// gradient, bias-correct them, and apply
    /// `-lr * m_hat / (sqrt(v_hat) + epsilon)` to the parameter data.
    fn step(&mut self) {
        self.time += 1.;
        // The bias-correction denominators 1 - beta^t depend only on the
        // step count, so compute them once instead of per parameter.
        let bias1 = 1. - self.beta1.powf(self.time);
        let bias2 = 1. - self.beta2.powf(self.time);

        for (param, (m, v)) in self.parameters.iter_mut().zip(self.mv.iter_mut()) {
            // NOTE(review): `gard` reads like a typo for `grad`, but it is
            // the method name `Var` exposes — confirm against its definition.
            let grad = param.gard();

            // Exponential moving averages of the gradient and its square
            // (`grad * grad` avoids the general-power `powf(2.)` path).
            *m = self.beta1 * *m + (1. - self.beta1) * grad;
            *v = self.beta2 * *v + (1. - self.beta2) * grad * grad;

            let m_hat = *m / bias1;
            let v_hat = *v / bias2;

            param.increase_data(-self.learn_rate * m_hat / (v_hat.sqrt() + self.epsilon));
        }
    }
}