use num_traits::Num;
use crate::Tensor;
use crate::optim::optimizer::Optimizer;

/// Stochastic Gradient Descent optimizer.
///
/// Holds the set of parameter tensors it updates, together with the
/// hyper-parameters of the update rule.
pub struct SGD<T> {
    // Parameters tracked by this optimizer; updated in place on `step`.
    params: Vec<Tensor<T>>,
    // Learning rate (step size) applied to each gradient.
    lr: T,
    // Weight-decay (L2 regularization) coefficient.
    weight_decay: T,
}

impl<T> SGD<T>
where
    T: Copy + Num + 'static,
{
    /// Creates an SGD optimizer over `params` with learning rate `lr`.
    ///
    /// The weight-decay coefficient is initialized to zero.
    pub fn new(params: Vec<Tensor<T>>, lr: T) -> Self {
        let weight_decay = T::zero();
        Self {
            params,
            lr,
            weight_decay,
        }
    }
}

impl<T> Optimizer for SGD<T>
where
    // NOTE: the previous `From<f64>` bound was unused by this impl and has
    // been dropped; this only loosens the requirements on `T`.
    T: Copy + Num + 'static,
{
    /// Performs one optimization step over all tracked parameters:
    ///
    /// `w <- w - lr * (g + weight_decay * w)`
    ///
    /// Parameters without a gradient are skipped. The weight-decay term
    /// vanishes when `weight_decay` is zero (the value set by `new`), so
    /// existing behavior is unchanged in that case.
    fn step(&self) {
        self.params
            .iter()
            // Keep only parameters that actually have a gradient, pairing
            // each gradient with its parameter (idiomatic `filter_map`
            // instead of `filter(is_some)` + `unwrap`).
            .filter_map(|param| param.grad().map(|grad| (grad, param)))
            .for_each(|(grad, param)| {
                let mut cell = (*param.ptr).borrow_mut();
                let grad_cell = (*grad.ptr).borrow();

                // Element-wise update; the RHS is fully evaluated into an
                // owned buffer before being written back into the cell.
                // NOTE(review): assumes `cell.data` and `grad_cell.data`
                // have equal length — `zip` silently truncates otherwise.
                cell.data = cell
                    .data
                    .iter()
                    .zip(grad_cell.data.iter())
                    .map(|(&w, &g)| w - self.lr * (g + self.weight_decay * w))
                    .collect::<Box<[T]>>()
                    .into()
            })
    }

    /// Resets the gradient of every tracked parameter by forwarding
    /// `set_to_none` to each tensor's own `zero_grad`.
    fn zero_grad(&self, set_to_none: bool) {
        self.params
            .iter()
            .for_each(|param| param.zero_grad(set_to_none));
    }
}
