use num_traits::{Num, pow};
use crate::autograd::function::{ForwardArgs, Function};
use crate::autograd::function_ctx::FunctionCtx;
use crate::Tensor;

/// Autograd node for the squared-error ("MSE") loss.
///
/// Created by the forward pass; holds the saved residual tensor needed
/// to compute the input gradient in `backward`.
pub struct MseLossBackward<T> {
    // Function context; `forward` saves `input - target` into it.
    ctx: FunctionCtx<T>,
}

impl<T> Function<T> for MseLossBackward<T>
    where
        T: Copy + Num + 'static,
{
    /// Wraps the context produced by the forward pass.
    fn new(ctx: FunctionCtx<T>) -> Self {
        Self { ctx }
    }

    /// Borrow of the stored function context.
    fn ctx(&self) -> &FunctionCtx<T> {
        &self.ctx
    }

    /// Element-wise squared error: returns `(target - input)^2` per element.
    ///
    /// Saves the residual `input - target` in `ctx` for use by `backward`.
    /// Hits `unreachable!` if `args` is not the two-tensor variant, which the
    /// caller is expected to guarantee.
    fn forward(ctx: &mut FunctionCtx<T>, args: ForwardArgs<T>) -> Tensor<T> {
        if let ForwardArgs::TensorTensor(input, target) = args {
            // The residual is the only thing backward needs; save it once.
            ctx.save_tensors([input - target].into());
            input.zip(target, |a, b| pow(b - a, 2))
        } else {
            unreachable!()
        }
    }

    /// Gradient of the squared error with respect to `input`.
    ///
    /// d/dx (x - t)^2 = 2 * (x - t); chain rule multiplies by `grad_output`
    /// element-wise. The target slot gets `None`: no gradient flows to targets.
    fn backward(&self, grad_output: Tensor<T>) -> Vec<Option<Tensor<T>>> {
        let error = &self.ctx.tensors()[0];
        // Bug fix: the previous code computed `error * grad_output`, dropping
        // the factor of 2 from the derivative, so gradients were half-sized.
        // `Num` has no literal conversion, so build 2 as one + one.
        let two = T::one() + T::one();
        let grad = error.zip(&grad_output, |e, g| two * e * g);
        [grad.into(), None].into()
    }
}
