use std::rc::Rc;
use num_traits::Num;
use crate::autograd::function::{ForwardArgs, Function};
use crate::autograd::function_ctx::FunctionCtx;
use crate::Tensor;

/// Autograd graph node for 2-D matrix multiplication (`mm`).
///
/// Holds the [`FunctionCtx`] in which `forward` saves both operand tensors,
/// so that `backward` can later compute the input gradients from them.
pub struct MmBackward<T> {
    // Context carrying the tensors saved during the forward pass.
    ctx: FunctionCtx<T>,
}

impl<T> Function<T> for MmBackward<T>
    where
        T: Copy + Num + 'static,
{
    fn new(ctx: FunctionCtx<T>) -> Self {
        Self { ctx }
    }

    fn ctx(&self) -> &FunctionCtx<T> {
        &self.ctx
    }

    fn forward(ctx: &mut FunctionCtx<T>, args: ForwardArgs<T>) -> Tensor<T> {
        if let ForwardArgs::TensorTensor(lhs, rhs) = args {
            ctx.save_tensors([lhs.clone(), rhs.clone()].into());

            let lhs = (*lhs.ptr).borrow();
            let rhs = (*rhs.ptr).borrow();

            assert_eq!(lhs.shape.len(), 2, "self must be a matrix");
            assert_eq!(rhs.shape.len(), 2, "other must be a matrix");
            assert_eq!(lhs.shape[1], rhs.shape[0], "self and other shapes cannot be multiplied");

            let mut buf = vec![T::zero(); lhs.shape[0] * rhs.shape[1]];

            for i in 0..lhs.shape[0] {
                for j in 0..rhs.shape[1] {
                    let mut dot = T::zero();
                    for k in 0..lhs.shape[1] {
                        let lhs_idx = lhs.offset + i * lhs.strides[0] + k * lhs.strides[1];
                        let rhs_idx = rhs.offset + k * rhs.strides[0] + j * rhs.strides[1];
                        dot = dot + lhs.data[lhs_idx] * rhs.data[rhs_idx];
                    }
                    buf[i * rhs.shape[1] + j] = dot;
                }
            }

            (Rc::new(buf.into_boxed_slice()), [lhs.shape[0], rhs.shape[1]].into()).into()
        } else {
            unreachable!()
        }
    }

    fn backward(&self, grad_output: Tensor<T>) -> Vec<Option<Tensor<T>>> {
        let tensors = self.ctx.tensors();
        let grad0 = grad_output.mm(&tensors[1].t());
        let grad1 = tensors[0].t().mm(&grad_output);
        [grad0.into(), grad1.into()].into()
    }
}
