# 损失函数的设计延续了Layer的模式，但需要注意的是forward和backward部分有些不同
# MSE_loss = 0.5 * (predict_value - label) ^ 2
# MSE方法和Layer的区别：
#   forward：y是组网输出的值，target是目标值（即输入是组网的输出和目标值），前向传播时缓存y和target，供backward计算dloss / dy
#   backward: 没有输入参数，因为forward已经缓存了计算dloss / dy所需的y和target
from 神经网络原理.fc_test import Layer
import numpy as np

class MSE(Layer):
    """Mean squared error loss.

    Forward computes, element-wise::

        J = 0.5 * (y - target)^2

    and the gradient returned by :meth:`backward` is::

        dJ/dy = y - target

    Unlike an ordinary ``Layer``:
      * ``forward`` takes both the network output ``y`` and the ``target``,
        and caches them so the gradient can be formed later;
      * ``backward`` takes no upstream-gradient argument, because the loss
        is the head of the graph and everything it needs was cached in
        ``forward``.
    """

    def __init__(self, name='mse', reduction='mean', *args, **kwargs):
        """Create the loss.

        Args:
            name: layer name forwarded to the ``Layer`` base class.
            reduction: one of ``'mean'``, ``'sum'``, ``'none'`` — how the
                per-element losses are aggregated in ``forward``.
        """
        super().__init__(name=name, *args, **kwargs)
        assert reduction in ['mean', 'none',
                             'sum'], "reduction only support 'mean', 'none' and 'sum', but got {}.".format(reduction)
        self.reduction = reduction
        # Inputs cached by the most recent forward() call; consumed by
        # backward(). None until forward() has run at least once.
        self.pred = None
        self.target = None

    def forward(self, y, target):
        """Compute the (reduced) MSE loss and cache inputs for backward.

        Args:
            y: network output array; must have the same shape as ``target``.
            target: ground-truth array.

        Returns:
            A scalar for ``'mean'``/``'sum'`` reduction, or the full
            per-element loss array for ``'none'``.
        """
        assert y.shape == target.shape, "The shape of y and target is not same, y shape = {} but target shape = {}".format(
            y.shape, target.shape)
        self.pred = y
        self.target = target
        loss = 0.5 * np.square(y - target)
        # Bug fix: the original compared strings with `is`, which tests
        # object identity, relies on CPython interning, and raises
        # SyntaxWarning on Python 3.8+; `==` is the correct comparison.
        if self.reduction == 'mean':
            return loss.mean()
        elif self.reduction == 'none':
            return loss
        else:
            return loss.sum()

    def backward(self):
        """Return dJ/dy = pred - target using the inputs cached by forward().

        Must be called after forward(); otherwise self.pred/self.target are
        still None and the subtraction will fail.
        """
        gradient = self.pred - self.target
        return gradient