import numpy as np
import utils


def SquaredLoss(target, outputs):
    """Return the elementwise squared error (target - outputs)^2.

    Works for scalars or anything supporting `-` and `*`
    (e.g. numpy arrays or utils.Tensor).
    """
    diff = target - outputs
    return diff * diff


def CrossEntropyLoss(target, outputs):
    """
    if len(target) != len(outputs):
        raise ValueError("Target should have same elements with outputs, your"
                         + " target:" + str(len(target)) + ", your outputs:" +
                         str(len(outputs)) + ".")
    """
    sumV = utils.Tensor(0)
    for i in range(len(target)):
        sumV -= target[i] * np.log10(outputs[i])
    #print(sumV)
    return sumV


def grad_CrossEntropyLoss(target, outputs):
    """Elementwise gradient of -target * ln(outputs) w.r.t. outputs.

    Returns -target / outputs for scalars or numpy arrays.
    """
    return -(target / outputs)
