import numpy as np

def cross_entropy_error(y_true, y_pred):
    """Element-wise binary cross-entropy loss.

    Parameters
    ----------
    y_true : array_like
        Ground-truth labels (expected in {0, 1} or [0, 1]).
    y_pred : array_like
        Predicted probabilities in [0, 1].

    Returns
    -------
    ndarray or float
        The element-wise loss; same shape as the broadcast inputs.
    """
    # Clip predictions away from 0 and 1 so np.log never sees an exact
    # zero argument (which would return -inf / nan). Mirrors the epsilon
    # guard used by cross_entropy_error_mul below.
    eps = 1e-7
    y_pred = np.clip(y_pred, eps, 1 - eps)

    loss = -(y_true * np.log(y_pred) + (1 - y_true) * np.log(1 - y_pred))

    return loss


def softmax(x):
    """Numerically stable softmax over all elements of *x*.

    Parameters
    ----------
    x : array_like
        Input scores (logits). Normalization is over the entire array
        (no axis argument), matching the original behavior.

    Returns
    -------
    ndarray
        Non-negative values of the same shape as *x* that sum to 1.
    """
    # Subtracting the max before exponentiating prevents np.exp from
    # overflowing to inf for large scores; the shift cancels out in the
    # ratio, so the result is mathematically unchanged.
    shifted = np.asarray(x) - np.max(x)
    exps = np.exp(shifted)

    return exps / np.sum(exps)

def cross_entropy_error_mul(y_true, y_pred):
    """Multi-class cross-entropy loss over softmax-normalized scores.

    Parameters
    ----------
    y_true : array_like
        Target distribution (typically a one-hot vector).
    y_pred : array_like
        Raw scores (logits); converted to probabilities via ``softmax``.

    Returns
    -------
    float
        Scalar cross-entropy summed over all elements.
    """
    # Small additive constant keeps np.log away from an exact zero.
    eps = 1e-7

    probs = softmax(y_pred)

    return -np.sum(y_true * np.log(probs + eps))



