import numpy as np


def sigmod(x):
    """Sigmoid activation 1 / (1 + e^(-x)), applied element-wise.

    Note: the name is a historical misspelling of "sigmoid"; kept for
    backward compatibility with existing callers.
    """
    return 1.0 / (1.0 + np.exp(-x))
# Gradient of the sigmoid function.
def sigmod_derivative(x):
    """Return the sigmoid derivative s * (1 - s), where s = sigmod(x)."""
    s = sigmod(x)
    return s * (1 - s)

def images2vector(x):
    """Flatten an image array into a column vector.

    Generalized: the original hard-coded ``shape[0] * shape[1] * shape[2]``
    and therefore only worked for exactly 3-d arrays. ``reshape(-1, 1)``
    produces the identical result for 3-d input (row-major order) and
    also handles any other rank.

    Parameters
    ----------
    x : np.ndarray
        e.g. a (height, width, channels) image array.

    Returns
    -------
    np.ndarray of shape (x.size, 1).
    """
    return x.reshape(-1, 1)

def normalizeRows(x):
    """Scale every row of ``x`` to unit Euclidean (L2) length.

    NOTE(review): a row of all zeros has norm 0 and yields a divide
    warning / NaNs — same behavior as before.
    """
    row_norms = np.linalg.norm(x, axis=1, keepdims=True)
    return x / row_norms

def softMax(x):
    """Row-wise softmax.

    Fixes over the original:
    - removed a leftover debug ``print`` of the row-sum shape;
    - subtracts the per-row maximum before exponentiating, which is
      mathematically equivalent but avoids overflow (NaNs) for large
      inputs.

    Parameters
    ----------
    x : np.ndarray, shape (m, n)

    Returns
    -------
    np.ndarray of the same shape; each row sums to 1.
    """
    # exp(x - c) / sum(exp(x - c)) == softmax(x) for any per-row constant c.
    shifted = x - np.max(x, axis=1, keepdims=True)
    x_exp = np.exp(shifted)
    return x_exp / np.sum(x_exp, axis=1, keepdims=True)

# L1 loss function
def L1(yhat, y):
    """Sum of absolute errors between predictions ``yhat`` and labels ``y``."""
    return np.abs(y - yhat).sum()

# L2 loss function
def L2(yhat, y):
    """Sum of squared errors between predictions ``yhat`` and labels ``y``.

    Fix: the original computed ``np.sum(np.dot(d, d.T))``, which is only
    correct for 1-d vectors — for 2-d input the dot product forms the
    full Gram matrix and the sum picks up spurious cross-row terms.
    ``np.sum(d ** 2)`` is identical for 1-d input and correct for any
    shape.
    """
    d = y - yhat
    return np.sum(d ** 2)
# x = np.array([1, 2, 3])
# images = np.array(
#     [[[1, 2, 3],
#  [4, 5, 6]],
# [[1, 2, 3],
#   [4, 5, 6]]]
# )
# xx = np.array([[0, 3, 4],
#                [1, 6, 4]])
#print ('images2vector='+ str(images2vector(images)))
#print (sigmod_derivative(x))
#print(normalizeRows(xx))
# Quick manual smoke checks for the helpers above.
x = np.array([[9, 2, 5, 0, 0],
              [7, 5, 0, 0, 0]])
yhat = np.array([.9, 0.2, 0.1, .4, .9])
y = np.array([1, 0, 0, 1, 1])
# len() of a 2-d array is its first dimension, so this is np.zeros(2).
xx = np.zeros(x.shape[0])
print(xx)
#print(softMax(x))
print(L2(yhat, y))