import numpy as np
import matplotlib.pyplot as plt
import h5py     #python中的一个负责数据储存的包
from lr_utils import load_dataset

"""
单层神经网络
"""

# NOTE(review): a duplicate top-level load_dataset() call used to run here at
# import time; process() reloads the data itself and these module globals were
# never read again, so the redundant (and slow) load has been removed.

def process():
    """
    Load the dataset, flatten each image to a column, and standardize.

    Returns:
        (train_x, test_x, train_y, test_y, num_train, num_test) where
        train_x / test_x have shape (num_features, num_examples).
    """
    train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, _classes = load_dataset()
    num_train = train_set_y.shape[1]
    num_test = test_set_y.shape[1]

    # Flatten: (m, px, px, 3) -> (px*px*3, m); one example per column.
    train_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T
    test_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T

    # Standardize BOTH sets with the training set's statistics. The original
    # code normalized the test set with its own mean/std, which leaks test
    # information and puts the two sets on inconsistent scales.
    train_mean = np.mean(train_set_x_flatten)
    train_std = np.std(train_set_x_flatten)
    train_set_x_flatten = (train_set_x_flatten - train_mean) / train_std
    test_set_x_flatten = (test_set_x_flatten - train_mean) / train_std

    return (train_set_x_flatten, test_set_x_flatten, train_set_y, test_set_y, num_train, num_test)

def sigmoid(z):
    """Element-wise logistic function: 1 / (1 + e^(-z))."""
    return 1.0 / (1.0 + np.exp(-z))

def initialization(dim):
    """
    Draw small random initial parameters for a single-unit layer.

    dim: number of input features.
    Returns (W, b): W has shape (dim, 1), b is a scalar; both are standard
    normals scaled down by 209 (the original author's constant) so the
    initial activations stay small.
    """
    weights = np.random.randn(dim, 1) / 209
    bias = np.random.randn() / 209
    return (weights, bias)

def forward_propogation(W, b, X, Y):
    """
    Forward pass of logistic regression.

    W: weights, shape (n, 1); b: scalar bias.
    X: inputs, shape (n, m); Y: labels, shape (1, m).
    Returns (A, cost): A = sigmoid(W.T X + b) with shape (1, m), and the
    mean binary cross-entropy cost (a scalar).
    """
    m = X.shape[1]
    Z = np.dot(W.T, X) + b
    A = sigmoid(Z)

    # Clip activations away from exactly 0/1 before taking logs: a saturated
    # sigmoid would otherwise make np.log return -inf and the cost nan/inf.
    A_safe = np.clip(A, 1e-12, 1 - 1e-12)
    cost = (-1 / m) * np.sum(Y * np.log(A_safe) + (1 - Y) * np.log(1 - A_safe))

    return (A, cost)

def back_propagation(X, Y, A):
    """
    Backward pass: gradients of the mean cross-entropy cost.

    X: (n, m) inputs; Y: (1, m) labels; A: (1, m) predicted probabilities.
    Returns {"dW": (n, 1) array, "db": scalar}. The 1/m averaging is folded
    into the error term, so both gradients are already means.
    """
    num_examples = X.shape[1]
    error = (A - Y) / num_examples

    return {
        "dW": np.dot(X, error.T),
        "db": np.sum(error),
    }

def optimize(W, b, X, Y, num_iteration, learning_rate, print_cost):
    """
    Train (W, b) with plain batch gradient descent.

    The cost is recorded every 100 iterations (and optionally printed).
    Returns ({"W": W, "b": b}, costs).
    """
    costs = []

    for step in range(num_iteration):
        # One forward/backward sweep over the whole batch.
        A, cost = forward_propogation(W, b, X, Y)
        grad = back_propagation(X, Y, A)

        # Step downhill; rebind (don't mutate) so the caller's arrays survive.
        W = W - learning_rate * grad['dW']
        b = b - learning_rate * grad['db']

        if step % 100 == 0:
            costs.append(cost)
            if print_cost:
                print("%f" % cost)

    return ({"W": W, "b": b}, costs)

def model():
    """
    Train the single-layer network end to end and report accuracies.

    Returns (param, costs) as produced by optimize().
    """
    (train_x, test_x, train_y, test_y, num_train, num_test) = process()
    (W, b) = initialization(train_x.shape[0])
    (param, costs) = optimize(W, b, train_x, train_y,
                              num_iteration=1000, learning_rate=0.001,
                              print_cost=True)

    correct_train = predict(param, train_x, train_y)
    correct_test = predict(param, test_x, test_y)
    # Bug fix: the first line used to say "test correct" for the TRAINING
    # accuracy, making the printed report ambiguous.
    print("train correct:%f\n" % correct_train)
    print("test correct:%f\n" % correct_test)

    return (param, costs)

def predict(param, X, Y):
    """
    Threshold the forward-pass probabilities at 0.5 and score them.

    param: {"W": ..., "b": ...} trained parameters.
    X: (n, m) inputs; Y: (1, m) ground-truth labels.
    Returns the accuracy (fraction of matching labels) in [0, 1].
    """
    W = param['W']
    b = param['b']
    A, _cost = forward_propogation(W, b, X, Y)

    # Vectorized thresholding replaces the original element-by-element loop.
    Y_predict = (A > 0.5).astype(float)

    return 1 - np.sum(np.abs(Y_predict - Y)) / Y.shape[1]

# Guard the training run so importing this module doesn't train and plot.
if __name__ == "__main__":
    (param, costs) = model()

    # One cost sample per 100 iterations (see optimize()).
    plt.plot(costs)
    plt.ylabel('cost')
    plt.xlabel('iterations (per hundreds)')
    # Bug fix: the title previously ended at "=" with no value shown.
    plt.title("Learning rate = 0.001")
    plt.show()