# Regularization experiments: L2 weight decay and (inverted) dropout.

import numpy as np
import matplotlib.pyplot as plt
import sklearn              #机器学习的第三方工具包
import sklearn.datasets     #sklearn的数据集
import reg_utils

# Load the 2-D toy dataset (train/test splits) from the course helper
# module; is_plot=False suppresses the scatter plot it would otherwise show.
train_X, train_Y, test_X, test_Y = reg_utils.load_2D_dataset(is_plot = False)

def model(X, Y, learning_rate = 0.03, num_iteration = 10000, lambd = 0,keep_prob = 1, print_cost = True, is_plot = True):
    """Train the 3-layer net with optional L2 regularization or dropout.

    Args:
        X: input data, shape (n_x, m).
        Y: labels, shape (1, m).
        learning_rate: gradient-descent step size.
        num_iteration: number of gradient-descent iterations.
        lambd: L2 regularization strength (0 disables L2).
        keep_prob: dropout keep probability in (0, 1]; 1 disables dropout.
        print_cost: print the cost every 1000 iterations.
        is_plot: plot the sampled cost curve after training.

    Returns:
        None. As side effects, optionally plots the cost curve and prints
        test-set accuracy via reg_utils.predict.

    Raises:
        ValueError: if keep_prob is out of range, or if L2 and dropout are
            requested at the same time (the helpers do not support both).
    """
    if not (0 < keep_prob <= 1):
        # The original code only printed "keep_prob error" and then crashed
        # later with an undefined `a3`; fail fast with a clear message instead.
        raise ValueError("keep_prob must be in (0, 1], got %r" % (keep_prob,))
    if lambd != 0 and keep_prob < 1:
        # backward_propagation_L2 unpacks a 12-element cache, but the dropout
        # forward pass produces a 14-element cache (it adds the D1/D2 masks),
        # so combining both options would raise an opaque unpacking error.
        raise ValueError("using L2 and dropout together is not supported")

    costs = []                          # cost sampled every 1000 iterations
    layer_dims = [X.shape[0], 20, 3, 1]

    parameters = reg_utils.initialize_parameters(layer_dims)

    for i in range(num_iteration):
        # Forward pass: plain, or with dropout masks on the hidden layers.
        if keep_prob == 1:
            a3, cache = reg_utils.forward_propagation(X, parameters)
        else:
            a3, cache = forward_propagation_dropout(X, parameters, keep_prob)

        # Backward pass matching the chosen regularization (validated above,
        # so exactly one of these branches applies).
        if lambd != 0:
            grads = backward_propagation_L2(X, Y, cache, lambd)
        elif keep_prob < 1:
            grads = backward_propagation_dropout(X, Y, cache, keep_prob)
        else:
            grads = reg_utils.backward_propagation(X, Y, cache)

        parameters = reg_utils.update_parameters(parameters, grads, learning_rate)

        if i%1000 == 0:
            # With L2 the cost must include the penalty term.
            if lambd == 0:
                cost = reg_utils.compute_cost(a3, Y)
            else:
                cost = compute_cost_L2(a3, Y, parameters, lambd)
            costs.append(cost)
            if print_cost:
                print(cost)

    if is_plot:
        plt.plot(costs)
        plt.show()

    # Evaluate on the module-level test split as a side effect.
    reg_utils.predict(test_X, test_Y, parameters)

    return

def forward_propagation_dropout(X, parameters, keep_prob):
    """Forward pass of the 3-layer net (ReLU -> ReLU -> sigmoid) with
    inverted dropout applied to both hidden activations.

    Args:
        X: input data, shape (n_x, m).
        parameters: dict with keys "W1".."W3", "b1".."b3".
        keep_prob: probability that a hidden unit is kept.

    Returns:
        (A3, cache) where A3 is the sigmoid output and cache holds every
        intermediate value (including the masks D1, D2) needed by
        backward_propagation_dropout.
    """
    np.random.seed(1)   # fixed seed so the dropout masks are reproducible

    W1, b1 = parameters["W1"], parameters["b1"]
    W2, b2 = parameters["W2"], parameters["b2"]
    W3, b3 = parameters["W3"], parameters["b3"]

    # Hidden layer 1: linear -> ReLU -> dropout. The mask zeroes dropped
    # units; dividing by keep_prob keeps the expected activation unchanged
    # (inverted dropout).
    Z1 = np.dot(W1, X) + b1
    A1 = reg_utils.relu(Z1)
    D1 = np.random.rand(A1.shape[0], A1.shape[1]) < keep_prob
    A1 = np.multiply(D1, A1) / keep_prob

    # Hidden layer 2: same treatment.
    Z2 = np.dot(W2, A1) + b2
    A2 = reg_utils.relu(Z2)
    D2 = np.random.rand(A2.shape[0], A2.shape[1]) < keep_prob
    A2 = np.multiply(D2, A2) / keep_prob

    # Output layer: no dropout on the final sigmoid unit.
    Z3 = np.dot(W3, A2) + b3
    A3 = reg_utils.sigmoid(Z3)

    cache = (Z1, A1, D1, W1, b1, Z2, A2, D2, W2, b2, Z3, A3, W3, b3)

    return A3, cache

def backward_propagation_L2(X, Y, cache, lambd):
    """Backward pass of the 3-layer net with the L2 penalty gradient added.

    Args:
        X: input data, shape (n_x, m).
        Y: labels, shape (1, m).
        cache: the 12-tuple produced by reg_utils.forward_propagation.
        lambd: L2 regularization strength.

    Returns:
        dict of gradients keyed "dW1".."dW3", "db1".."db3" (plus the
        intermediate "dz*"/"da*" entries, kept for parity with the course
        helpers).
    """
    m = Y.shape[1]
    (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache

    # dZ3 folds in the 1/m averaging, so every dW/db derived from it is
    # already averaged. The extra (lambd/m)*W term is the gradient of the
    # penalty lambd/(2m) * ||W||_F^2.
    dZ3 = (A3 - Y) / m
    dW3 = np.dot(dZ3, A2.T) + (lambd / m) * W3
    db3 = dZ3.sum(axis = 1, keepdims = True)

    dA2 = np.dot(W3.T, dZ3)
    dZ2 = dA2 * (A2 > 0)                    # ReLU derivative
    dW2 = np.dot(dZ2, A1.T) + (lambd / m) * W2
    db2 = dZ2.sum(axis = 1, keepdims = True)

    dA1 = np.dot(W2.T, dZ2)
    dZ1 = dA1 * (A1 > 0)
    dW1 = np.dot(dZ1, X.T) + (lambd / m) * W1
    db1 = dZ1.sum(axis = 1, keepdims = True)

    return {
        "dz3": dZ3, "dW3": dW3, "db3": db3,
        "da2": dA2, "dz2": dZ2, "dW2": dW2, "db2": db2,
        "da1": dA1, "dz1": dZ1, "dW1": dW1, "db1": db1,
    }

def backward_propagation_dropout(X, Y, cache, keep_prob):
    """Backward pass matching forward_propagation_dropout.

    Args:
        X: input data, shape (n_x, m).
        Y: labels, shape (1, m).
        cache: the 14-tuple (with masks D1, D2) from
            forward_propagation_dropout.
        keep_prob: the same keep probability used in the forward pass.

    Returns:
        dict of gradients keyed "dW1".."dW3", "db1".."db3" (plus the
        intermediate "dz*"/"da*" entries).
    """
    m = Y.shape[1]
    (Z1, A1, D1, W1, b1, Z2, A2, D2, W2, b2, Z3, A3, W3, b3) = cache

    # dZ3 folds in the 1/m averaging; no dropout on the output unit.
    dZ3 = (A3 - Y) / m
    dW3 = np.dot(dZ3, A2.T)
    db3 = dZ3.sum(axis = 1, keepdims = True)

    # Re-apply the forward mask so gradients flow only through units that
    # were kept, and rescale by keep_prob (inverted dropout).
    dA2 = np.dot(W3.T, dZ3) * D2 / keep_prob
    dZ2 = dA2 * (A2 > 0)                    # ReLU derivative
    dW2 = np.dot(dZ2, A1.T)
    db2 = dZ2.sum(axis = 1, keepdims = True)

    dA1 = np.dot(W2.T, dZ2) * D1 / keep_prob
    dZ1 = dA1 * (A1 > 0)
    dW1 = np.dot(dZ1, X.T)
    db1 = dZ1.sum(axis = 1, keepdims = True)

    return {
        "dz3": dZ3, "dW3": dW3, "db3": db3,
        "da2": dA2, "dz2": dZ2, "dW2": dW2, "db2": db2,
        "da1": dA1, "dz1": dZ1, "dW1": dW1, "db1": db1,
    }

def compute_cost_L2(a3, Y, parameters, lambd):
    """Cross-entropy cost plus the L2 (weight-decay) penalty.

    The penalty is lambd/(2m) * sum of squared weight entries, whose
    gradient lambd/m * W is exactly what backward_propagation_L2 adds to
    each dW. The original code used np.linalg.norm(W, ord=2), which for a
    2-D array is the spectral norm (largest singular value) — not the sum
    of squares — and also omitted the 1/2 factor, so cost and gradient
    disagreed.

    Args:
        a3: sigmoid output of the forward pass, shape (1, m).
        Y: labels, shape (1, m).
        parameters: dict holding "W1".."W3".
        lambd: L2 regularization strength.

    Returns:
        Scalar regularized cost.
    """
    m = Y.shape[1]

    # Squared Frobenius norm of every weight matrix (sum of squared entries).
    l2_sum = sum(np.sum(np.square(parameters["W" + str(l)])) for l in range(1, 4))

    # nansum tolerates 0*log(0) terms produced when a3 saturates at 0 or 1.
    logprob = -(np.multiply(Y, np.log(a3)) + np.multiply((1 - Y), np.log(1 - a3)))
    cost = 1.0 / m * np.nansum(logprob) + lambd / (2.0 * m) * l2_sum

    return cost

# Train with dropout (keep_prob=0.86, no L2); note learning_rate=0.3 here
# overrides the function's 0.03 default.
model(train_X, train_Y, keep_prob=0.86, learning_rate=0.3, is_plot=True)