import numpy as np
import matplotlib.pyplot as plt

# Seed the global NumPy RNG at import time so results are reproducible.
np.random.seed(1)

def initialize_parameters_deep(layer_dims):
    """
    Randomly initialize the weights and biases of every layer.

    Parameters
    ----------
    layer_dims : list
        Number of units in each layer of the network, including the
        input layer at index 0.

    Returns
    -------
    dict
        {"W1": ..., "b1": ..., ..., "WL": ..., "bL": ...} where
        Wl has shape (layer_dims[l], layer_dims[l-1]) and
        bl has shape (layer_dims[l], 1).
    """
    np.random.seed(1)
    parameters = {}
    num_layers = len(layer_dims)  # total layer count, input layer included

    for layer in range(1, num_layers):
        fan_in = layer_dims[layer - 1]
        fan_out = layer_dims[layer]
        # Scale by 1/sqrt(fan_in) so activations neither explode nor vanish.
        parameters["W" + str(layer)] = np.random.randn(fan_out, fan_in) / np.sqrt(fan_in)
        parameters["b" + str(layer)] = np.zeros((fan_out, 1))

        assert parameters["W" + str(layer)].shape == (fan_out, fan_in)
        assert parameters["b" + str(layer)].shape == (fan_out, 1)

    return parameters

def linear_forward(A, W, b):
    """
    Compute the linear part of a layer's forward pass: Z = W·A + b.

    Parameters
    ----------
    A : activations from the previous layer, shape (n_prev, m)
    W : weight matrix of this layer, shape (n, n_prev)
    b : bias vector of this layer, shape (n, 1)

    Returns
    -------
    (Z, cache) : Z has shape (n, m); cache is (A, W, b), saved for the
        backward pass.
    """
    Z = W @ A + b  # b broadcasts across the m example columns
    assert Z.shape == (W.shape[0], A.shape[1])
    return Z, (A, W, b)

def linear_activation_forward(A_prev, W, b, activation):
    """
    Forward pass for one layer: linear step followed by the activation.

    Parameters
    ----------
    A_prev : activations from the previous layer (the feature input X
        for the first layer)
    W : this layer's weight matrix
    b : this layer's bias vector
    activation : "sigmoid" or "relu", selecting this layer's activation

    Returns
    -------
    (A, cache) : this layer's activations, plus a cache
        ((A_prev, W, b), Z) for the backward pass.
    """
    Z, linear_cache = linear_forward(A_prev, W, b)

    if activation == "sigmoid":
        A = sigmoid(Z)
    elif activation == "relu":
        A = relu(Z)

    assert A.shape == (W.shape[0], A_prev.shape[1])
    return A, (linear_cache, Z)

def L_model_forward(X, parameters):
    """
    Full forward pass: L-1 ReLU layers followed by one sigmoid output layer.

    Parameters
    ----------
    X : input feature matrix, shape (n_features, m)
    parameters : dict with each layer's "Wl" and "bl"

    Returns
    -------
    (AL, caches) : AL is the output-layer activation (the predictions,
        shape (1, m)); caches holds one per-layer cache for backprop.
    """
    caches = []
    A = X
    # parameters stores one W and one b per layer, so halving its length
    # gives the number of layers.
    num_layers = len(parameters) // 2

    # Hidden layers 1 .. L-1 all use ReLU.
    for layer in range(1, num_layers):
        A, cache = linear_activation_forward(
            A,
            parameters["W" + str(layer)],
            parameters["b" + str(layer)],
            activation="relu",
        )
        caches.append(cache)

    # Output layer uses sigmoid.
    AL, cache = linear_activation_forward(
        A,
        parameters["W" + str(num_layers)],
        parameters["b" + str(num_layers)],
        activation="sigmoid",
    )
    caches.append(cache)

    assert AL.shape == (1, X.shape[1])
    return AL, caches

def compute_cost(AL, Y):
    """
    Compute the cross-entropy cost averaged over all m examples.

    Parameters
    ----------
    AL : predicted probabilities, shape (1, m)
    Y : true labels (0/1), shape (1, m)

    Returns
    -------
    float-like scalar (0-d array) cross-entropy cost.
    """
    m = Y.shape[1]
    log_probs = Y * np.log(AL) + (1 - Y) * np.log(1 - AL)
    cost = (-1 / m) * np.sum(log_probs)

    cost = np.squeeze(cost)  # reduce to a 0-d scalar
    assert cost.shape == ()
    return cost

def linear_backward(dZ, cache):
    """
    Backward pass for the linear part of one layer.

    Parameters
    ----------
    dZ : gradient of the cost with respect to this layer's Z
    cache : (A_prev, W, b) saved by linear_forward for this layer

    Returns
    -------
    (dA_prev, dW, db) : gradients with respect to the previous layer's
        activations and this layer's parameters.
    """
    A_prev, W, b = cache
    m = A_prev.shape[1]  # number of examples; gradients are averaged over it

    dW = (dZ @ A_prev.T) / m
    db = dZ.sum(axis=1, keepdims=True) / m
    dA_prev = W.T @ dZ

    assert dA_prev.shape == A_prev.shape
    assert dW.shape == W.shape
    assert db.shape == b.shape

    return dA_prev, dW, db

def linear_activation_backward(dA, cache, activation):
    """
    Backward pass for one layer: activation gradient then linear gradient.

    Parameters
    ----------
    dA : gradient of the cost with respect to this layer's activations
    cache : ((A_prev, W, b), Z) saved during the forward pass
    activation : "sigmoid" or "relu", the activation this layer used

    Returns
    -------
    (dA_prev, dW, db) : gradients for the previous layer's activations
        and this layer's parameters.
    """
    linear_cache, Z = cache

    if activation == "relu":
        dZ = relu_backward(dA, Z)
    elif activation == "sigmoid":
        dZ = sigmoid_backward(dA, Z)

    return linear_backward(dZ, linear_cache)

def L_model_backward(AL, Y, caches):
    """
    Full backward pass through the network.

    Parameters
    ----------
    AL : the output-layer activations (predicted labels y')
    Y : the true labels
    caches : the per-layer caches saved during the forward pass

    Returns
    -------
    grads : dict with "dA{l-1}", "dW{l}" and "db{l}" for every layer l.
    """
    grads = {}
    num_layers = len(caches)  # one cache per layer
    Y = Y.reshape(AL.shape)   # make the label array match AL's shape

    # Gradient of cross-entropy cost with respect to AL.
    dAL = -(np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))

    # Output (sigmoid) layer.
    last_cache = caches[-1]
    (grads["dA" + str(num_layers - 1)],
     grads["dW" + str(num_layers)],
     grads["db" + str(num_layers)]) = linear_activation_backward(
        dAL, last_cache, activation="sigmoid")

    # Hidden (ReLU) layers, from layer L-1 down to layer 1.
    for layer in range(num_layers - 1, 0, -1):
        (grads["dA" + str(layer - 1)],
         grads["dW" + str(layer)],
         grads["db" + str(layer)]) = linear_activation_backward(
            grads["dA" + str(layer)], caches[layer - 1], activation="relu")

    return grads

def update_parameters(parameters, grads, learning_rate):
    """
    Apply one gradient-descent step to every layer's parameters.

    Parameters
    ----------
    parameters : dict with each layer's "Wl" and "bl"
    grads : dict with each layer's "dWl" and "dbl"
    learning_rate : step size of the update

    Returns
    -------
    parameters : the same dict, with updated W and b entries.
    """
    num_layers = len(parameters) // 2  # one (W, b) pair per layer

    for layer in range(1, num_layers + 1):
        w_key, b_key = "W" + str(layer), "b" + str(layer)
        # Rebind to new arrays (no in-place ndarray mutation).
        parameters[w_key] = parameters[w_key] - learning_rate * grads["dW" + str(layer)]
        parameters[b_key] = parameters[b_key] - learning_rate * grads["db" + str(layer)]

    return parameters

def dnn_model(X, Y, layers_dims, learning_rate=0.0075, num_iterations=3000, print_cost=False):
    """
    Train an L-layer deep neural network with batch gradient descent.

    Parameters
    ----------
    X : input feature matrix (shape (n_features, m) — assumed from the
        forward pass; confirm against the caller)
    Y : true labels, shape (1, m)
    layers_dims : list giving the number of units in each layer
    learning_rate : gradient-descent step size
    num_iterations : number of training iterations
    print_cost : if True, print the cost every 100 iterations

    Returns
    -------
    parameters : dict of trained W and b for every layer.
    """

    np.random.seed(1)
    costs = []  # cost sampled every 100 iterations, for the learning curve

    # Initialize each layer's parameters W and b.
    parameters = initialize_parameters_deep(layers_dims)

    # Train the network for the requested number of iterations.
    for i in range(0, num_iterations):
        # Forward propagation.
        AL, caches = L_model_forward(X, parameters)
        # Cost.
        cost = compute_cost(AL, Y)
        # Backward propagation.
        grads = L_model_backward(AL, Y, caches)
        # Gradient-descent parameter update.
        parameters = update_parameters(parameters, grads, learning_rate)

        if i % 100 == 0:
            if print_cost and i > 0:
                print("训练%i次后成本是: %f" %(i, cost))
            costs.append(cost)

    # Plot the learning curve. Costs were recorded once per 100 iterations,
    # so the x-axis unit is "hundreds" (the original label said "per tens"
    # and misspelled "iterations").
    plt.plot(np.squeeze(costs))
    plt.ylabel('cost')
    plt.xlabel('iterations (per hundreds)')
    plt.title('Learning rate =' + str(learning_rate))
    plt.show()

    return parameters

def predict(X, parameters):
    """
    Predict 0/1 labels for the examples in X using trained parameters.

    Parameters
    ----------
    X : input feature matrix, shape (n_features, m)
    parameters : dict of trained W and b for every layer

    Returns
    -------
    p : array of shape (1, m) with 1.0 where the predicted probability
        exceeds 0.5 and 0.0 otherwise.
    """
    # Forward pass gives the output-layer probabilities, shape (1, m).
    probas, _ = L_model_forward(X, parameters)

    # Threshold at 0.5 in one vectorized step (replaces the original
    # per-example Python loop; also drops the unused locals m/n).
    p = (probas > 0.5).astype(float)

    return p