# Deep neural network
# Vectorized (batched) generalization of the single-layer network

# First, load the dataset:
import  skimage.io as io
import numpy as np
import matplotlib.pyplot as plt

# 指定训练集
# Training set: all JPEGs under ./data/train
train=io.ImageCollection('./data/train/*.jpg')
# Test set: all JPEGs under ./data/test
test=io.ImageCollection('./data/test/*.jpg')

# 500 training samples: 250 cats, 250 non-cats (ordering assumed — cats first; TODO confirm against the data directory)
X_train=np.asarray(train)
# Training labels: 1 = cat, 0 = non-cat
y_train=np.hstack((np.ones(250),np.zeros(250)))

# 200 test samples: 100 cat images, 100 non-cat images
X_test=np.asarray(test)
# Test labels
y_test=np.hstack((np.ones(100),np.zeros(100)))

# Sample counts and per-image dimensions (width, height, channels)
m_train=X_train.shape[0]
m_test=X_test.shape[0]
w,h,d=X_train.shape[1],X_train.shape[2],X_train.shape[3]

print('训练样本数量：%d' % m_train)
print('测试样本数量：%d' % m_test)
print('每张图片的维度：(%d, %d, %d)' % (w, h, d))

# Randomly pick 10 training images, display them, and show each label: y = 1 means cat, y = 0 means non-cat
def previewData():
    """Display 10 randomly chosen training images in a 2x5 grid with their
    labels in the title (y = 1: cat, y = 0: not a cat)."""
    chosen = [np.random.choice(m_train) for _ in range(10)]  # 10 random indices
    for k, idx in enumerate(chosen):
        plt.subplot(2, 5, k + 1)
        plt.imshow(X_train[idx])
        plt.title("y=" + str(y_train[idx]))
        plt.axis('off')
    plt.show()

# Preprocessing
# Flatten each image into a column vector: X becomes (w*h*d, m)
X_train = X_train.reshape(m_train, -1).T
X_test = X_test.reshape(m_test, -1).T

print('训练样本维度：' + str(X_train.shape))
print('测试样本维度：' + str(X_test.shape))

# Normalize pixel values into [0, 1]
X_train = X_train / 255
X_test = X_test / 255


# Parameter initialization
def initialize_parameters(layer_dims):
    """
    Build the parameter dictionary for an L-layer network.

    Weights are small Gaussian values (standard deviation 0.1); biases start
    at zero.

    :param layer_dims: list of layer sizes, input layer included
    :return: dict with keys 'W1', 'b1', ..., 'WL', 'bL'
    """
    params = {}
    # pair consecutive layer sizes: (n_prev, n_cur) for layers 1..L-1
    for idx, (n_prev, n_cur) in enumerate(zip(layer_dims[:-1], layer_dims[1:]), start=1):
        params['W' + str(idx)] = np.random.randn(n_cur, n_prev) * 0.1
        params['b' + str(idx)] = np.zeros((n_cur, 1))
    return params

# Forward propagation
def sigmoid(Z):
    """Logistic sigmoid activation, applied element-wise.

    :param Z: scalar or ndarray of pre-activations
    :return: 1 / (1 + e^-Z), same shape as Z
    """
    return 1.0 / (1.0 + np.exp(-Z))

def relu(Z):
    """Rectified linear unit activation, applied element-wise.

    :param Z: scalar or ndarray of pre-activations
    :return: max(0, Z), same shape as Z
    """
    return np.maximum(Z, 0)

# Forward computation for a single layer
def linear_activation_forward(A_prev, W, b, activation):
    """
    Forward pass through one layer: Z = W·A_prev + b, then the activation.

    :param A_prev: activations from the previous layer, shape (n_prev, m)
    :param W: weight matrix, shape (n, n_prev)
    :param b: bias column vector, shape (n, 1)
    :param activation: 'sigmoid' or 'relu'
    :return: (A, cache) — this layer's activations and the (A_prev, W, b, Z)
             tuple needed for backpropagation
    :raises ValueError: for an unknown activation name (the original left A
             unbound and crashed with UnboundLocalError)
    """
    Z = np.dot(W, A_prev) + b        # linear output
    if activation == "sigmoid":
        A = sigmoid(Z)
    elif activation == "relu":
        A = relu(Z)
    else:
        raise ValueError("unknown activation: %r" % activation)

    cache = (A_prev, W, b, Z)

    return A, cache

# Full forward propagation
def model_forward(X, parameters):
    """
    Run the full forward pass: (L-1) ReLU layers followed by a sigmoid output.

    :param X: input data, shape (n_x, m)
    :param parameters: dict produced by initialize_parameters
    :return: (AL, caches) — final activations and one cache tuple per layer
    """
    caches = []
    num_layers = len(parameters) // 2   # every layer owns one W and one b
    A = X

    # hidden layers 1..L-1 use ReLU
    for layer in range(1, num_layers):
        A, cache = linear_activation_forward(
            A, parameters['W' + str(layer)], parameters['b' + str(layer)], "relu")
        caches.append(cache)

    # output layer L uses sigmoid
    AL, cache = linear_activation_forward(
        A, parameters['W' + str(num_layers)], parameters['b' + str(num_layers)], "sigmoid")
    caches.append(cache)

    return AL, caches

# Cost function
def compute_cost(AL, Y):
    """
    Binary cross-entropy cost averaged over the m examples.

    :param AL: predicted probabilities, shape (1, m)
    :param Y: true 0/1 labels, broadcastable against AL
    :return: scalar cost

    Predictions are clipped away from exactly 0 and 1 so np.log never
    produces -inf/nan when the sigmoid output saturates (the original
    returned nan in that case and silently broke training).
    """
    m = AL.shape[1]
    eps = 1e-12
    A = np.clip(AL, eps, 1 - eps)   # keep log() finite
    cost = -1/m * np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A))
    cost = np.squeeze(cost)
    return cost

# Backward propagation
def relu_backward(dA, Z):
    """
    Backprop through ReLU: the gradient passes where Z > 0 and is zero
    elsewhere.

    :param dA: gradient w.r.t. the layer's activations
    :param Z: pre-activation values from the forward pass
    :return: dZ, same shape as dA
    """
    return np.where(Z > 0, dA, 0)

def sigmoid_backward(dA, Z):
    """
    Backprop through the sigmoid: dZ = dA * s * (1 - s), where s = sigmoid(Z).

    :param dA: gradient w.r.t. the layer's activations
    :param Z: pre-activation values from the forward pass
    :return: dZ, same shape as dA
    """
    s = 1.0 / (1.0 + np.exp(-Z))
    return dA * s * (1.0 - s)

def linear_activation_backward(dA, cache, activation):
    """
    Backward pass through one layer (activation derivative, then linear part).

    :param dA: gradient of the cost w.r.t. this layer's activations, shape (n, m)
    :param cache: (A_prev, W, b, Z) tuple stored during the forward pass
    :param activation: 'relu' or 'sigmoid'
    :return: (dA_prev, dW, db)
    :raises ValueError: for an unknown activation name (the original left dZ
             unbound and crashed with UnboundLocalError)
    """
    A_prev, W, b, Z = cache

    if activation == 'relu':
        dZ = relu_backward(dA, Z)
    elif activation == 'sigmoid':
        dZ = sigmoid_backward(dA, Z)
    else:
        raise ValueError("unknown activation: %r" % activation)

    m = dA.shape[1]                                  # number of examples
    dW = 1/m * np.dot(dZ, A_prev.T)
    db = 1/m * np.sum(dZ, axis=1, keepdims=True)     # fixed original 'db=db =' duplicated assignment
    dA_prev = np.dot(W.T, dZ)

    return dA_prev, dW, db

def model_backward(AL,Y,caches):
    '''
    Full backward pass through all L layers of the network.

    :param AL: output activations from model_forward, shape (1, m)
    :param Y: true 0/1 labels; reshaped below to match AL
    :param caches: per-layer (A_prev, W, b, Z) tuples from the forward pass
    :return: grads dict holding 'dA{l}', 'dW{l+1}', 'db{l+1}' for every layer
    '''
    grads = {}
    L = len(caches)         # number of weight layers (the input layer has no cache)
    m = AL.shape[1]         # number of examples (kept from original; not used below)
    Y = Y.reshape(AL.shape) # make Y the same shape as AL

    # derivative of the cross-entropy cost with respect to AL
    dAL = -(np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))

    # layer L uses the sigmoid activation
    current_cache = caches[L-1]
    grads["dA" + str(L-1)], grads["dW" + str(L)], grads["db" + str(L)] = linear_activation_backward(dAL, current_cache, activation = "sigmoid")

    # layers L-1 .. 1 use the ReLU activation
    for l in reversed(range(L-1)):
        current_cache = caches[l]
        dA_prev_temp, dW_temp, db_temp = linear_activation_backward(grads["dA" + str(l + 1)], current_cache, activation = "relu")
        grads["dA" + str(l)] = dA_prev_temp
        grads["dW" + str(l + 1)] = dW_temp
        grads["db" + str(l + 1)] = db_temp

    return grads

def update_parameters(parameters, grads, learning_rate=0.01):
    """
    One gradient-descent step: W_l -= lr*dW_l and b_l -= lr*db_l per layer.

    :param parameters: dict of 'W1','b1',... (mutated in place and returned)
    :param grads: dict of 'dW1','db1',... from model_backward
    :param learning_rate: step size
    :return: the updated parameters dict
    """
    num_layers = len(parameters) // 2   # each layer contributes a W and a b

    for layer in range(1, num_layers + 1):
        parameters["W" + str(layer)] -= learning_rate * grads["dW" + str(layer)]
        parameters["b" + str(layer)] -= learning_rate * grads["db" + str(layer)]

    return parameters

# Complete neural-network training model
def nn_model(X, Y, layers_dims, learning_rate = 0.01, num_iterations = 3000, print_cost=False):
    """
    Train the L-layer network with batch gradient descent and plot the cost.

    :param X: training data, shape (n_x, m)
    :param Y: true 0/1 labels for X
    :param layers_dims: layer sizes, input layer included
    :param learning_rate: gradient-descent step size
    :param num_iterations: number of gradient-descent iterations
    :param print_cost: if True, print the cost every 100 iterations
    :return: trained parameters dict
    """
    np.random.seed(1)   # reproducible weight initialization
    costs = []

    # parameter initialization
    parameters = initialize_parameters(layers_dims)

    # training loop
    for i in range(num_iterations):

        # forward pass
        AL, caches = model_forward(X, parameters)

        # cost
        cost = compute_cost(AL, Y)

        # backward pass
        grads = model_backward(AL, Y, caches)

        # parameter update
        parameters = update_parameters(parameters, grads, learning_rate)

        # record the cost every 100 iterations; the original only recorded it
        # when print_cost was True, which left the plot below empty otherwise,
        # and it tested the same condition twice
        if i % 100 == 0:
            costs.append(cost)
            if print_cost:
                print("Cost after iteration %i: %f" % (i, cost))

    # plot the cost trend
    plt.plot(np.squeeze(costs))
    plt.ylabel('cost')
    plt.xlabel('iterations (per hundred)')
    plt.title("Learning rate =" + str(learning_rate))
    plt.show()

    return parameters

# Model prediction
def predict(X, y, parameters):
    """
    Predict cat/non-cat labels for X and print the accuracy against y.

    :param X: data, shape (n_x, m)
    :param y: true 0/1 labels, shape (m,) or (1, m)
    :param parameters: trained parameters dict from nn_model
    :return: boolean prediction array, shape (1, m)
    """
    # forward pass; the per-layer caches are not needed for prediction
    # (removed the original's unused m and preallocated-but-never-used p)
    AL, _ = model_forward(X, parameters)

    # threshold the sigmoid output at 0.5
    predictions = AL > 0.5

    # y of shape (m,) broadcasts against the (1, m) predictions
    accuracy = np.mean(predictions == y)

    print("Accuracy: %f" % accuracy)

    return predictions

layers_dims = [12288, 100,10, 1] # 3-layer model: two hidden layers (100, 10) plus the output unit; input dim 12288 (presumably 64*64*3 images — confirm)
parameters = nn_model(X_train, y_train, layers_dims, learning_rate = 0.04, num_iterations = 1500, print_cost = True)
# print accuracy on the training and test sets
pred_train = predict(X_train, y_train, parameters)
pred_test = predict(X_test, y_test, parameters)