import sys
import os
import numpy as np
import matplotlib.pyplot as plt
import h5py
import skimage.transform as tf
sys.path.append(os.path.abspath(os.path.dirname(__file__)) + "/..")


def load_dataset():
    """Load the cat/non-cat train and test sets from their HDF5 files.

    Returns:
        Tuple of numpy arrays:
        (train_set_x, train_set_y, test_set_x, test_set_y).
    """
    # Use context managers so the HDF5 file handles are always closed
    # (previously both files were left open for the life of the process).
    with h5py.File('datasets/train_catvnoncat.h5', 'r') as train_dataset:
        train_set_x = np.array(train_dataset['train_set_x'][:])
        train_set_y = np.array(train_dataset['train_set_y'][:])
    with h5py.File('datasets/test_catvnoncat.h5', 'r') as test_dataset:
        test_set_x = np.array(test_dataset['test_set_x'][:])
        test_set_y = np.array(test_dataset['test_set_y'][:])
    return train_set_x, train_set_y, test_set_x, test_set_y


# Load the raw splits and record their basic dimensions.
train_set_x, train_set_y, test_set_x, test_set_y = load_dataset()
# m_train / m_test: number of examples; numpx: square image side in pixels.
m_train, numpx = train_set_x.shape[0], train_set_x.shape[1]
m_test = test_set_x.shape[0]
print(f'train_set_x {train_set_x.shape} train_set_y {train_set_y.shape}, test_set_x {test_set_x.shape} test_set_y {test_set_y.shape}')


def init_parameters(dnn_conf: list[int], seed: int = 2):
    """Initialize weights and biases for every non-input layer.

    Weights use randn / sqrt(fan_in) scaling so activations keep a
    reasonable variance; all-zero weights would make every unit in a
    layer identical. Biases start at zero, which is safe.

    Args:
        dnn_conf: layer sizes, including the input layer at index 0.
        seed: RNG seed for reproducible initialization (default 2,
              matching the original behavior).

    Returns:
        List of dicts, one per non-input layer, each holding
        'W' with shape (n_l, n_{l-1}) and 'B' with shape (n_l, 1).
    """
    np.random.seed(seed)
    parameters = []
    # The input layer (index 0) has no parameters of its own.
    for i in range(1, len(dnn_conf)):
        fan_in, fan_out = dnn_conf[i - 1], dnn_conf[i]
        parameters.append({
            'W': np.random.randn(fan_out, fan_in) / np.sqrt(fan_in),
            'B': np.zeros((fan_out, 1)),
        })
    return parameters


def sigmoid(Z):
    """Element-wise logistic sigmoid activation: 1 / (1 + e^-Z)."""
    return np.reciprocal(1.0 + np.exp(-Z))


def sigmoid_backward(dA, Z):
    """Backprop through sigmoid: dZ = dA * s(Z) * (1 - s(Z))."""
    activation = 1 / (1 + np.exp(-Z))
    return dA * activation * (1 - activation)


def relu(Z):
    """ReLU activation: element-wise max(0, Z)."""
    return np.where(Z > 0, Z, 0)


def relu_backward(dA, Z):
    """Backprop through ReLU: gradient passes only where Z was positive."""
    # Zero the gradient wherever the pre-activation was <= 0.
    dZ = np.where(Z <= 0, 0, dA)
    assert (dZ.shape == Z.shape)
    return dZ


def forward_propagate(parameters, X):
    """Run a full forward pass, caching intermediates for backprop.

    Each layer dict receives 'Z' (its pre-activation) and 'A' (the
    activation that was fed INTO it). Hidden layers apply ReLU; the
    final layer applies sigmoid for the binary output.

    Returns the last layer's activation AL.
    """
    activation = X
    last = len(parameters) - 1
    for idx, layer in enumerate(parameters):
        layer['Z'] = np.dot(layer['W'], activation) + layer['B']
        layer['A'] = activation  # input activation, needed for dW later
        squash = sigmoid if idx == last else relu
        activation = squash(layer['Z'])
    return activation


def compute_cost(Y, AL):
    """Cross-entropy cost: -(1/m) * sum(y*log(a) + (1-y)*log(1-a))."""
    m = Y.shape[1]
    losses = np.multiply(Y, np.log(AL)) + np.multiply(1 - Y, np.log(1 - AL))
    return np.squeeze((-1 / m) * np.sum(losses))


def backword_propage(parameters, Y, AL):
    """Backward pass: store 'dZ', 'dW', 'dB' gradients in each layer dict.

    Relies on the 'Z' and 'A' caches written by forward_propagate. The
    last layer backprops through sigmoid, earlier layers through ReLU.

    Args:
        parameters: per-layer dicts, mutated in place with gradients.
        Y: true labels, shape (1, m).
        AL: final activations from the forward pass, shape (1, m).
    """
    llen = len(parameters)
    m = AL.shape[1]
    # Derivative of the cross-entropy cost with respect to AL.
    dA = np.divide(-Y, AL) + np.divide(1-Y, 1-AL)
    for i in range(0, llen):
        # Walk layers from last to first.
        pi = parameters[llen-i-1]
        bf = sigmoid_backward if i == 0 else relu_backward
        pi['dZ'] = bf(dA, pi['Z'])
        pi['dW'] = np.dot(pi['dZ'], pi['A'].T)/m
        pi['dB'] = np.sum(pi['dZ'], axis=1, keepdims=True)/m
        # Gradient w.r.t. this layer's input, consumed by the next
        # (earlier) layer in the walk. (Unused locals dz/Z removed.)
        dA = np.dot(pi['W'].T, pi['dZ'])


def update_parameters(parameters, learning_rate):
    """Apply one gradient-descent step to every layer's weights and biases."""
    for layer in parameters:
        step_w = learning_rate * layer['dW']
        step_b = learning_rate * layer['dB']
        layer['W'] = layer['W'] - step_w
        layer['B'] = layer['B'] - step_b


def optimize(parameters, train_X, train_Y, num, learning_rate):
    """Train the network with `num` iterations of gradient descent.

    Each iteration runs forward pass -> cost -> backward pass -> update,
    printing the cost every 100 iterations. Returns the trained parameters.
    """
    for iteration in range(num):
        AL = forward_propagate(parameters, train_X)     # forward pass
        cost = compute_cost(train_Y, AL)                # scalar cost
        backword_propage(parameters, train_Y, AL)       # gradients
        update_parameters(parameters, learning_rate)    # descent step
        if iteration % 100 == 0:
            print(f'第{iteration}次后成本 是 {cost}')
    return parameters


def predict(parameters, X):
    """Predict a binary label for each column (example) of X.

    Runs a forward pass and thresholds the output-layer activation at
    0.5. Returns a 0.0/1.0 array of shape (1, X.shape[1]).
    (The old comment claiming a tanh hidden layer was wrong — hidden
    layers use ReLU; see forward_propagate.)
    """
    prediction = np.zeros((1, X.shape[1]))
    A = forward_propagate(parameters, X)
    # Vectorized threshold instead of a Python loop over examples.
    prediction[0, A[0] >= 0.5] = 1
    return prediction


def model(train_X, train_Y, test_X, test_Y, num=2000, learning_rate=0.0075):
    """Build, train, and evaluate the network.

    Args:
        train_X, test_X: features, one example per column.
        train_Y, test_Y: labels, shape (1, m).
        num: gradient-descent iterations.
        learning_rate: step size.

    Returns:
        The trained per-layer parameter list.
    """
    # Layer sizes: input, two hidden layers, then the output stack.
    # NOTE(review): the trailing [1, 1] builds TWO size-1 layers (ReLU
    # then sigmoid) — possibly a single output layer was intended; confirm.
    dnnConf = [train_X.shape[0], 20, 5, 1, 1]
    params = init_parameters(dnnConf)
    # Run gradient descent to fit W and B.
    params = optimize(params, train_X, train_Y, num, learning_rate)
    # Predictions on both splits.
    pred_train = predict(params, train_X)
    pred_test = predict(params, test_X)
    # Report accuracy as a percentage.
    print(
        f'对训练图片的预测准确率为 {100 - np.mean(np.abs(pred_train - train_Y))*100}')
    print(
        f'对测试图片的预测准确率为 {100 - np.mean(np.abs(pred_test - test_Y))*100}')
    return params


# Scale pixel values into [0, 1].
train_set_x_flatten = train_set_x/255
test_set_x_flatten = test_set_x/255
# Flatten each image into a column vector: (features, examples) layout.
train_set_X = train_set_x_flatten.reshape(train_set_x_flatten.shape[0], -1).T
train_set_Y = train_set_y.reshape(1, train_set_y.shape[0])
test_set_X = test_set_x_flatten.reshape(test_set_x_flatten.shape[0], -1).T
test_set_Y = test_set_y.reshape(1, test_set_y.shape[0])
parameters = model(train_set_X, train_set_Y, test_set_X, test_set_Y)

# Classify one custom image with the trained network.
image_name = "my_image3.jpg"
image_path = "images/" + image_name
image = np.array(plt.imread(image_path))
# Resize to the network's input resolution and flatten to a column vector.
resized = tf.resize(image, (numpx, numpx), mode='reflect')
sample = resized.reshape((1, numpx*numpx*3)).T
my_predicted_image = predict(parameters, sample)
print("预测结果为 " + str(int(np.squeeze(my_predicted_image))))
plt.imshow(image)
plt.show()
