#!/usr/bin/python3.9
# -*- coding: utf-8 -*-
# @Time    : 2021/10/11 19:12
# @Author  : YHSimon
# Learn neural-network parameters with backpropagation — handwritten-digit recognition example


import numpy as np
import matplotlib.pyplot as plt
from scipy.io import loadmat
import scipy.optimize as opt
from sklearn.metrics import classification_report  # 用于评价报告
from sklearn.preprocessing import OneHotEncoder


def load_mat(path):
    """Load the digit dataset from a MATLAB .mat file.

    Returns (X, y): the feature matrix and the label vector flattened
    from a column vector down to one dimension.
    """
    mat = loadmat(path)
    labels = mat['y']
    print(labels.shape)  # stored as a 2-D column vector, e.g. (5000, 1)
    flat_labels = labels.flatten()
    print(flat_labels.shape)  # flattened to 1-D, e.g. (5000,)
    return mat['X'], flat_labels


def expand_y(y):
    """One-hot encode integer labels into 10-dimensional rows.

    Label k (1..10) maps to a vector with a 1 at index k-1, e.g.
    y[i] = 6 becomes [0,0,0,0,0,1,0,0,0,0].

    (sklearn's OneHotEncoder on y.reshape(-1, 1) would do the same.)
    """
    identity = np.eye(10)
    return np.array([identity[label - 1] for label in y])


def load_weight(path):
    """Load the pre-trained weight matrices Theta1 and Theta2 from a .mat file."""
    weights = loadmat(path)
    return weights['Theta1'], weights['Theta2']


def serialize(a, b):
    """Unroll two parameter matrices into one flat 1-D vector.

    High-level optimizers only accept a single parameter vector, so the
    weight matrices are flattened here and restored by deserialize().
    """
    return np.concatenate((a.flatten(), b.flatten()))


def deserialize(seq, hidden_size=25, input_size=400, num_labels=10):
    """Split a flat parameter vector back into the two weight matrices.

    Generalized from the hard-coded 25/400/10 network shape: defaults
    reproduce the original behavior (t1: (25, 401), t2: (10, 26)).

    seq: flat vector of length hidden_size*(input_size+1) + num_labels*(hidden_size+1)
    Returns (t1, t2) with the bias column included in each (+1 terms).
    """
    split = hidden_size * (input_size + 1)
    t1 = seq[:split].reshape(hidden_size, input_size + 1)
    t2 = seq[split:].reshape(num_labels, hidden_size + 1)
    return t1, t2


def sigmoid(z):
    """Element-wise logistic function g(z) = 1 / (1 + e^{-z})."""
    denom = 1 + np.exp(-z)
    return 1 / denom


def feed_forward(theta, X):
    """Run one forward pass through the 3-layer network.

    X must already contain the bias column (inserted by the caller).
    Returns every intermediate quantity backpropagation needs:
    (a1, z2, a2, z3, a3) where a3 is the hypothesis.
    """
    t_hidden, t_out = deserialize(theta)
    hidden_in = X @ t_hidden.T
    # Add the hidden layer's bias unit before feeding the output layer.
    hidden_act = np.insert(sigmoid(hidden_in), 0, 1, axis=1)
    out_in = hidden_act @ t_out.T
    hypothesis = sigmoid(out_in)
    return X, hidden_in, hidden_act, out_in, hypothesis


def cost(theta, X, y):
    """Unregularized cross-entropy cost of the network.

    Vectorized form (as the original's own docstring suggested),
    replacing the per-example Python loop with one array expression.

    theta: flat parameter vector
    X: inputs with bias column inserted
    y: one-hot encoded labels, same shape as the hypothesis h
    """
    a1, z2, a2, z3, h = feed_forward(theta, X)
    J = - y * np.log(h) - (1 - y) * np.log(1 - h)
    return J.sum() / len(X)


def regularized_cost(theta, X, y, l=1):
    """Cross-entropy cost plus an L2 penalty on the non-bias weights.

    Each matrix's first column (the bias terms) is excluded from the
    penalty, per convention.
    """
    t1, t2 = deserialize(theta)
    penalty = np.sum(np.power(t1[:, 1:], 2)) + np.sum(np.power(t2[:, 1:], 2))
    return cost(theta, X, y) + l / (2 * len(X)) * penalty


# 以下为反向传播
def sigmoid_gradient(z):
    """Derivative of the sigmoid: g'(z) = g(z) * (1 - g(z)).

    Evaluates sigmoid(z) once instead of twice (the original computed
    it twice per call, doubling the work on large arrays).
    """
    s = sigmoid(z)
    return s * (1 - s)


def random_init(size):
    """Draw `size` values uniformly from [-0.12, 0.12] to break symmetry."""
    epsilon = 0.12
    return np.random.uniform(low=-epsilon, high=epsilon, size=size)


def gradient(theta, X, y):
    '''
    Unregularized gradient of the cost w.r.t. every parameter.

    Note there is no d1 term — the input layer has no error. The result
    is flattened so its shape matches `theta` exactly, which the
    optimizer requires.

    theta: flat parameter vector, (10285,)
    X: inputs with bias column, (5000, 401)
    y: one-hot labels, (5000, 10)
    '''
    t1, t2 = deserialize(theta)
    a1, z2, a2, z3, h = feed_forward(theta, X)
    d3 = h - y  # output-layer error (5000, 10)
    d2 = d3 @ t2[:, 1:] * sigmoid_gradient(z2)  # hidden-layer error; bias column of t2 dropped (5000, 25)
    D2 = d3.T @ a2  # gradient accumulator for theta2 (10, 26)
    D1 = d2.T @ a1  # gradient accumulator for theta1 (25, 401)
    D = (1 / len(X)) * serialize(D1, D2)  # average over all examples, flattened (10285,)

    return D


def regularized_gradient(theta, X, y, l=1):
    """Regularized gradient; bias-unit parameters are not penalized.

    Bug fix: the original never defined t1/t2 locally — it read AND
    mutated the module-level globals (zeroing the first column of the
    loaded weight matrices in place). Here theta is deserialized from a
    local copy instead, so callers' data is untouched. The original's
    unused feed_forward call (a full forward pass whose results were
    discarded) is also removed.
    """
    D1, D2 = deserialize(gradient(theta, X, y))
    # Copy before zeroing: deserialize returns views into the vector.
    t1, t2 = deserialize(theta.copy())
    t1[:, 0] = 0  # bias column excluded from the penalty
    t2[:, 0] = 0
    reg_D1 = D1 + (l / len(X)) * t1
    reg_D2 = D2 + (l / len(X)) * t2

    return serialize(reg_D1, reg_D2)


# 梯度检测
def gradient_checking(theta, X, y, e):
    """Compare the analytic gradient against two-sided numeric estimates.

    For every parameter theta_i, perturb it by +/- e and take the
    central-difference approximation; report the relative difference
    from the backpropagation gradient. O(len(theta)) cost evaluations —
    very slow, intended for debugging only.
    """

    def numeric_partial(plus, minus):
        # Central-difference approximation of one partial derivative.
        return (regularized_cost(plus, X, y) - regularized_cost(minus, X, y)) / (e * 2)

    approx = np.empty(len(theta))
    for idx in range(len(theta)):
        shifted_up = theta.copy()    # copy so the raw theta is never modified
        shifted_down = theta.copy()
        shifted_up[idx] += e
        shifted_down[idx] -= e
        approx[idx] = numeric_partial(shifted_up, shifted_down)

    analytic_grad = regularized_gradient(theta, X, y)
    diff = np.linalg.norm(approx - analytic_grad) / np.linalg.norm(approx + analytic_grad)

    print(
        'If your backpropagation implementation is correct,\nthe relative difference will be smaller than 10e-9 (assume epsilon=0.0001).\nRelative Difference: {}\n'.format(
            diff))


# Learning parameters using fmincg 优化参数
def nn_training(X, y):
    """Train the network with scipy's TNC optimizer (like fmincg).

    Starts from a small random parameter vector and minimizes the
    regularized cost using the analytic gradient. Returns the
    OptimizeResult; the learned parameters are in `.x`.
    """
    initial_theta = random_init(10285)  # 25*401 + 10*26 unrolled parameters
    return opt.minimize(fun=regularized_cost,
                        x0=initial_theta,
                        args=(X, y, 1),
                        jac=regularized_gradient,
                        method='TNC',
                        options={'maxiter': 400})


def accuracy(theta, X, y):
    """Print a classification report for predictions made with `theta`.

    Bug fix: the original ignored its `theta` parameter and read the
    module-level `res.x` instead, which made the function unusable with
    any other parameter vector.

    y: raw 1..10 integer labels (not one-hot).
    """
    _, _, _, _, h = feed_forward(theta, X)
    y_pred = np.argmax(h, axis=1) + 1  # argmax is 0-based; labels are 1..10
    print(classification_report(y, y_pred))


# 可视化隐藏层
def plot_hidden(theta):
    """Visualize the 25 hidden-unit weight vectors as 20x20 grayscale images."""
    hidden_weights, _ = deserialize(theta)
    hidden_weights = hidden_weights[:, 1:]  # drop the bias weight before reshaping
    fig, axes = plt.subplots(5, 5, sharex=True, sharey=True, figsize=(6, 6))
    for idx in range(25):
        axes[idx // 5, idx % 5].matshow(hidden_weights[idx].reshape(20, 20), cmap='gray_r')
        plt.xticks([])
        plt.yticks([])
    plt.show()


def plot_100_image(X):
    """Display 100 randomly chosen digit images on a 10x10 grid.

    Each row of X is a 20x20 pixel image unrolled into a 400-vector.
    """
    chosen = np.random.choice(np.arange(X.shape[0]), 100)
    images = X[chosen, :]  # (100, 400)
    # sharex/sharey share axis properties; nrows/ncols split the canvas
    fig, axes = plt.subplots(nrows=10, ncols=10, sharey=True, sharex=True, figsize=(8, 8))
    for idx in range(100):
        axes[idx // 10, idx % 10].matshow(images[idx].reshape((20, 20)), cmap='gray_r')
    plt.xticks([])
    plt.yticks([])
    plt.show()


if __name__ == '__main__':
    # Load the training data
    raw_X, raw_y = load_mat('ex4data1.mat')
    print(raw_X)
    X = np.insert(raw_X, 0, 1, axis=1)  # insert a bias column of 1s at position 0
    print(X.shape)
    y = expand_y(raw_y)  # one-hot encode the 1..10 labels
    print(y.shape)

    # Load the pre-trained weights
    print('-------读取权重---------')
    t1, t2 = load_weight('ex4weights.mat')  # (25,401) (10,26): 25 hidden units, 10 output units
    theta = serialize(t1, t2)  # unroll parameters: 25*401 + 10*26 = 10285
    print(t1.shape)
    print(t2.shape)
    print(theta.shape)  # (10285,)

    # Forward pass (note the bias unit added inside for the hidden layer)
    a1, z2, a2, z3, h = feed_forward(theta, X)

    # Cost and regularized cost with the loaded weights
    print(cost(theta, X, y))  # 0.2876291651613188
    print(regularized_cost(theta, X, y, 1))  # 0.38376985909092354

    # Gradient checking — very slow, enable with care
    # gradient_checking(theta, X, y, 0.0001)
    res = nn_training(X, y)  # slow
    print(res)

    accuracy(res.x, X, raw_y)
    plot_hidden(res.x)
