import numpy as np
import csv
from tqdm import trange
from sklearn.metrics import classification_report, confusion_matrix
def sigmoid(x):
    """Numerically safe logistic function: 1 / (1 + exp(-x)).

    Clipping the argument keeps ``np.exp`` inside the float64 range, so
    large-magnitude negative inputs no longer raise an overflow warning.
    Results are bit-identical for |x| < 709 and saturate to 0/1 beyond.
    """
    z = np.clip(x, -709.0, 709.0)  # exp(709) is the largest finite float64 power
    return 1.0 / (1.0 + np.exp(-z))

def sigmoid_derivative(x):
    """Derivative of the logistic function: f'(x) = f(x) * (1 - f(x)).

    Evaluates ``sigmoid(x)`` once instead of twice, halving the work of
    the original implementation while returning identical values.
    """
    s = sigmoid(x)
    return s * (1 - s)

class Net:
    """Minimal fully-connected feed-forward network with sigmoid units,
    trained by per-sample stochastic gradient descent on squared error.
    """

    def __init__(self, layers):
        """Initialize weights and biases from a standard normal.

        layers: list of layer sizes, e.g. [784, 50, 10] — input size,
        hidden sizes, output size.
        """
        self.active = sigmoid  # activation function
        # NOTE(review): kept for API compatibility, but train() hard-codes
        # the sigmoid derivative as y * (1 - y) and never calls this.
        self.active_d = sigmoid_derivative
        self.weights = []  # one weight matrix per layer transition
        self.bias = []     # one bias vector per non-input layer
        for i in range(1, len(layers)):
            self.weights.append(np.random.randn(layers[i - 1], layers[i]))
            self.bias.append(np.random.randn(layers[i]))

    def train(self, x, label, learning_rate):
        """One SGD step on a single (x, label) sample.

        x: 1-D feature vector; label: 1-D one-hot target vector;
        learning_rate: step size. Updates weights and biases in place.
        """
        y = [x]  # activations of every layer, input included
        # Forward pass
        for w, b in zip(self.weights, self.bias):
            y.append(self.active(np.dot(y[-1], w) + b))

        # Backward pass.  Output-layer delta for squared error with a
        # sigmoid output: (y - t) * f'(z), and f'(z) = y * (1 - y).
        e = y[-1] - label
        deltas = [e * y[-1] * (1 - y[-1])]
        # Hidden-layer deltas, propagated from the output backwards
        for i in range(1, len(self.weights)):
            deltas.append(np.dot(deltas[-1], self.weights[-i].T) * y[-i - 1] * (1 - y[-i - 1]))
        # Reverse so deltas[i] pairs with weights[i]
        deltas.reverse()
        # Gradient-descent update of weights and biases
        for i in range(len(self.weights)):
            y_2d = np.atleast_2d(y[i])
            d_2d = np.atleast_2d(deltas[i])
            self.weights[i] -= learning_rate * np.dot(y_2d.T, d_2d)
            self.bias[i] -= learning_rate * deltas[i]

    def predict(self, x):
        """Forward pass; return (predicted class index, None).

        The second element is kept so callers that unpack a
        (prediction, probabilities) pair keep working; the probability
        rendering was already disabled in the original code.
        """
        y = x
        for w, b in zip(self.weights, self.bias):
            y = self.active(np.dot(y, w) + b)
        # np.argmax replaces the list()/index(max()) idiom of the original
        return int(np.argmax(y)), None


if __name__ == '__main__':

    # Renamed from `Net = Net(...)`: the original shadowed the class
    # with its own instance, making further Net(...) calls impossible.
    net = Net([784, 50, 10])

    # MNIST data pre-packed as .npz; each row is [label, 784 pixels].
    # (Source dataset: https://www.openml.org/d/554)
    data_path = "D:/Data"
    loaded = np.load(f'{data_path}/mnist/mnist_train.npz')
    train_data = loaded['data']
    print("train set: ", train_data.shape)

    loaded = np.load(f'{data_path}/mnist/mnist_test.npz')
    test_data = loaded['data']
    print("test set:", test_data.shape)

    # Split label column from pixel columns
    X_train, y_train = train_data[:, 1:], train_data[:, 0].astype(dtype=np.int32)
    print("X_train = ", X_train.shape, "y_train = ", y_train.shape)
    X_test, y_test = test_data[:, 1:], test_data[:, 0].astype(dtype=np.int32)
    # Fixed label in the message: this line reports the TEST split
    print("X_test = ", X_test.shape, "y_test = ", y_test.shape)

    # Scale pixel values into [0, 1]
    X_train = X_train / 255.
    X_test = X_test / 255.

    # Training: per-sample SGD
    learning_rate = 0.5  # initial learning rate
    epochs = 5           # number of passes over the training set
    n_train = len(y_train)
    for i in trange(n_train * epochs):
        # Bug fix: decay once per EPOCH, i.e. every n_train samples.
        # The original tested `% len(y_test)`, decaying at the wrong rate.
        if (i + 1) % n_train == 0:
            learning_rate *= 0.9
        label = np.zeros(10)
        label[y_train[i % n_train]] = 1  # one-hot target
        net.train(X_train[i % n_train], label, learning_rate)

    # Evaluation on the held-out test set
    count = 0
    preds = []
    for i in trange(len(y_test)):
        y_predict, _ = net.predict(X_test[i])
        if y_predict == y_test[i]:
            count += 1
        preds.append(y_predict)
    print("正确率：{}".format(count / len(y_test)))
    print("混淆矩阵：")
    print(confusion_matrix(y_test, preds))