# %%
import numpy as np
import scipy.special
import matplotlib.pyplot as plt


# %%
class NeuralNetwork:
    """Three-layer feedforward neural network (input -> hidden -> output)
    with sigmoid activations, trained one sample at a time by
    stochastic gradient descent."""

    def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
        """Set up layer sizes, learning rate, and randomly initialized weights.

        Args:
            input_nodes: number of nodes in the input layer.
            hidden_nodes: number of nodes in the hidden layer.
            output_nodes: number of nodes in the output layer.
            learning_rate: SGD step size applied in train().
        """
        self.input_nodes = input_nodes
        self.hidden_nodes = hidden_nodes
        self.output_nodes = output_nodes
        self.learning_rate = learning_rate
        # Weight matrices drawn from N(0, layer_size**-0.5) so that initial
        # weighted sums stay away from sigmoid saturation.
        # weight_ij: (hidden x input); weight_jk: (output x hidden).
        self.weight_ij = np.random.normal(0.0, pow(self.hidden_nodes, -0.5),
                                          (self.hidden_nodes, self.input_nodes))
        self.weight_jk = np.random.normal(0.0, pow(self.output_nodes, -0.5),
                                          (self.output_nodes, self.hidden_nodes))
        # Sigmoid activation. expit is already a vectorized callable, so no
        # lambda wrapper is needed.
        self.activate_function = scipy.special.expit

    def train(self, inputs_list, target_list):
        """Run one forward pass and one backpropagation update on one sample.

        Args:
            inputs_list: 1-D sequence of input values (length input_nodes).
            target_list: 1-D sequence of target outputs (length output_nodes).
        """
        # Convert inputs and targets to 2-D column vectors.
        inputs = np.array(inputs_list, ndmin=2).T
        target = np.array(target_list, ndmin=2).T

        # Forward pass: input -> hidden -> output.
        hidden_output = self.activate_function(np.dot(self.weight_ij, inputs))
        final_output = self.activate_function(np.dot(self.weight_jk, hidden_output))

        # Output-layer error, then error apportioned back to the hidden layer
        # in proportion to the connecting weights.
        output_error = target - final_output
        hidden_error = np.dot(self.weight_jk.T, output_error)

        # Gradient-descent updates; the sigmoid derivative is out * (1 - out).
        self.weight_jk += self.learning_rate * np.dot(
            output_error * final_output * (1 - final_output),
            hidden_output.T)
        self.weight_ij += self.learning_rate * np.dot(
            hidden_error * hidden_output * (1 - hidden_output),
            inputs.T)

    def query(self, inputs_list):
        """Forward-propagate inputs_list and return the output layer.

        Args:
            inputs_list: 1-D sequence of input values (length input_nodes).

        Returns:
            Column vector (output_nodes x 1) of output-layer activations.
        """
        inputs = np.array(inputs_list, ndmin=2).T
        hidden_output = self.activate_function(np.dot(self.weight_ij, inputs))
        final_output = self.activate_function(np.dot(self.weight_jk, hidden_output))
        return final_output


# %%
# Train a 784-100-10 network on the MNIST training CSV and save its weights.
test = NeuralNetwork(784, 100, 10, 0.1)

with open("./mnist_train.csv", 'r') as train_file:
    train_records = train_file.readlines()

epochs = 4  # full passes over the training set
for _ in range(epochs):
    for record in train_records:
        char_values = record.split(',')
        # Pixels at positions 1..784: rescale 0-255 into 0.01-1.0 so no
        # input is exactly zero.  np.asarray(..., dtype=float) replaces the
        # deprecated np.asfarray (removed in NumPy 2.0).
        input_values = np.asarray(char_values[1:], dtype=float) / 255.0 * 0.99 + 0.01
        # Target vector: 0.99 for the labelled digit, 0.1 everywhere else.
        # NOTE(review): the 0.1 off-target value is inconsistent with the
        # 0.01 input floor above — confirm whether 0.01 was intended.
        target = np.zeros(10) + 0.1
        target[int(char_values[0])] = 0.99
        test.train(input_values, target)
np.savez("model_parameters.npz", para_ij=test.weight_ij, para_jk=test.weight_jk)


# %%
# Evaluate the saved model on the MNIST test CSV.
with open("./mnist_test.csv", 'r') as test_file:
    test_list = test_file.readlines()

score = []  # 1 for each correct prediction, 0 otherwise
test2 = NeuralNetwork(784, 100, 10, 0.1)
# Restore previously trained weights instead of training again.
data = np.load("./model_parameters.npz")
test2.weight_ij = data["para_ij"]
test2.weight_jk = data["para_jk"]
import time  # NOTE(review): unused in this cell — remove if nothing later needs it

for record in test_list:
    test_char_values = record.split(',')
    # Same 0.01-1.0 input scaling as during training (asfarray was removed
    # in NumPy 2.0, so use asarray with an explicit float dtype).
    test_int_values = np.asarray(test_char_values[1:], dtype=float) / 255.0 * 0.99 + 0.01
    # Predicted digit = index of the strongest output node.
    predict = np.argmax(test2.query(test_int_values))
    reality = int(test_char_values[0])
    score.append(int(predict == reality))
    # plt.imshow(test_int_values.reshape((28,28)),cmap="Greys")
    # print(f"predict:{predict},reality{reality}")


# %%
# Overall accuracy: fraction of test records classified correctly.
score_array = np.asarray(score)
accuracy = score_array.mean()
print(accuracy)

# Indices of the misclassified test records:
# zero_index = [index for index,value in enumerate(score) if value == 0]
# print(zero_index)
# print(len(zero_index))


# %%
# print(test.weight_ij)
plt.imshow(test.weight_ij, cmap="Reds")
plt.colorbar()
plt.show()