import numpy
import scipy.special
import matplotlib.pyplot


class neuralNetwork :

    # initialise神经网络
    def __init__(self,inputnodes, hiddennodes, outputnodes, learningrate):
        # set number of nodes
        self.inodes = inputnodes
        self.hnodes = hiddennodes
        self.onodes = outputnodes

        self.wih = numpy.random.normal(0.0, pow(self.hnodes, -0.5), (self.hnodes, self.inodes))
        self.who = numpy.random.normal(0.0, pow(self.onodes, -0.5), (self.onodes, self.hnodes))
        # learning rateb
        self.lr = learningrate
        # 定义函数 sigmoid
        self.activation_function = lambda x: scipy.special.expit(x)
        pass

    # 训练神经网络
    def train(self, inputs_list, targets_list):
        inputs = numpy.array(inputs_list, ndmin=2).T
        targets = numpy.array(targets_list, ndmin=2).T

        # 计算隐藏层
        hidden_inputs = numpy.dot(self.wih, inputs)
        hidden_outputs = self.activation_function(hidden_inputs)

        # 最终一层
        final_inputs = numpy.dot(self.who, hidden_outputs)
        final_outputs = self.activation_function(final_inputs)

        # 误差是目标值和最终输出值的差
        output_errors = targets - final_outputs

        # 根据权重分配误差
        hidden_errors = numpy.dot(self.who.T, output_errors)
        # 依据隐藏层和输出层，进行权重更新
        self.who += self.lr * numpy.dot((output_errors * final_outputs * (1.0 - final_outputs)),
                                        numpy.transpose(hidden_outputs))
        self.wih += self.lr * numpy.dot((hidden_errors * hidden_outputs * (1.0 - hidden_outputs)),
                                        numpy.transpose(inputs))

        pass

    # 查询网络
    def query(self, inputs_list):
        # 输入的转置
        inputs = numpy.array(inputs_list, ndmin=2).T
        # 隐藏节点的输入为  输入到隐藏结点的权重*输入的转置
        hidden_inputs = numpy.dot(self.wih, inputs)
        # 隐藏节点的输出为输入乘以阈值函数
        hidden_outputs = self.activation_function(hidden_inputs)

        final_inputs = numpy.dot(self.who, hidden_outputs)
        final_outputs = self.activation_function(final_inputs)
        # 返回最终的输出结果
        return final_outputs


# Network dimensions: 28x28 pixel images flattened to 784 inputs,
# one output node per digit class 0-9.
input_nodes = 784
hidden_nodes = 200
output_nodes = 10

learning_rate = 0.2

n = neuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)

# Load the MNIST training subset (CSV: label, then 784 pixel values per row).
# Context manager guarantees the file handle is closed even on error.
with open('C:\\Users\\kai\\Desktop\\邮科院学习\\神经网络数据\\mnist_train_100.csv', 'r') as training_data_file:
    training_data_list = training_data_file.readlines()

# Train for several passes over the data set.
epochs = 5
for e in range(epochs):
    for record in training_data_list:
        all_values = record.split(',')
        # Scale pixel values from 0..255 into 0.01..1.00 so no input is
        # exactly zero (a zero input kills its weight updates).
        # numpy.asfarray was removed in NumPy 2.0; asarray(dtype=float)
        # is the supported equivalent.
        inputs = (numpy.asarray(all_values[1:], dtype=float) / 255.0 * 0.99) + 0.01
        # Targets: 0.01 everywhere except 0.99 at the correct label —
        # sigmoid outputs can never reach exactly 0 or 1.
        targets = numpy.zeros(output_nodes) + 0.01
        targets[int(all_values[0])] = 0.99
        n.train(inputs, targets)

# 载入测试部分
# test_data_file = open('C:\\Users\\kai\\Desktop\\邮科院学习\\神经网络数据\\mnist_test_10.csv','r')
# test_data_list = test_data_file.readlines()
# test_data_file.close()

# 输出部分
# scorecard = []
# for record in test_data_list:
#     all_values = record.split(',')
#     correct_label = int(all_values[0])
#     inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
#     outputs = n.query(inputs)
#     label = numpy.argmax(outputs)
#     print(outputs)
#     if (label == correct_label):
#         scorecard.append(1)
#     else:
#         scorecard.append(0)
#         pass
#     pass
# all_values = test_data_list[5].split(',')
# # 正确的数字
# correct_label = all_values[0]
# # 整理格式化输入列表
# inputs = (numpy.asfarray(all_values[1:]) / 255 * 0.99) + 0.01
# # 获取输出结果
# outputs = n.query(inputs)
# # 从输出结果中获取标记出来的数字
# label = numpy.argmax(outputs)
# print(outputs)
# print("图像中的数字是：", label)
# image_array = numpy.asfarray(all_values[1:]).reshape(28, 28)
# matplotlib.pyplot.imshow(image_array, cmap='Greys', interpolation='None')
# matplotlib.pyplot.show()