import numpy as np
import scipy.io as sio
import matplotlib.pyplot as plt
import pickle
import layer
import losses
import activations


class Mnist:
    """Two-layer fully connected network (784 -> hidden -> 10) trained on MNIST.

    Data comes from the classic ``mnist_all.mat`` file (one matrix per digit,
    keys ``train0``..``train9`` and ``test0``..``test9``). The forward/backward
    math is delegated to the project-local ``layer``, ``losses`` and
    ``activations`` modules.
    """

    # Class-level defaults kept for backward compatibility; every instance
    # re-binds its own copies in __init__ / loadData so state is not shared.
    Train = []  # training set
    Test = []  # validation set
    Parameters = {}  # parameter matrices (W1, B1, W2, B2)

    def __init__(self, file_path):
        """Load the MNIST .mat file at *file_path* and set up per-instance state."""
        # Fresh dict per instance: the class-level Parameters would otherwise
        # be silently shared (mutated) across all Mnist instances.
        self.Parameters = {}
        self.loadData(file_path)
        self.nodes = {}      # forward-pass activations, keyed "A1","Z1","A2","Z2","y"
        self.gradients = {}  # backward-pass gradients, keyed like self.nodes

        # per-epoch curves recorded on the test set
        self.plt_x_test = []
        self.plt_acc_test = []
        self.plt_loss_test = []

        # per-epoch curves recorded on the training set
        self.plt_x = []
        self.plt_acc = []
        self.plt_loss = []

    def loadData(self, file_path):
        """Read mnist_all.mat, pair each image row with its digit label, and shuffle.

        self.Train / self.Test become object arrays of shape (N, 2) whose rows
        are [pixel_vector(784,), label_int].
        """
        mnist_all = sio.loadmat(file_path)
        train_raw = []
        test_raw = []
        # one matrix per digit 0-9 in the .mat file
        for digit in range(10):
            for row in mnist_all["train" + str(digit)]:
                train_raw.append([row, digit])
        for digit in range(10):
            for row in mnist_all["test" + str(digit)]:
                test_raw.append([row, digit])

        # shuffle once up front; batching then walks the arrays in order
        train = np.array(train_raw)
        np.random.shuffle(train)
        test = np.array(test_raw)
        np.random.shuffle(test)
        self.Train = train
        self.Test = test

    def getBatchTrain(self, is_train=True, batch_size=16, offset=0):
        """Return one mini-batch.

        Args:
            is_train: take samples from Train (True) or Test (False).
            batch_size: number of samples in the batch.
            offset: starting index; indexing wraps around at the end of the set.

        Returns:
            (X, Y): X is (784, batch) pixels scaled to [0, 1]; Y is one-hot (10, batch).
        """
        data = self.Train if is_train else self.Test
        X = []
        Y = []
        one_hot = np.eye(10)
        length = data.shape[0]
        for i in range(batch_size):
            sample = data[(offset + i) % length]  # wrap around past the end
            X.append(sample[0])
            Y.append(one_hot[sample[1]])
        X = np.array(X).T
        Y = np.array(Y).T
        # normalize pixel values from [0, 255] to [0, 1]
        # (original divided by 225.0 — a typo for 255.0)
        X = X / 255.0
        return X, Y

    def initParam(self, hidden, init_type="xavier"):
        """Initialize the weight matrices W1 (hidden, 784) and W2 (10, hidden).

        Args:
            hidden: width of the hidden layer.
            init_type: "xavier", "normal" (N(0,1)*0.01) or "rand" (U[0,1)*0.01).

        Raises:
            Exception: if init_type is not one of the supported strings.
        """
        if init_type == "xavier":
            self.Parameters["W1"] = np.random.randn(hidden, 784) / np.sqrt(hidden / 2)
            self.Parameters["W2"] = np.random.randn(10, hidden) / np.sqrt(hidden / 2)
        elif init_type == "normal":
            self.Parameters["W1"] = np.random.normal(loc=0.0, scale=1.0, size=(hidden, 784)) * 0.01
            self.Parameters["W2"] = np.random.normal(loc=0.0, scale=1.0, size=(10, hidden)) * 0.01
        elif init_type == "rand":
            self.Parameters["W1"] = np.random.rand(hidden, 784) * 0.01
            self.Parameters["W2"] = np.random.rand(10, hidden) * 0.01
        else:
            raise Exception("无效的参数初始化类型")

        # biases start at zero
        self.Parameters["B1"] = np.zeros((hidden, 1))
        self.Parameters["B2"] = np.zeros((10, 1))

    def forward(self, train_data):
        """Run a forward pass over a batch (784, batch).

        Stores intermediate activations in self.nodes and returns the predicted
        class indices, shape (batch,).
        """
        self.nodes["A1"] = layer.fc_forward(self.Parameters["W1"], train_data, self.Parameters["B1"])
        self.nodes["Z1"] = activations.relu_forward(self.nodes["A1"])
        self.nodes["A2"] = layer.fc_forward(self.Parameters["W2"], self.nodes["Z1"], self.Parameters["B2"])
        # Z2 is stored for inspection only; predictions use the raw logits A2
        # (ReLU is monotonic, so the argmax is the same whenever the max is positive)
        self.nodes["Z2"] = activations.relu_forward(self.nodes["A2"])
        self.nodes["y"] = np.argmax(self.nodes["A2"], axis=0)
        return self.nodes["y"]

    def backward(self, train_data, y_true, lamdb):
        """Backpropagate the cross-entropy loss; fills self.gradients.

        Args:
            train_data: the batch that was passed to forward(), shape (784, batch).
            y_true: one-hot labels, shape (10, batch).
            lamdb: regularization coefficient forwarded to layer.fc_backward.

        Returns:
            the scalar loss for this batch.
        """
        loss, self.gradients["A2"] = losses.cross_entropy_loss(self.nodes["A2"], y_true)
        self.gradients["W2"], self.gradients["B2"], self.gradients["Z1"] = \
            layer.fc_backward(self.gradients["A2"], self.Parameters["W2"], self.nodes["Z1"], lamdb)

        self.gradients["A1"] = activations.relu_backward(self.gradients["Z1"].T, self.nodes["A1"])
        # discard the layer-1 input gradient instead of clobbering
        # gradients["Z1"] (the original overwrote the hidden-layer gradient)
        self.gradients["W1"], self.gradients["B1"], _ = \
            layer.fc_backward(self.gradients["A1"], self.Parameters["W1"], train_data, lamdb)

        return loss

    def myloss(self, y_true):
        """Return the cross-entropy loss of the logits from the last forward() call."""
        loss, self.gradients["A2"] = losses.cross_entropy_loss(self.nodes["A2"], y_true)
        return loss

    def getAcc(self, test_data, y_true):
        """Return the fraction of samples whose predicted class matches y_true (indices)."""
        y_predict = self.forward(test_data)
        acc = np.mean(y_predict == y_true)
        return acc

    def saveModel(self, file_path):
        """Pickle the parameter dict to file_path (context manager closes the file)."""
        with open(file_path, 'wb') as f:
            pickle.dump(self.Parameters, f)

    def loadModel(self, file_path):
        """Load a pickled parameter dict from file_path and return it.

        NOTE(review): pickle.load executes arbitrary code — only load trusted files.
        """
        with open(file_path, 'rb') as f:
            self.Parameters = pickle.load(f)
        return self.Parameters

    def dev(self, batch_size):
        """Evaluate on the whole test set; returns (mean accuracy, mean loss)."""
        acc_arr = []
        loss_arr = []
        for i in range(len(self.Test) // batch_size):
            X, Y = self.getBatchTrain(is_train=False, batch_size=batch_size, offset=i * batch_size)
            y_true = np.argmax(Y, axis=0)
            self.forward(X)  # populates nodes["A2"] for myloss
            loss = self.myloss(Y)
            acc_arr.append(self.getAcc(X, y_true))
            loss_arr.append(loss)
        return np.mean(acc_arr), np.mean(loss_arr)

    def train(self, epoch, batch_size, lr, lamdb):
        """SGD training loop.

        Runs epoch * 60000 // batch_size steps; at every full pass over the
        (assumed 60000-sample) training set it prints and records the averaged
        train metrics and the full test-set metrics.

        Returns:
            (train_acc, train_loss, test_acc, test_loss) of the last completed
            epoch summary, or all 0.0 if no epoch boundary was reached
            (original raised UnboundLocalError in that case, e.g. epoch=1).
        """
        total_loss_train, total_acc = 0, 0
        epo = 1  # epoch counter for logging
        num = 0  # steps since the last epoch summary
        train_acc = train_loss = test_acc = test_loss = 0.0
        for i in range(epoch * 60000 // batch_size):
            num += 1
            X, Y = self.getBatchTrain(batch_size=batch_size, offset=i * batch_size)

            self.forward(X)
            loss = self.backward(X, Y, lamdb)
            total_loss_train += loss
            y_true = np.argmax(Y, axis=0)
            acc = self.getAcc(X, y_true)
            total_acc += acc

            # parameter update - plain SGD
            self.Parameters["W2"] -= lr * self.gradients["W2"]
            self.Parameters["B2"] -= lr * self.gradients["B2"]
            self.Parameters["W1"] -= lr * self.gradients["W1"]
            self.Parameters["B1"] -= lr * self.gradients["B1"]

            # exactly one full pass over the training set completed
            if i % (60000 // batch_size) == 0 and i != 0:
                # averaged loss / accuracy over the epoch
                train_loss = total_loss_train / num
                train_acc = total_acc / num
                print("=======")
                print("第" + str(epo) + "次训练集准确率为：" + str(train_acc))
                print("第" + str(epo) + "次训练集损失为：" + str(train_loss))
                self.plt_x.append(epo)
                self.plt_acc.append(train_acc)
                self.plt_loss.append(train_loss)

                # test-set metrics
                test_acc, test_loss = self.dev(batch_size)
                print("第" + str(epo) + "次测试集准确率为：" + str(test_acc))
                print("第" + str(epo) + "次测试集损失为：" + str(test_loss))
                self.plt_x_test.append(epo)
                self.plt_acc_test.append(test_acc)
                self.plt_loss_test.append(test_loss)

                # reset the running totals for the next epoch
                total_loss_train, total_acc, num = 0, 0, 0
                epo += 1
        return train_acc, train_loss, test_acc, test_loss

    def plot(self):
        """Plot and save the recorded accuracy and loss curves (acc.jpg / loss.jpg)."""
        plt.figure()
        plt.xlabel('epoch')
        plt.ylabel('ACC')
        plt.plot(self.plt_x, self.plt_acc, linewidth=2, color='blue', linestyle='--', label="train")
        plt.plot(self.plt_x_test, self.plt_acc_test, linewidth=2, color='red', linestyle='-.', label="test")
        plt.legend()  # show the legend
        plt.savefig('./acc.jpg')
        plt.show()

        plt.figure()
        plt.xlabel('epoch')
        plt.ylabel('Loss')
        plt.plot(self.plt_x, self.plt_loss, linewidth=2, color='blue', linestyle='--', label="train")
        plt.plot(self.plt_x_test, self.plt_loss_test, linewidth=2, color='red', linestyle='-.', label="test")
        plt.legend()  # show the legend
        plt.savefig('./loss.jpg')
        plt.show()



if __name__ == '__main__':
    # hyper-parameters
    num_epochs = 12
    learning_rate = 0.1
    mini_batch = 16
    l2_coeff = 0.00001
    hidden_units = 512

    # build the model, initialize weights and train
    mnist = Mnist("/remote-home/liwei/神经网络张力/mytest/data/mnist_all.mat")
    mnist.initParam(hidden=hidden_units)
    final_metrics = mnist.train(num_epochs, mini_batch, learning_rate, l2_coeff)
    train_acc, train_loss, test_acc, test_loss = final_metrics

    # persist parameters, draw the curves and report the last-epoch metrics
    mnist.saveModel("model1.para")
    # mnist.loadModel("/remote-home/liwei/神经网络张力/fcn-base-numpy-master/model1.para")

    mnist.plot()
    print([train_acc, train_loss, test_acc, test_loss])