# Main model class
import pickle
import numpy as np
import time
import random
from matplotlib import pyplot as plt
from Conv import Conv
from ActivationFunc import ReLU, Sigmoid
from Pooling import MeanPool, MaxPool
from Flatten import Flatten
from FullyConnected import FullyConnected
from Softmax import Softmax
NEAR_0 = 1e-10


class CNN():
    """Convolutional neural network: two conv + ReLU + max-pool stages followed
    by a flatten, a fully-connected layer, and a softmax classifier.

    The layer implementations come from the project-local Conv / Pooling /
    Flatten / FullyConnected / Softmax modules; this class only wires them
    together and drives training.
    """

    def __init__(self, picShape, classNum, epochNum=1, learning_rate=0.001, workerNum=3):
        self.workerNum = workerNum  # number of worker threads (unused in this file)
        self.classNum = classNum  # number of output classes, e.g. 10 for digits 0-9
        self.layers = []  # ordered list of layer objects, filled by createNetwork()
        self.picShape = picShape  # input image shape (given manually; used to derive downstream layer shapes)
        self.epochNum = epochNum  # number of training epochs
        self.learning_rate = learning_rate  # learning rate shared by all layers
        self.createNetwork()

    def createNetwork(self):
        """Build the fixed layer stack and store it in self.layers."""
        print("开始初始化神经网络")
        conv_layer1 = Conv([1, self.picShape[0], self.picShape[1], self.picShape[2]],
                        kernel_num=5, conv_stride=1, receptive_field=5, learning_rate=self.learning_rate)
        print("第一个卷积层的输出形状是", conv_layer1.output_shape)
        active_layer1 = ReLU()

        pooling_layer1 = MaxPool(conv_layer1.output_shape, pooling_size=2, pooling_stride=2)
        print("第一个池化层的输出形状是", pooling_layer1.output_shape)

        conv_layer2 = Conv(pooling_layer1.output_shape,
                        kernel_num=10, conv_stride=1, receptive_field=3, learning_rate=self.learning_rate)
        print("第二个卷积层的输出形状是", conv_layer2.output_shape)
        active_layer2 = ReLU()

        pooling_layer2 = MaxPool(conv_layer2.output_shape, pooling_size=2, pooling_stride=2)
        print("第二个池化层的输出形状是", pooling_layer2.output_shape)

        flatten_layer = Flatten()

        # The fully-connected layer consumes the flattened pooled feature map.
        batch_size, channel_num, height, width = pooling_layer2.output_shape
        feature_num = channel_num * height * width
        fc_layer = FullyConnected(feature_num, self.classNum)

        sm_layer = Softmax()

        self.layers = [conv_layer1, active_layer1, pooling_layer1,
                       conv_layer2, active_layer2, pooling_layer2,
                       flatten_layer, fc_layer, sm_layer]

    def update_learning_rate(self, learning_rate):
        """Propagate a new learning rate to every layer."""
        self.learning_rate = learning_rate
        for layer in self.layers:
            layer.update_learning_rate(learning_rate)

    def shuffleData(self, inputList, outputList, rate=0.2):
        """Return a random sample of ``rate * len`` pairs, shuffled in unison.

        Both arguments must be numpy arrays (fancy indexing is used to keep
        the image/label pairing intact).
        """
        indexList = list(range(len(inputList)))
        random.shuffle(indexList)
        indexList = indexList[:int(rate * len(indexList))]
        return inputList[indexList], outputList[indexList]

    def forward(self, an_image):
        """Run a forward pass; each layer caches what it needs for backprop."""
        for layer in self.layers:
            an_image = layer.forward(an_image)
        return an_image

    def do_train(self, an_image, realLabel):
        """One forward/backward pass; gradients are accumulated inside the layers."""
        self.forward(an_image)  # forward pass records each layer's input/output
        # The softmax layer computes its gradient directly from the true label.
        error_from_later_layer = self.layers[-1].backward(realLabel)
        for i in reversed(range(len(self.layers) - 1)):  # backprop through remaining layers
            error_from_later_layer = self.layers[i].backward(error_from_later_layer)

    def update_weights(self):
        """Apply the accumulated gradients in every layer."""
        for layer in self.layers:
            layer.update_weights()

    def cal_loss(self, trainingImageList, trainingLabelList):
        """Cross-entropy loss over (at most) the first 20 training samples.

        ``+ NEAR_0`` guards against log(0) underflow.
        """
        loss = 0
        batch_size = 10
        sample_num = min(20, trainingImageList.shape[0])
        for j in range(0, sample_num, batch_size):
            # BUGFIX: slice directly instead of reshaping to a hard-coded
            # batch_size — the old reshape raised ValueError whenever fewer
            # than batch_size samples remained.
            an_image = trainingImageList[j:j + batch_size]
            label = trainingLabelList[j:j + batch_size]
            predLabel = self.forward(an_image)
            loss -= np.sum(np.log(predLabel[np.where(label == 1)] + NEAR_0))
        return loss

    def show_loss(self, x_list, y_list):
        """Plot training loss against (fractional) epoch number."""
        plt.plot(x_list, y_list, linestyle="-", color="b")  # "-" solid line, "b" blue
        plt.title("Training_loss")
        plt.xlabel("epoch")
        plt.ylabel("loss")
        plt.legend(["loss"], loc="upper right")
        plt.show()

    def fit(self, trainingImageList, trainingLabelList, batch_size):
        """Train for self.epochNum epochs with mini-batches of ``batch_size``.

        Checkpoints the whole model to 'cnnsoftmax.pkl' and plots the loss
        every 500 samples.
        """
        trainingImageList = np.array(trainingImageList)
        trainingLabelList = np.array(trainingLabelList)
        loss_list = []
        epoch_list = []
        for epoch in range(self.epochNum):
            t1 = time.time()
            # rate=1 keeps every sample but reshuffles the order each epoch.
            trainingImageList, trainingLabelList = self.shuffleData(
                trainingImageList, trainingLabelList, rate=1)
            for i in range(0, len(trainingImageList), batch_size):
                # BUGFIX: no reshape to a hard-coded batch_size — the final
                # mini-batch of an epoch is usually smaller, and the slice
                # already has shape (b, channels, height, width).
                an_image = trainingImageList[i:i + batch_size]
                realLabel = trainingLabelList[i:i + batch_size]
                self.do_train(an_image, realLabel)
                self.update_weights()

                if i % 500 == 0:
                    loss = self.cal_loss(trainingImageList, trainingLabelList)
                    print("已经学习了", epoch, '轮, loss', loss, '本轮的进度是', i, '/', len(trainingImageList))
                    # BUGFIX: close the checkpoint file — the old code leaked
                    # the handle returned by open().
                    with open('cnnsoftmax.pkl', 'wb') as checkpoint:
                        pickle.dump(self, checkpoint)
                    epoch_list.append(i / trainingImageList.shape[0] + epoch)
                    loss_list.append(loss)
                    self.show_loss(epoch_list, loss_list)
            t2 = time.time()
            loss = self.cal_loss(trainingImageList, trainingLabelList)
            print("完成了本轮的训练", epoch, '轮, loss为', loss, "耗时", t2 - t1)
