# Five behaviors, six classes (phone use is split into left-hand and right-hand classes)
# ResNet18

import matplotlib.pyplot as plt
import numpy as np
import os
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPool2D, Dropout, Flatten, Dense
from tensorflow.keras import Model
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"

# *************************************** Load image data ***********************************************
# samplewise_center: zero-center each individual sample;
# rescale: map raw pixel values into [0, 1].
datagen = ImageDataGenerator(samplewise_center=True, rescale=1.0 / 255)

# Local dataset layout: <ROOT_DIR>/train (13664 images) and <ROOT_DIR>/valid (180 images).
ROOT_DIR = "./dataset"
train_dir = os.path.join(ROOT_DIR, 'train')
valid_dir = os.path.join(ROOT_DIR, 'valid')

# Batch sizes for each split, and the size every image is resized to.
batch_size_train = 32
batch_size_valid = 10
IMG_SIZE = (28, 28)

# flow_from_directory expects the PARENT directory of the per-class folders.
# `classes` pins the sub-folder -> label-index mapping explicitly (instead of
# letting it be inferred from the directory structure), and
# class_mode='sparse' yields integer labels, matching the sparse loss/metric
# used when the model is compiled.
train_generator = datagen.flow_from_directory(
    train_dir,
    classes=['c0', 'c1', 'c3', 'c5', 'c6', 'c9'],
    target_size=IMG_SIZE,
    class_mode='sparse',
    batch_size=batch_size_train,
    shuffle=True,  # shuffle samples between epochs (default True)
)
validation_generator = datagen.flow_from_directory(
    valid_dir,
    classes=['c0', 'c1', 'c3', 'c5', 'c6', 'c9'],
    target_size=IMG_SIZE,
    class_mode='sparse',
    batch_size=batch_size_valid,
    shuffle=True,
)
print(validation_generator.image_shape)

#**********************************************模型搭建******************************************
# ********************************************** Model definition ******************************************
class ResnetBlock(Model):
    """One basic ResNet block: two 3x3 convolutions plus a skip connection.

    When ``residual_path`` is False, F(x) and x share the same shape and the
    raw input is added directly. When True, the shapes differ (strided first
    conv), so the shortcut projects x with a strided 1x1 convolution + BN
    before the addition.
    """

    def __init__(self, filters, strides=1, residual_path=False):
        super(ResnetBlock, self).__init__()
        self.filters = filters
        self.strides = strides
        self.residual_path = residual_path

        # Main branch: conv -> BN -> ReLU -> conv -> BN.
        self.c1 = Conv2D(filters, (3, 3), strides=strides, padding='same', use_bias=False)
        self.b1 = BatchNormalization()
        self.a1 = Activation('relu')
        self.c2 = Conv2D(filters, (3, 3), strides=1, padding='same', use_bias=False)
        self.b2 = BatchNormalization()

        # Shortcut branch: 1x1 conv + BN so x matches F(x)'s shape and the
        # two can be added.
        if residual_path:
            self.down_c1 = Conv2D(filters, (1, 1), strides=strides, padding='same', use_bias=False)
            self.down_b1 = BatchNormalization()

        self.a2 = Activation('relu')

    def call(self, inputs):
        # F(x): two conv/BN stages with a ReLU in between.
        main = self.b2(self.c2(self.a1(self.b1(self.c1(inputs)))))

        # Shortcut: the input itself, or its 1x1-conv projection.
        shortcut = inputs
        if self.residual_path:
            shortcut = self.down_b1(self.down_c1(inputs))

        # Output: ReLU(F(x) + x), or ReLU(F(x) + Wx) when projected.
        return self.a2(main + shortcut)


class ResNet18(Model):
    """ResNet-style classifier: a 3x3 conv stem, a stack of ResnetBlocks
    described by ``block_list``, global average pooling, and an
    L2-regularized 6-way softmax head.

    ``block_list`` gives the number of residual blocks per stage; the filter
    count starts at ``initial_filters`` and doubles at each new stage.
    """

    def __init__(self, block_list, initial_filters=64):
        super(ResNet18, self).__init__()
        self.num_blocks = len(block_list)  # number of stages
        self.block_list = block_list
        self.out_filters = initial_filters

        # Stem: 3x3 conv (stride 1, same padding) -> BN -> ReLU.
        self.c1 = Conv2D(self.out_filters, (3, 3), strides=1, padding='same', use_bias=False)
        self.b1 = BatchNormalization()
        self.a1 = Activation('relu')

        # Residual stages: the first block of every stage except the first
        # downsamples (stride 2) and projects its shortcut.
        self.blocks = tf.keras.models.Sequential()
        for stage_idx, blocks_in_stage in enumerate(block_list):
            for block_idx in range(blocks_in_stage):
                downsample = stage_idx != 0 and block_idx == 0
                if downsample:
                    self.blocks.add(ResnetBlock(self.out_filters, strides=2, residual_path=True))
                else:
                    self.blocks.add(ResnetBlock(self.out_filters, residual_path=False))
            self.out_filters *= 2  # next stage uses twice as many filters

        # Head: global average pooling + dense softmax over the 6 classes.
        self.p1 = tf.keras.layers.GlobalAveragePooling2D()
        self.f1 = tf.keras.layers.Dense(6, activation='softmax',
                                        kernel_regularizer=tf.keras.regularizers.l2())

    def call(self, inputs):
        x = self.a1(self.b1(self.c1(inputs)))
        x = self.blocks(x)
        return self.f1(self.p1(x))

# **************************************** Training ************************************************
model = ResNet18([2, 2, 2, 2])
model.compile(optimizer='adam',
              # The model outputs softmax probabilities, hence from_logits=False;
              # the sparse loss/metric match the generators' integer labels.
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
              metrics=['sparse_categorical_accuracy'])

# Resume from a previous checkpoint if one exists: TF writes an .index file
# alongside the .ckpt data, so its presence means weights were saved before.
checkpoint_save_path = "./checkpoint/ResNet18.ckpt"
if os.path.exists(checkpoint_save_path + '.index'):
    print('-------------load the model-----------------')
    model.load_weights(checkpoint_save_path)

# Save only the weights, and only when the monitored validation metric improves.
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_save_path,
                                                 save_weights_only=True,
                                                 save_best_only=True)

# BUGFIX 1: the original never passed cp_callback to fit, so checkpoints were
#   never written; it is now wired in via callbacks=[...].
# BUGFIX 2: the original used deprecated fit_generator() with
#   steps_per_epoch=13664/64+1 and validation_steps=180/12, which assume batch
#   sizes of 64 and 12 although the generators use 32 and 10 — roughly half of
#   each epoch and part of the validation set were silently skipped (and the
#   values were floats). model.fit infers the correct step counts from the
#   generators themselves.
history = model.fit(train_generator,
                    epochs=1,
                    validation_data=validation_generator,
                    callbacks=[cp_callback])
model.summary()

# Dump every trainable variable (name, shape, values) to a text file.
# Create the output directory first (open() only creates the file, not dirs),
# and use a context manager so the file is closed even on error.
os.makedirs('./Model_weights', exist_ok=True)
with open('./Model_weights/ResNet18_weights.txt', 'w') as weights_file:
    for v in model.trainable_variables:
        weights_file.write(str(v.name) + '\n')
        weights_file.write(str(v.shape) + '\n')
        weights_file.write(str(v.numpy()) + '\n')

# **************************************** Curves ***********************************************
# Plot training/validation accuracy and loss from the fit history, side by side.
acc = history.history['sparse_categorical_accuracy']
val_acc = history.history['val_sparse_categorical_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

plt.subplot(1, 2, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.title('Training and Validation Accuracy')
plt.legend()

plt.subplot(1, 2, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.title('Training and Validation Loss')
plt.legend()

# BUGFIX: savefig raises FileNotFoundError when the target directory is
# missing — create it first. Save BEFORE show(): show() clears the current
# figure, so saving afterwards would write a blank image.
os.makedirs('./Model_acc_loss', exist_ok=True)
plt.savefig("./Model_acc_loss/ResNet18.png")
plt.show()