# Five driver behaviors, six classes (phone use is split into left-hand and right-hand classes)

import matplotlib.pyplot as plt
import numpy as np
import os
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPool2D, Dropout, Flatten, Dense, \
    GlobalAveragePooling2D
from tensorflow.keras import Model
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"

#***************************************Data loading***********************************************
# Instantiate an ImageDataGenerator.
#   samplewise_center: zero-center each individual sample's mean
#   rescale: scale pixel values into [0, 1]
datagen = ImageDataGenerator(samplewise_center=True, rescale=1.0/255)

# Local dataset layout: ROOT_DIR/{train,valid}/<class subfolders>
ROOT_DIR = "./dataset"
train_dir = os.path.join(ROOT_DIR, 'train')  # training set: 13664 images in total
valid_dir = os.path.join(ROOT_DIR, 'valid')  # validation set: 180 images in total

# Batch sizes for the two generators.
batch_size_train = 32
batch_size_valid = 10

# Image size fed to the model.
IMG_SIZE = (28, 28)

# Class subfolder names used as labels; hoisted into one constant so the two
# generators can never drift out of sync (the list was duplicated inline before).
CLASS_NAMES = ['c0', 'c1', 'c3', 'c5', 'c6', 'c9']

# flow_from_directory reads images from class subfolders; the path passed in is
# the PARENT directory of the class folders.
train_generator = datagen.flow_from_directory(
    train_dir,
    classes=CLASS_NAMES,           # explicit label order (otherwise inferred from folder names)
    target_size=IMG_SIZE,          # images are resized to this size (default (256, 256))
    class_mode='sparse',           # integer labels, matching SparseCategoricalCrossentropy
    batch_size=batch_size_train,
    shuffle=True                   # reshuffle every epoch (default True)
)
validation_generator = datagen.flow_from_directory(
    valid_dir,
    classes=CLASS_NAMES,
    target_size=IMG_SIZE,
    class_mode='sparse',
    batch_size=batch_size_valid,
    shuffle=True
)
#**********************************************模型搭建******************************************
# CBA unit: Conv2D -> BatchNormalization -> ReLU Activation
class ConvBNRelu(Model):
    def __init__(self, ch, kernelsz=3, strides=1, padding='same'):
        """Conv-BN-ReLU building block.

        Args:
            ch: number of convolution filters.
            kernelsz: convolution kernel size (default 3).
            strides: convolution stride (default 1).
            padding: padding mode (default 'same').
        """
        super(ConvBNRelu, self).__init__()
        self.model = tf.keras.models.Sequential([
            Conv2D(ch, kernelsz, strides=strides, padding=padding),
            BatchNormalization(),
            Activation('relu')
        ])

    def call(self, x, training=None):
        # BUGFIX: the original hard-coded training=False here, which forces
        # BatchNormalization into inference mode even while training — the
        # moving mean/variance are then never updated from batch statistics.
        # Forward Keras' privileged `training` argument instead: during fit()
        # BN normalizes with the current batch stats (and updates its moving
        # averages); at inference (training=False/None) it uses the moving
        # averages, which is the intended behavior the old comment described.
        return self.model(x, training=training)

# One Inception block: four parallel branches whose outputs are concatenated
# along the channel axis. Every branch produces `ch` channels, so the block
# outputs 4*ch channels.
class InceptionBlk(Model):
    def __init__(self, ch, strides=1):
        """Build the four branches; `strides` (default 1) is applied by the
        first conv of each conv branch and by the pooled branch's 1x1 conv,
        so all branches stay spatially aligned."""
        super(InceptionBlk, self).__init__()
        self.ch = ch
        self.strides = strides
        # Branch 1: 1x1 conv.
        self.c1 = ConvBNRelu(ch, kernelsz=1, strides=strides)
        # Branch 2: 1x1 conv followed by 3x3 conv.
        self.c2_1 = ConvBNRelu(ch, kernelsz=1, strides=strides)
        self.c2_2 = ConvBNRelu(ch, kernelsz=3, strides=1)
        # Branch 3: 1x1 conv followed by 5x5 conv.
        self.c3_1 = ConvBNRelu(ch, kernelsz=1, strides=strides)
        self.c3_2 = ConvBNRelu(ch, kernelsz=5, strides=1)
        # Branch 4: 3x3 max-pool followed by 1x1 conv.
        self.p4_1 = MaxPool2D(3, strides=1, padding='same')
        self.c4_2 = ConvBNRelu(ch, kernelsz=1, strides=strides)

    def call(self, x):
        branch1 = self.c1(x)
        branch2 = self.c2_2(self.c2_1(x))
        branch3 = self.c3_2(self.c3_1(x))
        branch4 = self.c4_2(self.p4_1(x))
        # Stack the four branch outputs depth-wise (axis=3 = channel axis).
        return tf.concat([branch1, branch2, branch3, branch4], axis=3)

# Ten-layer InceptionNet: one stem conv, num_blocks stages of two Inception
# blocks each, global average pooling, and one Dense softmax classifier.
class Inception10(Model):
    def __init__(self, num_blocks, num_classes, init_ch=16, **kwargs):
        """Args:
            num_blocks: number of stages (each stage = 2 Inception blocks).
            num_classes: size of the final softmax layer.
            init_ch: stem/initial channel count (default 16).
            **kwargs: forwarded to tf.keras.Model.
        """
        super(Inception10, self).__init__(**kwargs)
        self.in_channels = init_ch
        self.out_channels = init_ch
        self.num_blocks = num_blocks
        self.init_ch = init_ch
        # Stem layer: init_ch filters with the ConvBNRelu defaults (3x3, stride 1).
        self.c1 = ConvBNRelu(init_ch)
        self.blocks = tf.keras.models.Sequential()
        # Each stage holds two Inception blocks: the first with stride 2
        # (halves the feature map), the second with stride 1.
        for _ in range(num_blocks):
            self.blocks.add(InceptionBlk(self.out_channels, strides=2))
            self.blocks.add(InceptionBlk(self.out_channels, strides=1))
            # The stride-2 block halved the spatial size, so the channel count
            # is doubled to keep the feature-information capacity comparable.
            self.out_channels *= 2
        # Global average pooling over the final feature maps.
        self.p1 = GlobalAveragePooling2D()
        # Tenth layer: fully connected softmax over num_classes classes.
        self.f1 = Dense(num_classes, activation='softmax')

    def call(self, x):
        features = self.blocks(self.c1(x))
        return self.f1(self.p1(features))

#****************************************Training************************************************
model = Inception10(num_blocks=2, num_classes=6)  # num_blocks = number of two-Inception-block stages

model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
              metrics=['sparse_categorical_accuracy'])

# Resume from a checkpoint if one exists. Saving .ckpt weights also writes an
# index file, so the index's presence indicates a previously saved model.
checkpoint_save_path = "./checkpoint/InceptionNet10.ckpt"
if os.path.exists(checkpoint_save_path + '.index'):
    print('-------------load the model-----------------')
    model.load_weights(checkpoint_save_path)

# Save only the weights, and only when the monitored metric improves.
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_save_path,
                                                 save_weights_only=True,
                                                 save_best_only=True)

# Train the model.
# BUGFIXES vs. the original:
#   - model.fit replaces the deprecated fit_generator.
#   - cp_callback was created but never passed to fit, so no checkpoint was
#     ever written; it is now wired in via callbacks=[...].
#   - steps were hard-coded as 13664/64+1 and 180/12 (floats, and inconsistent
#     with the actual batch sizes 32 and 10); len(generator) gives the correct
#     ceil(num_samples / batch_size) for each generator.
history = model.fit(train_generator,
                    steps_per_epoch=len(train_generator),
                    epochs=1,
                    validation_data=validation_generator,
                    validation_steps=len(validation_generator),
                    callbacks=[cp_callback])
model.summary()

# Dump the trained weights (name, shape, values) to a text file for inspection.
os.makedirs('./Model_weights', exist_ok=True)  # open() would fail if the directory is missing
with open('./Model_weights/InceptionNet10_weights.txt', 'w') as file:
    for v in model.trainable_variables:
        file.write(str(v.name) + '\n')
        file.write(str(v.shape) + '\n')
        file.write(str(v.numpy()) + '\n')

#****************************************Curves***********************************************
# Plot training/validation accuracy and loss curves from the fit history.
acc = history.history['sparse_categorical_accuracy']
val_acc = history.history['val_sparse_categorical_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

plt.subplot(1, 2, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.title('Training and Validation Accuracy')
plt.legend()

plt.subplot(1, 2, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.title('Training and Validation Loss')
plt.legend()
# BUGFIX: savefig raises FileNotFoundError if the target directory is missing.
os.makedirs('./Model_acc_loss', exist_ok=True)
# Save BEFORE show(): show() releases the current figure, so saving afterwards
# would produce a blank image.
plt.savefig("./Model_acc_loss/InceptionNet10.png")
plt.show()


