#五种行为，六种分类（使用手机分为了左右手两种分类）

import matplotlib.pyplot as plt
import numpy as np
import os
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPool2D, Dropout, Flatten, Dense
from tensorflow.keras import Model
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"  # hide all CUDA devices: force CPU-only execution

#*************************************** Load image data ***********************************************
# Instantiate an ImageDataGenerator that normalizes each sample.
datagen=ImageDataGenerator(samplewise_center=True,rescale=1.0/255)
                                                        # samplewise_center: shift each sample so its mean is 0
                                                        # rescale: scale pixel values into [0, 1]
# Local dataset paths
ROOT_DIR = "./dataset"
train_dir = os.path.join(ROOT_DIR, 'train')# training set: 13664 images in total
valid_dir = os.path.join(ROOT_DIR, 'valid')# validation set: 180 images in total
# Batch size for each split
batch_size_train=32
batch_size_valid=10
# Size of the images fed to the model
IMG_SIZE = (28,28)
# .flow_from_directory reads images from a directory tree
train_generator=datagen.flow_from_directory(
    train_dir,# NOTE: this path is the PARENT directory of the per-class sub-folders
    classes=['c0','c1','c3','c5','c6','c9'],# optional: list of class sub-folder names (used as labels); if omitted, classes are inferred from the sub-folders of the directory
    target_size=IMG_SIZE,# integer tuple, default (256, 256); every image is resized to this size
    #class_mode='categorical',# label format; default is "categorical" (one-hot encoded labels)
    class_mode='sparse',# integer class labels (matches the sparse loss/metric used below)
    batch_size=batch_size_train,
    shuffle=True# whether to shuffle the data; default True
)
validation_generator = datagen.flow_from_directory(
    valid_dir,
    classes=['c0','c1','c3','c5','c6','c9'],
    target_size=IMG_SIZE,
    #class_mode='categorical',
    class_mode='sparse',
    batch_size=batch_size_valid,
    shuffle=True# NOTE(review): shuffling validation data does not change aggregate metrics, but makes per-batch inspection nondeterministic — confirm this is intended
)
print(validation_generator.image_shape)

#********************************************** Model definition ******************************************
# AlexNet8: an eight-layer AlexNet-style network
class AlexNet8(Model):
    """An 8-layer AlexNet-style CNN producing a 6-way softmax.

    Layers 1-2: conv -> batch-norm -> ReLU -> 3x3/stride-2 max-pool.
    Layers 3-5: same-padded ReLU convolutions, pooled after layer 5.
    Layers 6-7: 2048-unit dense layers, each followed by 50% dropout.
    Layer 8:    6-unit softmax head (one unit per class).
    """

    def __init__(self):
        super().__init__()
        # Layer 1: 96 3x3 kernels, stride 1, valid padding, no dropout.
        self.c1 = Conv2D(96, (3, 3))
        self.b1 = BatchNormalization()
        self.a1 = Activation('relu')
        self.p1 = MaxPool2D(pool_size=(3, 3), strides=2)
        # Layer 2: 256 3x3 kernels, same conv/BN/ReLU/pool pattern.
        self.c2 = Conv2D(256, (3, 3))
        self.b2 = BatchNormalization()
        self.a2 = Activation('relu')
        self.p2 = MaxPool2D(pool_size=(3, 3), strides=2)
        # Layers 3-5: same-padded convolutions with fused ReLU activations.
        self.c3 = Conv2D(384, (3, 3), padding='same', activation='relu')
        self.c4 = Conv2D(384, (3, 3), padding='same', activation='relu')
        self.c5 = Conv2D(256, (3, 3), padding='same', activation='relu')
        self.p3 = MaxPool2D(pool_size=(3, 3), strides=2)
        # Classifier head.
        self.flatten = Flatten()
        self.f1 = Dense(2048, activation='relu')  # layer 6
        self.d1 = Dropout(0.5)
        self.f2 = Dense(2048, activation='relu')  # layer 7
        self.d2 = Dropout(0.5)
        self.f3 = Dense(6, activation='softmax')  # layer 8: six classes, so six output units

    def call(self, x):
        """Forward pass: thread x through every layer in fixed order."""
        pipeline = (
            self.c1, self.b1, self.a1, self.p1,
            self.c2, self.b2, self.a2, self.p2,
            self.c3, self.c4, self.c5, self.p3,
            self.flatten,
            self.f1, self.d1,
            self.f2, self.d2,
            self.f3,
        )
        for layer in pipeline:
            x = layer(x)
        return x

#**************************************** Training ************************************************
model = AlexNet8()
# Optimizer / loss / metric configuration. The generators yield integer labels
# (class_mode='sparse'), hence the sparse loss/metric; the model's last layer
# already applies softmax, so from_logits=False.
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
              metrics=['sparse_categorical_accuracy'])
# Resume from a checkpoint if one exists. Saving weights in TF checkpoint
# format also writes an index file, so its presence implies a saved model.
checkpoint_save_path = "./checkpoint/AlexNet8.ckpt"
if os.path.exists(checkpoint_save_path + '.index'):
    print('-------------load the model-----------------')
    model.load_weights(checkpoint_save_path)
# Save only the weights, and only when validation performance improves.
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_save_path,
                                                 save_weights_only=True,
                                                 save_best_only=True)
# Train the model.
# BUG FIX: cp_callback was created but never passed to training, so the
# checkpoint was never written; it is now wired in via callbacks=.
# Also: Model.fit_generator is deprecated in TF2 (Model.fit accepts
# generators directly), and step counts are derived from the generators
# instead of hard-coded floats (13664/32 and 180/10 give the same values).
history = model.fit(train_generator,
                    steps_per_epoch=train_generator.samples // batch_size_train,
                    epochs=1,
                    validation_data=validation_generator,
                    validation_steps=validation_generator.samples // batch_size_valid,
                    callbacks=[cp_callback])
model.summary()
# Dump every trainable variable (name, shape, values) to a text file.
# The context manager guarantees the file is closed even on error, and the
# directory is created up front so the open() cannot fail on a fresh clone.
os.makedirs('./Model_weights', exist_ok=True)
with open('./Model_weights/Alexnet_weights.txt', 'w') as file:
    for v in model.trainable_variables:
        file.write(str(v.name) + '\n')
        file.write(str(v.shape) + '\n')
        file.write(str(v.numpy()) + '\n')

#**************************************** Curves ***********************************************
# Plot training/validation accuracy and loss curves side by side.
acc = history.history['sparse_categorical_accuracy']
val_acc = history.history['val_sparse_categorical_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

plt.subplot(1, 2, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.title('Training and Validation Accuracy')
plt.legend()

plt.subplot(1, 2, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.title('Training and Validation Loss')
plt.legend()

# BUG FIX: the original final line was `plt.show()Alexnet.py` — stray pasted
# text after the call, which is a SyntaxError. Also create the output
# directory so savefig cannot fail on a fresh clone. savefig must run before
# show(), otherwise only a blank figure is saved.
os.makedirs('./Model_acc_loss', exist_ok=True)
plt.savefig("./Model_acc_loss/AlexNet8.png")
plt.show()