import numpy as np
import pandas as pd
from keras.models import Sequential  # NOTE(review): standalone-keras import; superseded below
from keras.utils import np_utils  # NOTE(review): np_utils is deprecated/removed in modern Keras
from tensorflow.keras.layers import Conv2D, Dense, Dropout, Flatten, MaxPooling2D
from tensorflow.keras.models import Sequential  # overrides the standalone-keras Sequential so all objects come from tensorflow.keras
from tensorflow.keras.utils import to_categorical
#读取csv文件
# FER2013 facial-expression recognition: train a small CNN on the Kaggle
# fer2013.csv dump (48x48 grayscale face crops, 7 emotion classes).

# Hyper-parameters, grouped here so a whole run is tuned in one place.
FEATURES = 64           # conv-filter count, reused as the dense-layer width
BATCH_SIZE = 500
EPOCHS = 150
NUM_LABELS = 7          # fer2013 labels emotions 0-6
WIDTH, HEIGHT = 48, 48  # fer2013 images are 48x48 pixels


def split_dataset(data):
    """Split a fer2013 DataFrame into disjoint train/test arrays.

    The csv already tags every row with a 'Usage' column: rows marked
    'Training' become the training set and 'PublicTest' the test set
    ('PrivateTest' rows are deliberately ignored, matching the original
    pipeline).  The 'pixels' column is a space-separated string of
    grayscale values.

    Returns (x_train, y_train, x_test, y_test) as float32 ndarrays.
    """
    x_train, y_train, x_test, y_test = [], [], [], []
    for _, row in data.iterrows():
        pixels = np.array(row['pixels'].split(' '), 'float32')
        if 'Training' in row['Usage']:
            x_train.append(pixels)
            y_train.append(row['emotion'])
        elif 'PublicTest' in row['Usage']:
            x_test.append(pixels)
            y_test.append(row['emotion'])
    return (np.array(x_train, 'float32'), np.array(y_train, 'float32'),
            np.array(x_test, 'float32'), np.array(y_test, 'float32'))


def standardize(x):
    """Zero-center each pixel column and scale it to unit variance.

    NOTE(review): as in the original script, the test set is normalised
    with its own statistics rather than the training set's (mild data
    leakage), and an all-constant column would divide by zero.  Both
    behaviours are kept on purpose to stay faithful to the original
    pipeline — confirm before changing, as it alters reported accuracy.
    """
    return (x - np.mean(x, axis=0)) / np.std(x, axis=0)


def build_model(input_shape):
    """Build the CNN: three conv/pool stages plus a small dense head.

    `input_shape` is the per-sample shape, e.g. (48, 48, 1).
    """
    model = Sequential()
    # Stage 1: 5x5 convolution, overlapping 3x3/stride-2 max-pool,
    # then drop 30% of activations to regularise.
    model.add(Conv2D(FEATURES, kernel_size=(5, 5), activation='relu',
                     input_shape=input_shape))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
    model.add(Dropout(0.3))
    # Stage 2: same conv/pool/dropout pattern.
    model.add(Conv2D(FEATURES, kernel_size=(5, 5), activation='relu'))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
    model.add(Dropout(0.3))
    # Stage 3: conv/pool only (no dropout here, as in the original).
    model.add(Conv2D(FEATURES, kernel_size=(5, 5), activation='relu'))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
    # Classifier head: flatten, one hidden dense layer, softmax over labels.
    model.add(Flatten())
    model.add(Dense(FEATURES, activation='relu'))
    model.add(Dropout(0.3))
    model.add(Dense(NUM_LABELS, activation='softmax'))
    return model


def main():
    """Load fer2013.csv, preprocess, build the CNN, and train it."""
    data = pd.read_csv('fer2013.csv')
    x_train, y_train, x_test, y_test = split_dataset(data)

    x_train = standardize(x_train)
    x_test = standardize(x_test)

    # One-hot encode the integer emotion labels.
    y_train = to_categorical(y_train, num_classes=NUM_LABELS)
    y_test = to_categorical(y_test, num_classes=NUM_LABELS)

    # Reshape flat pixel vectors into (height, width, channels) images.
    x_train = x_train.reshape(x_train.shape[0], WIDTH, HEIGHT, 1)
    x_test = x_test.reshape(x_test.shape[0], WIDTH, HEIGHT, 1)

    model = build_model(x_train.shape[1:])
    model.summary()
    model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    # Epochs/batch size are meant to be tuned per dataset (see constants).
    model.fit(x_train, y_train, batch_size=BATCH_SIZE, epochs=EPOCHS,
              verbose=1, validation_data=(x_test, y_test), shuffle=True)


if __name__ == '__main__':
    main()

