import numpy as np
import tensorflow as tf
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt

# Load the dataset and split it into features (x) and labels (y).
# Each row holds 256 pixel values followed by a trailing class label.
data = np.loadtxt('img_16_10k.txt', delimiter=',')
x, y = data[:, :-1], data[:, [-1]]

# Reshape flat feature vectors into the NHWC layout Conv2D expects: 16x16x1.
x = x.reshape([-1, 16, 16, 1])

# Hold out 30% of the samples for validation.
train_x, test_x, train_y, test_y = train_test_split(x, y, test_size=0.3)

'''
Network architecture (input: 16x16x1):
3.1 conv1 (5x5, 6 filters)   -> 12x12x6
3.2 max-pool (2x2, stride 2) -> 6x6x6
3.3 conv2 (3x3, 12 filters)  -> 4x4x12
3.4 max-pool (2x2, stride 2) -> 2x2x12
3.5 fully connected layers with dropout
3.6 ReLU activations; softmax output over 10 classes
'''
class Net(tf.keras.models.Model):
    """CNN classifier for 16x16x1 images over 10 classes.

    Architecture (see the spatial-dimension walkthrough above the class):
        conv1 5x5 -> pool 2x2 -> ReLU -> conv2 3x3 -> pool 2x2 -> ReLU
        -> flatten -> Dense(1024) -> Dropout(0.3) -> Dense(100)
        -> Dropout(0.3) -> Dense(10, softmax)
    """

    def __init__(self):
        super(Net, self).__init__()  # initialize the Keras Model machinery

        self.conv1 = tf.keras.layers.Conv2D(6, kernel_size=(5, 5), strides=1)
        self.pool1 = tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=2)
        self.act1 = tf.keras.layers.Activation('relu')
        self.conv2 = tf.keras.layers.Conv2D(12, kernel_size=(3, 3), strides=1)
        self.pool2 = tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=2)
        self.act2 = tf.keras.layers.Activation('relu')
        self.flat = tf.keras.layers.Flatten()  # 2x2x12 -> 48-vector
        self.fc1 = tf.keras.layers.Dense(1024, activation='relu')
        self.drop1 = tf.keras.layers.Dropout(0.3)
        self.fc2 = tf.keras.layers.Dense(100, activation='relu')
        self.drop2 = tf.keras.layers.Dropout(0.3)
        self.out = tf.keras.layers.Dense(10, activation='softmax')

    # Forward pass. Must be named `call` (no underscores) so that
    # Keras fit()/predict() dispatch to it.
    def call(self, x, training=None):
        """Run a forward pass.

        Args:
            x: batch of images, shape (batch, 16, 16, 1).
            training: bool or None. Forwarded explicitly to the Dropout
                layers so they drop units during training and are inert at
                inference; None lets Keras supply the current phase.

        Returns:
            Tensor of shape (batch, 10) with per-class softmax probabilities.
        """
        x = self.conv1(x)
        x = self.pool1(x)
        # Note: ReLU after max-pool is equivalent to ReLU before it
        # (both are monotonic), and pooling first does less work.
        x = self.act1(x)
        x = self.conv2(x)
        x = self.pool2(x)
        x = self.act2(x)
        x = self.flat(x)
        x = self.fc1(x)
        x = self.drop1(x, training=training)
        x = self.fc2(x)
        x = self.drop2(x, training=training)
        out = self.out(x)

        return out

# Build and configure the model: Adam optimizer, sparse categorical
# cross-entropy (labels are integer class ids, not one-hot), accuracy metric.
model = Net()

model.compile(optimizer=tf.keras.optimizers.Adam(),
              loss=tf.keras.losses.SparseCategoricalCrossentropy(),
              metrics=['accuracy'])

# Train for 10 epochs, evaluating the held-out split after each epoch.
history = model.fit(train_x, train_y, batch_size=64, epochs=10,
                    validation_data=(test_x, test_y))

# model.fit returns a History object whose .history attribute maps each
# metric name to its per-epoch list of values.
curves = history.history
acc, val_acc = curves['accuracy'], curves['val_accuracy']
loss, val_loss = curves['loss'], curves['val_loss']

# Accuracy curves: training vs. validation.
plt.plot(acc, 'o-', label='accuracy')
plt.plot(val_acc, 'o-', label='val_accuracy')
plt.legend()
plt.show()

# Loss curves: training vs. validation.
plt.plot(loss, 'o-', label='loss')
plt.plot(val_loss, 'o-', label='val_loss')
plt.legend()
plt.show()