# 相关库的导入
from tensorflow import keras
import tensorflow.keras.layers as layers
import tensorflow as tf
import tensorflow.compat.v1 as tf  # 防止是tensorflow是1.0版本，这样可以兼容
import matplotlib

matplotlib.use('TkAgg')  # 最后会报错，不出图，加这个
import matplotlib.pyplot as plt
import numpy as np
import cv2
import pandas as pd
from keras.callbacks import TensorBoard
import os
import time

# Load the MNIST dataset and prepare it for training.
# NOTE(review): CUDA_VISIBLE_DEVICES is set *after* TensorFlow has been
# imported; this usually still works because device initialisation is lazy,
# but the conventional place is before the first `import tensorflow` — confirm.
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'  # '-1' = CPU only; use '0' (or '0,1', ...) to select GPUs by ID
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()

# Quick sanity check on the raw data.
print(x_train.shape)
# plt.imshow(x_train[10000])
print(y_train[10000])

# Reshape to NHWC (the -1 infers the batch dimension from the remaining
# sizes) and scale the uint8 pixels into [0, 1] as float32.
x_train = x_train.reshape((-1, 28, 28, 1)).astype('float32') / 255
x_test = x_test.reshape((-1, 28, 28, 1)).astype('float32') / 255
print(x_train.shape)

# Build the initial kernel for the 100-unit Dense layer from a spreadsheet.
# The single 'Id' row is tiled 1000 times along axis 0 to match the
# flattened conv output (presumably 5 * 5 * 40 = 1000 units — see the model
# below; confirm 'Id' holds 100 values so the kernel is (1000, 100)).
sheet = pd.read_excel('data.xlsx')
row = [sheet['Id'].values]
tiled = np.repeat(row, 1000, axis=0)
print(tiled.shape)
init = tf.constant_initializer(tiled)
# Model definition: a small LeNet-style CNN for 28x28x1 MNIST images,
# built by passing the layer list straight to Sequential.
model = keras.Sequential([
    # Conv block 1: 20 feature maps, 5x5 kernels, ReLU, no padding.
    layers.Conv2D(input_shape=(28, 28, 1),
                  filters=20, kernel_size=(5, 5), strides=(1, 1),
                  padding='valid', activation='relu'),
    layers.MaxPool2D(pool_size=(2, 2)),

    # Conv block 2: 40 feature maps, 3x3 kernels, ReLU, no padding.
    layers.Conv2D(filters=40, kernel_size=(3, 3), strides=(1, 1),
                  padding='valid', activation='relu'),
    layers.MaxPool2D(pool_size=(2, 2)),

    # Flatten the conv output to a vector for the dense head.
    layers.Flatten(),

    # model.add(layers.Dense(1000, activation='relu'))

    # Hidden layer whose kernel starts from the spreadsheet-derived weights.
    layers.Dense(100, activation='relu', kernel_initializer=init),
    # Classification layer: 10-way softmax over the digit classes.
    layers.Dense(10, activation='softmax'),
])
# Training configuration.
# Fix: the original call omitted the optimizer and silently relied on
# Keras's implicit default ('rmsprop'); it is now spelled out so the
# choice is visible and deliberate.
model.compile(
    optimizer='rmsprop',  # Keras's default; consider 'adam' as an alternative
    # SparseCategoricalCrossentropy expects integer class labels, which is
    # what mnist.load_data() provides; CategoricalCrossentropy would require
    # one-hot encoded labels instead. (Mean squared error is for regression.)
    loss=keras.losses.SparseCategoricalCrossentropy(),
    metrics=['accuracy'],  # track classification accuracy during training
)
# Train the model.
# NOTE(review): steps_per_epoch=1 means each "epoch" processes only a single
# batch of 600 samples, so epochs=1200 behaves like 1200 gradient iterations
# rather than 1200 full passes over the data — presumably intentional (the
# plots below label their x-axis 'iteration'), but confirm.
# validation_split=0.1 holds out 10% of x_train for the val_* metrics.
history = model.fit(x_train, y_train, batch_size=600, epochs=1200, steps_per_epoch=1, validation_split=0.1)
model.summary()  # batch_size is tunable — larger can help against overfitting, but don't make it too large
# model.summary placed after training
# Export the per-epoch training accuracy to an Excel sheet (no index column).
accuracy_curve = pd.Series(history.history['accuracy'], name='One')
accuracy_curve.to_frame().to_excel('excel1.xlsx', sheet_name='Sheet1', index=False)

# Plot the training-accuracy curve.
# Fix: the original legend listed ['Train', 'Test'] even though only the
# training curve is plotted (the validation line is commented out), so the
# legend mislabelled the single visible curve.
plt.plot(history.history['accuracy'])
# plt.plot(history.history['val_accuracy'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('iteration')
plt.legend(['Train'], loc='upper left')
plt.show()

# Plot the training & validation loss curves.
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('iteration')
# Fix: the second curve comes from validation_split, not from the test set,
# so its legend label was corrected from 'Test' to 'Validation'.
plt.legend(['Train', 'Validation'], loc='upper left')
plt.show()
print("savemodel---------------")
# model_json=model.to_json()
# with open('model.json','w') as f:
#     f.write(model_json)   # (alternative: save only the architecture as JSON)
# Save the full model (architecture + weights) as an HDF5 file.
# Fix: os.path.join() with a single argument is a no-op, so the wrapper
# was removed and the path is passed directly.
model.save('model3_3.h5')
