import os

import numpy

import sys
import numpy as np
import glob
import keras
from PIL import Image
from keras.engine.saving import load_model
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.optimizers import SGD
import matplotlib.pyplot as plt

# Generate dummy data
from keras.utils import plot_model

# Dummy data (not used by the training below — kept for reference).
x_train_model = np.random.random((100, 40, 40, 3))
y_train_model = keras.utils.to_categorical(np.random.randint(10, size=(100, 1)), num_classes=10)
# x_test = np.random.random((20, 40, 40, 3))
# y_test = keras.utils.to_categorical(np.random.randint(10, size=(20, 1)), num_classes=10)

path = "/Users/mc/Desktop/captcha_pic_single/"  # image directory; count must be a multiple of 20
batch_size = 100  # number of images per batch
epochs = 20  # number of training epochs
x_train = []
y_train = []
x_test = []
y_test = []
x_valid = []
y_valid = []
pics = os.listdir(path)
# Drop the macOS Finder metadata file if present.
# (An unconditional remove() raises ValueError when the file is absent.)
if ".DS_Store" in pics:
    pics.remove(".DS_Store")
picSize = len(pics)


def getTrainData(batch_index):
    """Load batch ``batch_index`` of PNG images into the global training set.

    Appends each image's raw pixel array to ``x_train`` and a 10-way
    one-hot label (digit taken from the first filename character) to
    ``y_train``.
    """
    for i in range(0, batch_size):
        file = pics[i + batch_index * batch_size]
        # endswith(".png") is stricter than the original suffix slice:
        # it also requires the dot, so names like "Xpng" are skipped.
        if file.endswith(".png"):
            x_train.append(numpy.asarray(Image.open(path + file)))
            y = [0] * 10
            y[int(file[:1])] = 1  # first filename character is the digit label
            y_train.append(y)


def getTestData(batch_index):
    """Load batch ``batch_index`` of PNG images into the global test set.

    Appends each image's raw pixel array to ``x_test`` and a 10-way
    one-hot label (digit taken from the first filename character) to
    ``y_test``.

    NOTE(review): this reads the same batch indices as getTrainData, so
    the train/test/valid sets overlap completely — confirm intended.
    """
    for i in range(0, batch_size):
        file = pics[i + batch_index * batch_size]
        # endswith(".png") also requires the dot, unlike the original
        # three-character suffix comparison.
        if file.endswith(".png"):
            x_test.append(numpy.asarray(Image.open(path + file)))
            y = [0] * 10
            y[int(file[:1])] = 1  # first filename character is the digit label
            y_test.append(y)


def getValidData(batch_index):
    """Load batch ``batch_index`` of PNG images into the global validation set.

    Appends each image's raw pixel array to ``x_valid`` and a 10-way
    one-hot label (digit taken from the first filename character) to
    ``y_valid``.

    NOTE(review): this reads the same batch indices as getTrainData, so
    the train/test/valid sets overlap completely — confirm intended.
    """
    for i in range(0, batch_size):
        file = pics[i + batch_index * batch_size]
        # endswith(".png") also requires the dot, unlike the original
        # three-character suffix comparison.
        if file.endswith(".png"):
            x_valid.append(numpy.asarray(Image.open(path + file)))
            y = [0] * 10
            y[int(file[:1])] = 1  # first filename character is the digit label
            y_valid.append(y)


# Use only a whole multiple of batch_size images (int() floors); any
# remainder is dropped.
for i in range(0, int(picSize / batch_size)):
    getTrainData(i)
    getTestData(i)
    getValidData(i)

# Convert to float32 and scale pixel values to [0, 1].
# BUGFIX: only the image arrays are scaled. The original code also
# divided the one-hot labels by 255, shrinking every target to ~0.0039
# and breaking categorical_crossentropy training.
x_train = numpy.asarray(x_train).astype("float32")
y_train = numpy.asarray(y_train).astype("float32")
x_train /= 255
x_test = numpy.asarray(x_test).astype("float32")
y_test = numpy.asarray(y_test).astype("float32")
x_test /= 255
x_valid = numpy.asarray(x_valid).astype("float32")
y_valid = numpy.asarray(y_valid).astype("float32")
x_valid /= 255

# CNN for 40x40 RGB captcha digits: two conv/pool stacks followed by a
# dense classifier over the 10 digit classes.
model = Sequential([
    # Input: 3-channel 40x40 pixel images -> (40, 40, 3) tensors,
    # filtered by 32 convolution kernels of size 3x3.
    Conv2D(32, (3, 3), activation='relu', input_shape=(40, 40, 3)),
    Conv2D(32, (3, 3), activation='relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.25),

    Conv2D(64, (3, 3), activation='relu'),
    Conv2D(64, (3, 3), activation='relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.25),

    Flatten(),
    Dense(256, activation='relu'),    # fully-connected layer
    Dropout(0.5),
    Dense(10, activation='softmax'),  # classification (logistic regression) layer
])

sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
# metrics=['accuracy'] reports accuracy during training/evaluation
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

model.summary()  # print the architecture
plot_model(model, to_file='generator/cnnModel.png', show_shapes=True)  # save the model as an image

# Convert labels to one-hot encoding (already done while loading the data
# in the getXData helpers).
# labels = np.random.randint(10, size=(1000, 1))
# one_hot_labels = keras.utils.to_categorical(labels, num_classes=10)

# Train the model, iterating in batches of batch_size samples.
history = model.fit(x_train, y_train, validation_split=0.33, batch_size=batch_size, epochs=epochs)
# print(history.history)  # per-epoch loss and accuracy for train and validation sets

# Plot training & validation accuracy per epoch.
# Keras 2.2 logs the metric under 'acc'/'val_acc'; newer versions use
# 'accuracy'/'val_accuracy' — support both.
acc_key = 'acc' if 'acc' in history.history else 'accuracy'
plt.plot(history.history[acc_key])
plt.plot(history.history['val_' + acc_key])  # requires validation_split in model.fit
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
# BUGFIX: the second curve is the validation split, not the test set.
plt.legend(['Train', 'Validation'], loc='upper left')
plt.show()

# Evaluate the model on the test set; score is [loss, accuracy].
score = model.evaluate(x_test, y_test, batch_size=batch_size)
print('\n---模型准确率: %.2f%%' % (score[1] * 100))

# Save the model — creates the HDF5 file 'generator/cnnModel.h5'.
model.save('generator/cnnModel.h5')
# del model  # delete the existing model

# Reload the model (round-trip sanity check of the saved file).
model = load_model('generator/cnnModel.h5')

# Predict: feed one image-shaped random array; predict_classes returns
# the predicted class index.
result = model.predict_classes(np.random.random((1, 40, 40, 3)))
print('---预测结果:', result, '\n')
# BUGFIX: exit with status 0 on success — the original sys.exit(1)
# reported failure to the shell even after a clean run.
sys.exit(0)
