import os
import random

import numpy as np
import pandas as pd
from PIL import Image
from keras.utils import np_utils
from keras.models import Sequential, Model, load_model  # keras的神经网络模型
from keras.layers import Dense, Activation, Dropout
import tensorflow as tf


class ModelSave(object):
    """Mixin providing disk persistence helpers for Keras models."""

    def model_save(self, model, model_path):
        """Persist a trained model to disk.

        :param model: the model object to persist
        :param model_path: destination path for the saved model file
        """
        model.save(model_path)

    def model_load(self, model_path):
        """Restore a previously saved model from disk.

        :param model_path: path of the model file to load
        :return: the reconstructed model object
        """
        return load_model(model_path)


class DeepLearn(ModelSave):
    """MNIST digit classifier offering three model back-ends.

    Loads the MNIST data set from ``data/mnist.npz`` next to this file,
    preprocesses it, and prepares one caller-supplied image for prediction.
    ``dnn_keras``/``mlp_keras`` train (or load a cached) Keras model;
    ``dnn_tensorflow`` trains a softmax regression with raw TensorFlow 1.x.
    """

    def __init__(self, pred_path):
        """Load and preprocess MNIST plus the image to be predicted.

        :param pred_path: path of the image whose digit should be predicted
        """
        # 1. Locate and load the bundled MNIST archive.
        path = os.path.abspath(os.path.dirname(__file__)) + os.sep
        file_path = path + "data" + os.sep + "mnist.npz"
        # np.load on an .npz keeps the underlying file handle open; use a
        # context manager so it is closed once the arrays are extracted.
        with np.load(file_path) as f:
            x_train = f['x_train']  # training features, shape (60000, 28, 28)
            x_test = f['x_test']    # test features, shape (10000, 28, 28)
            y_train = f['y_train']  # training labels
            y_test = f['y_test']    # test labels

        # Problem dimensions / hyper-parameters shared by the methods below.
        self.img_size = 28 * 28   # flattened image size
        self.num_size = 10        # number of target classes (digits 0-9)
        self.batch_size = 128     # mini-batch size
        self.nb_epochs = 2        # default number of training epochs

        # Feature preprocessing: flatten (N, 28, 28) -> (N, 784) and rescale
        # uint8 pixel values into [0, 1] floats.  Sample counts are taken
        # from the arrays themselves instead of being hard-coded.
        self.x_train = (x_train.reshape(x_train.shape[0], self.img_size)
                        .astype("float32")) / 255
        self.x_test = (x_test.reshape(x_test.shape[0], self.img_size)
                       .astype("float32")) / 255

        # Target preprocessing: one-hot encode the uint8 class labels.
        self.y_train = np_utils.to_categorical(y_train, self.num_size)
        self.y_test = np_utils.to_categorical(y_test, self.num_size)

        # Prediction image: resize to 28x28 and convert to grayscale ("L"
        # is one of PIL's nine modes), then flatten and rescale exactly
        # like the training data.
        img = Image.open(pred_path).resize((28, 28)).convert("L")
        im_arr = np.array(img)
        self.pred_img = (im_arr.reshape(1, self.img_size).astype("float32")) / 255

    def _keras_model_path(self, filename):
        """Return the path of a persisted Keras model under static/model/.

        :param filename: bare model file name, e.g. "dnn.h5"
        :return: absolute path string (.h5 is the HDF5 format Keras uses)
        """
        base_path = os.path.abspath(os.path.dirname(__file__)) + os.sep
        return base_path + "static" + os.sep + "model" + os.sep + filename

    def _evaluate_and_predict(self, model, label):
        """Score *model* on the test set and predict the caller's image.

        :param model: trained Keras model
        :param label: model name prefixed to the accuracy message
        :return: (prediction message, accuracy message) strings
        """
        # 5. Evaluate: model.evaluate returns [loss, accuracy]; keep accuracy.
        metr = model.evaluate(self.x_test, self.y_test, verbose=0)
        score = round(metr[1], 2)
        score = label + "模型正确率:" + str(score)

        # 6. Apply the model to the single prepared image and decode the
        # one-hot output back to a digit via argmax.
        pred = model.predict(self.pred_img)
        pred = np.argmax(pred)
        pred = "预测的结果是:" + str(pred)
        print(pred, score)
        return pred, score

    def dnn_keras(self):
        """Shallow (single-layer) neural network classifier.

        Loads a cached model from static/model/dnn.h5 when present,
        otherwise builds, trains and saves a fresh one.
        :return: (prediction message, accuracy message) strings
        """
        model_path = self._keras_model_path("dnn.h5")
        if os.path.exists(model_path):
            # Model file exists: reuse it instead of retraining.
            print("加载模型...")
            model = self.model_load(model_path)
        else:
            print("构造模型...")
            # 2. Build: one dense softmax layer mapping 784 pixels to 10 classes.
            model = Sequential([Dense(10, input_shape=(self.img_size,), activation="softmax")])

            # 3. Compile: RMSprop optimizer, categorical cross-entropy loss,
            # accuracy as the reported metric.
            model.compile(optimizer="rmsprop", loss="categorical_crossentropy", metrics=["accuracy"])

            # 4. Train.  NOTE(review): trains for 10 epochs although
            # self.nb_epochs is 2 — kept as-is; confirm which is intended.
            model.fit(self.x_train, self.y_train, batch_size=self.batch_size,
                      epochs=10, verbose=1, validation_data=(self.x_test, self.y_test))
            self.model_save(model, model_path)

        return self._evaluate_and_predict(model, "DNN")

    def mlp_keras(self):
        """Multi-layer perceptron classifier (two hidden ReLU layers).

        Loads a cached model from static/model/mlp.h5 when present,
        otherwise builds, trains and saves a fresh one.
        :return: (prediction message, accuracy message) strings
        """
        model_path = self._keras_model_path("mlp.h5")
        if os.path.exists(model_path):
            # Model file exists: reuse it instead of retraining.
            print("加载模型...")
            model = self.model_load(model_path)
        else:
            print("构造模型...")
            # 2. Build: 784 -> 512 -> 512 -> 10.  input_shape is only
            # meaningful on the first layer, so it is declared once.
            model = Sequential([
                # input layer
                Dense(512, input_shape=(self.img_size,)),
                Activation('relu'),
                Dropout(0.2),  # random drop to reduce over-fitting
                # hidden layer
                Dense(512),
                Activation('relu'),
                Dropout(0.2),
                # output layer
                Dense(10, activation="softmax"),
            ])

            # 3. Compile: RMSprop optimizer, categorical cross-entropy loss.
            model.compile(optimizer="rmsprop", loss="categorical_crossentropy", metrics=["accuracy"])

            # 4. Train for the shared default epoch count (2).
            model.fit(self.x_train, self.y_train, batch_size=self.batch_size,
                      epochs=self.nb_epochs, verbose=1,
                      validation_data=(self.x_test, self.y_test))
            self.model_save(model, model_path)

        return self._evaluate_and_predict(model, "MLP")

    def dnn_tensorflow(self):
        """Softmax regression (y = softmax(xW + b)) in raw TensorFlow 1.x.

        :return: (prediction message, accuracy message) strings
        """
        # Placeholders for a batch of flattened images and one-hot labels.
        x = tf.placeholder("float32", [None, 784])
        y = tf.placeholder("float32", [None, 10])
        W = tf.Variable(tf.zeros([784, 10]))  # weights
        b = tf.Variable(tf.zeros([10]))       # bias
        # Prediction: matrix product plus bias, squashed through softmax.
        y_pred = tf.nn.softmax(tf.matmul(x, W) + b)
        # Cross-entropy loss.  NOTE(review): reduce_mean averages over the
        # 10 class entries as well as the batch (scaling the loss by 1/10),
        # and tf.log(0) can yield NaN — kept as-is to preserve behaviour.
        loss = -tf.reduce_mean(y * tf.log(y_pred))
        # Plain gradient descent with learning rate 0.01.
        optimizer = tf.train.GradientDescentOptimizer(0.01)
        train_step = optimizer.minimize(loss)

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())  # initialise all variables
            train_len = len(self.x_train)  # derived instead of hard-coded 60000
            for i in range(3000):
                # Pick a random 128-sample window; clamp it so the slice
                # never runs past the end of the training set.
                ram_size1 = random.randint(1, train_len)
                ram_size2 = ram_size1 + 128
                if ram_size2 > train_len:
                    ram_size2 = ram_size1
                    ram_size1 = ram_size1 - 128
                x_train = self.x_train[ram_size1:ram_size2]
                y_train = self.y_train[ram_size1:ram_size2]
                # One optimisation step on this mini-batch.
                sess.run(train_step, feed_dict={x: x_train, y: y_train})
                if i % 20 == 0:
                    print(f"第{i}次迭代，误差为：{sess.run(loss, feed_dict={x: x_train, y: y_train})}")

            # Evaluation: fraction of test samples whose argmax prediction
            # matches the one-hot label.
            correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.argmax(y, 1))
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
            metr = sess.run(accuracy, feed_dict={x: self.x_test, y: self.y_test})

            # Application: predict the caller's image and decode via argmax.
            pred = sess.run(y_pred, feed_dict={x: self.pred_img})
            pred = np.argmax(pred)
        score = round(metr, 2)
        score = "tensorflow模型正确率：" + str(score)
        pred = "预测结果：" + str(pred)
        print(pred, score)
        return pred, score


if __name__ == '__main__':
    # Demo: predict the digit in "0.png" with the TensorFlow model.
    # Uncomment a line below to try one of the Keras back-ends instead.
    learner = DeepLearn("0.png")
    # learner.dnn_keras()
    # learner.mlp_keras()
    learner.dnn_tensorflow()
