# -*- coding: utf-8 -*-
__author__ = 'Gui'
'''
@Time    : 2022/6/5 18:21
@Author  : Gui
@File    : fira_Train_model_tensorflow.py
@Software: PyCharm
Note: if the model never gets saved, lower the accuracy threshold ("yuzhi").
'''

import tensorflow as tf
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import OneHotEncoder
import os
import glob
import sys
from sklearn.model_selection import train_test_split
import time
from keras.callbacks import TensorBoard

# from tensorflow.keras.callbacks import TensorBoard

# Loading dataset
print("Loading dataset ...")

# Number of images per batch (unused below; `batch_size` is set separately)
images_per_batch = 10
# Frame dimensions
width = 480
width_save = 480
height = 180
channel = 1  # number of channels: 1 (grayscale)
classes = 5  # number of output classes (one-hot label width)
# data = np.zeros((1, height, width))
data = np.zeros((1, height, width_save))
label = np.zeros((1, classes))
deleted = 0
total = 0
# Seed rows (all zeros) so np.vstack can append; they are sliced off later.
# image_array = np.zeros((1, height * width * channel))
image_array = np.zeros((1, height * width_save * channel))
label_array = np.zeros((1, classes), 'float')
# training_data = glob.glob('dataset/*.npz')  # find all .npz files
training_data = glob.glob('dataset/labeled_img_data_6box3002.npz')  # find the .npz dataset file
# i = 1
if not training_data:  # no data file found
    print("No training data in directory, exit")
    sys.exit()

repeat = True

# NOTE(review): `repeat` is never set to False, so this loop retrains forever;
# each pass re-stacks the same .npz data onto image_array/label_array, so the
# arrays (and memory use) grow every iteration — confirm whether intentional.
while repeat:
    for single_npz in training_data:
        with np.load(single_npz) as data:
            train_temp = data['train']
            train_labels_temp = data['train_labels']
        image_array = np.vstack((image_array, train_temp))  # stack training images vertically
        label_array = np.vstack((label_array, train_labels_temp))  # stack training labels vertically
        # i += 1
    print("dataset loaded.")
    print("---------------------------------------------------------")
    X_data = image_array[1:, :]  # X: image data (drop the all-zeros seed row)
    Y_data = label_array[1:, :classes]  # Y: one-hot labels (drop the seed row)
    print("X_data:", X_data.shape)
    print("Y_data:", Y_data.shape)
    # print ("X_data:", X_data[:10, :])
    # print ("Y_data:", Y_data[:10, :])
    scaler = MinMaxScaler()  # rescale each feature column to [0, 1]
    # X_data = np.reshape(X_data, (X_data.shape[0], height * width)) # flattened
    X_data = np.reshape(X_data, (X_data.shape[0], height * width_save * channel))
    X_data = scaler.fit_transform(X_data)  # fit min/max on X_data, then rescale it

    print("X_data:", X_data.max(), X_data.min())
    # print ("Y_data:", Y[:10, :])

    # Split into training and test sets (tr = train, te = test), 70/30
    trData, teData, trLabel, teLabel = train_test_split(X_data, Y_data, test_size=0.3)
    # Reshape back to image format: (batch, height, width, channels)
    X = trData.reshape(-1, height, width_save, channel)  # -1: infer the row count
    Y = trLabel
    print('Y:', Y.shape)
    batch_size = 10  # mini-batch size for mini-batch gradient descent (MBGD)
    def generatebatch(X, Y, n_examples, batch_size):
        """Yield successive (batch_xs, batch_ys) mini-batches from X and Y.

        Only full batches are produced: the trailing remainder of
        n_examples % batch_size samples is dropped.
        """
        n_batches = n_examples // batch_size
        for batch_i in range(n_batches):
            lo = batch_i * batch_size
            hi = lo + batch_size
            yield X[lo:hi], Y[lo:hi]


    tf.reset_default_graph()  # clear the default graph stack and reset the global default graph (TF1-style API)

    ######## Graph construction ########
    # ------------ input layer ------------------
    with tf.name_scope('inputs'):
        tf_X = tf.placeholder(tf.float32,
                              [None, height, width_save, channel],
                              name='input')  # image dimensions (batch, H, W, C)
        tf_Y = tf.placeholder(tf.float32,
                              [None, classes])

    # ---------- convolution layer + activation ---------------
    # weights
    with tf.name_scope('first_layers'):
        with tf.name_scope('weights'):
            conv_filter_w1 = tf.get_variable('W', [5, 5, 1, 10],
                                             initializer=tf.contrib.layers.xavier_initializer())  # tf.Variable(tf.random_normal([3, 3, 1, 10])) # 1-channel
            tf.summary.histogram('first_layers_weights', conv_filter_w1)
        # conv_filter_w1 = tf.get_variable('W', [5, 5, 3, 10],
        #                                  initializer=tf.contrib.layers.xavier_initializer())  # tf.Variable(tf.random_normal([3, 3, 1, 10])) # 3-channel
        # bias
        # conv_filter_b1 = tf.Variable(tf.random_normal([10]))
        with tf.name_scope('biases'):
            conv_filter_b1 = tf.Variable(tf.random_normal([10]))
            tf.summary.histogram('first_layers_biases', conv_filter_b1)
        # rectified linear unit (ReLU) activation
        with tf.name_scope('ReLU'):
            relu_feature_maps1 = tf.nn.relu(tf.nn.conv2d(tf_X, conv_filter_w1, strides=[1, 2, 2, 1], padding='SAME')
                                            + conv_filter_b1)  # 1-channel
        # relu_feature_maps1 = tf.nn.relu(tf.nn.conv2d(tf_X,
        #                                              conv_filter_w1,
        #                                              strides=[3, 2, 2, 3],
        #                                              padding='SAME') + conv_filter_b1) # 3-channel
        print("conv_out1:", relu_feature_maps1)

        # ------------- pooling layer ------------------
        with tf.name_scope('max_pool'):
            max_pool1 = tf.nn.max_pool(relu_feature_maps1,
                                       ksize=[1, 3, 3, 1],
                                       strides=[1, 2, 2, 1],
                                       padding='SAME')
        print("max_pool:", max_pool1)

    # second layer
    # ------------- convolution layer ------------------
    # conv_filter_w2 = tf.Variable(tf.random_normal([3, 3, 10, 5]))
    with tf.name_scope('second_layers'):
        with tf.name_scope('weights'):
            conv_filter_w2 = tf.Variable(tf.random_normal([3, 3, 10, 5]))
            tf.summary.histogram('second_layers_weights', conv_filter_w2)
        with tf.name_scope('biases'):
            conv_filter_b2 = tf.Variable(tf.random_normal([5]))
            tf.summary.histogram('second_layers_biases', conv_filter_b2)
        with tf.name_scope('convolution'):
            conv_out2 = tf.nn.conv2d(max_pool1,
                                     conv_filter_w2,
                                     strides=[1, 1, 1, 1],
                                     padding='SAME') + conv_filter_b2
        print("conv_out2:", conv_out2)

        # -------- batch-normalization layer + activation -------------
        # compute per-channel mean and variance over (batch, height, width)
        with tf.name_scope('normalization'):
            batch_mean, batch_var = tf.nn.moments(conv_out2,
                                                  [0, 1, 2],
                                                  keep_dims=True)
            with tf.name_scope('shift'):
                shift = tf.Variable(tf.zeros([5]))  # shift (beta)
                scale = tf.Variable(tf.ones([5]))  # scale (gamma)
            epsilon = 1e-3  # numerical-stability constant
            # y = scale * (x - mean) / sqrt(var + epsilon) + shift  -- batch normalization
            BN_out = tf.nn.batch_normalization(conv_out2,  # input
                                               batch_mean,  # batch mean
                                               batch_var,  # batch variance
                                               shift,  # offset
                                               scale,  # scale
                                               epsilon)
        print("BN_out:", BN_out)
        # activation layer
        with tf.name_scope('ReLU'):
            relu_BN_maps2 = tf.nn.relu(BN_out)

        # ------------- pooling layer -----------------
        with tf.name_scope('max_pool'):
            max_pool2 = tf.nn.max_pool(relu_BN_maps2,
                                       ksize=[1, 3, 3, 1],
                                       strides=[1, 2, 2, 1],
                                       padding='SAME')
        print("max_pool2", max_pool2)

        # flatten the feature maps
        with tf.name_scope('reshape'):
            # 23*60*5: 180x480 input halved three times with SAME padding
            # (conv stride 2 -> 90x240, pool -> 45x120, pool -> 23x60), 5 channels.
            # Hard-coded: must be recomputed if the input size changes.
            max_pool2_flat = tf.reshape(max_pool2, [-1, 23 * 60 * 5])

    # ------------- fully connected layer ---------------
    with tf.name_scope('connection'):
        with tf.name_scope('weights'):
            fc_w1 = tf.Variable(tf.random_normal([23 * 60 * 5, 50]))
            tf.summary.histogram('connections_weights', fc_w1)
        with tf.name_scope('biases'):
            fc_b1 = tf.Variable(tf.random_normal([50]))
            tf.summary.histogram('connections_biases', fc_b1)
        with tf.name_scope('ReLU'):
            fc_out1 = tf.nn.relu(tf.matmul(max_pool2_flat, fc_w1) + fc_b1)  # affine transform + ReLU

        dropout_keep_prob = 0.8
        with tf.name_scope('dropout'):
            # NOTE(review): keep_prob is a hard-wired constant, so dropout is also
            # active when `accuracy` is evaluated below — consider feeding it via a
            # placeholder (1.0 at evaluation time). TODO confirm intent.
            fc1_drop = tf.nn.dropout(fc_out1, dropout_keep_prob)  # keep each neuron with probability 0.8
    # -------------- output layer ----------------
    with tf.name_scope('outputs'):
        with tf.name_scope('weights'):
            out_w1 = tf.Variable(tf.random_normal([50, classes]))
            # BUG FIX: these summaries previously logged fc_w1 (the fully connected
            # layer's weights) under the output-layer tags; log the output-layer
            # tensors themselves so TensorBoard shows the right histograms.
            tf.summary.histogram('outputs_weights', out_w1)
        with tf.name_scope('biases'):
            out_b1 = tf.Variable(tf.random_normal([classes]))
            tf.summary.histogram('outputs_biases', out_b1)
    # Predicted class probabilities: affine transform of the dropout output, then softmax
    with tf.name_scope('pred'):
        pred = tf.nn.softmax(tf.matmul(fc1_drop, out_w1) + out_b1, name='pred')

    with tf.name_scope('loss'):
        # Option 1: cross-entropy loss (pred clipped to [1e-11, 1] to avoid log(0))
        loss = -tf.reduce_mean(tf_Y * tf.log(tf.clip_by_value(pred, 1e-11, 1.0)))
        tf.summary.scalar('loss', loss)

        # # Option 2: mean-squared-error loss
        # loss = tf.reduce_mean(tf.square(tf_Y - pred))

    # Adam optimizer: gradient descent with first/second-moment (momentum) correction
    with tf.name_scope('train'):
        train_step = tf.train.AdamOptimizer(3e-4, name='train_step').minimize(loss)

    y_pred = tf.argmax(pred, 1)  # per row, index of the largest predicted probability
    bool_pred = tf.equal(tf.argmax(tf_Y, 1), y_pred)  # whether each prediction matches its label

    with tf.name_scope('acc'):
        accuracy = tf.reduce_mean(tf.cast(bool_pred, tf.float32), name="accuracy")  # fraction of correct predictions
    # *************** checkpoint save location ********************
    save_path = "model/auto_drive_6box300/"

    # ---------------------- start training -------------------------#
    print("Start training ...")
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())  # initialize all graph variables
        merged = tf.summary.merge_all()
        writer = tf.summary.FileWriter("logs/", sess.graph)
        train_best = 0  # best training accuracy seen so far
        best_train_epoch = 0
        count = []  # rolling window of recent training accuracies (plateau detection)
        test_res = 0
        test_best = 0  # best test accuracy seen so far
        best_test_epoch = 0
        for epoch in range(1000):  # train for up to 1000 epochs
            print("*******************************************************************")
            for batch_xs, batch_ys in generatebatch(X, Y, Y.shape[0], batch_size):  # one MBGD pass per epoch
                sess.run(train_step, feed_dict={tf_X: batch_xs, tf_Y: batch_ys})
            res = sess.run(accuracy, feed_dict={tf_X: X, tf_Y: Y})
            rs = sess.run(merged, feed_dict={tf_X: X, tf_Y: Y})
            writer.add_summary(rs, epoch)
            if res > train_best:  # training accuracy improved
                train_best = res
                best_train_epoch = epoch
                # evaluate on the held-out test split
                Xte = teData.reshape((-1, height, width_save, channel))
                Yte = teLabel
                test_res = sess.run(accuracy, feed_dict={tf_X: Xte, tf_Y: Yte})
            if test_best < test_res:  # test accuracy improved
                test_best = test_res
                best_test_epoch = epoch
                if test_res > 0.85:  # only checkpoint once test accuracy exceeds 0.85
                    model = tf.train.Saver()
                    model.save(sess=sess, save_path=save_path, global_step=best_test_epoch)
                    print("model step at", best_test_epoch, "is saved.")
                    time.sleep(5)
            print("Epoch:", epoch)
            print("Train res:", res,
                  ", best train accuracy is:", train_best,
                  "at epoch:", best_train_epoch)
            print("Test res:", test_res,
                  ", best test accuracy is:", test_best,
                  "at epoch:", best_test_epoch)
            count = np.append(count, res)
            if len(count) >= 5:
                count = count[-5:]
                # near-zero variance over the last 5 epochs: treat as converged/stuck and stop
                if np.var(count) < 1e-8:
                    print("陷入局部最小")
                    break


        print("best result:", train_best, "best result in test:", test_best)

        sess.close()  # redundant inside `with tf.Session()`, but harmless
