# coding=utf-8
# Multi-scale convolutional neural network (MSCNN), trained on vibration-type equipment data

import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
import tensorflow as tf
import sys
import argparse
import shutil
import time
import image
import glob
import matplotlib as mpl

mpl.use("Agg")
import matplotlib.pyplot as plt

from Utils import bilinear_interpolation

'''
# 模型搭建
model = Sequential()
model.add(Conv2D(12, kernel_size=(3, 3), activation='relu', input_shape=(30, 30, 1)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(24, (5, 5), activation='relu'))
output1 = model.add(MaxPooling2D(pool_size=(2, 2)))
output2 = model.add(Conv2D(24, (3, 3), activation='relu'))
output = np.concatenate((output1, output2), axis=0)
model.add(Flatten(output))  # 对最后一个卷积和最后一个池化flatten
model.add(Dense(200))
model.add(Dense(1))
'''

BASE_DIR = os.path.dirname(os.path.abspath(__file__))  # directory containing this script
sys.path.append(BASE_DIR)

# Command-line hyper-parameters.
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--max_epoch', type=int, default=200, help='Epoch to run [default: 10000]')
parser.add_argument('--batch_size', type=int, default=10, help='Batch Size during training [default: 14]')
parser.add_argument('--learning_rate', type=float, default=0.05, help='Initial learning rate [default: ]')
FLAGS = parser.parse_args()

GPU_INDEX = FLAGS.gpu
MAX_EPOCH = FLAGS.max_epoch
BATCH_SIZE = FLAGS.batch_size
BASE_LEARNING_RATE = FLAGS.learning_rate
LOG_DIR = 'LOG_MCSNN_Bearing'
write_result = True  # when True, save best-model checkpoints during training

DATA_ROOT = '../Data'
data_path = 'PHM12_Bearing/Bearing1_1_TFRs'

# Logging setup: the log directory is recreated from scratch on every run.
name_file = sys.argv[0]  # path of this script
# DATA_ROOT = '/home/zxl/zy/Predict_Module/Data'  # dataset directory
if os.path.exists(LOG_DIR):
    shutil.rmtree(LOG_DIR)  # wipe logs from any previous run
os.mkdir(LOG_DIR)  # fresh log directory
os.system('cp %s %s' % (name_file, LOG_DIR))  # archive a copy of this script alongside the logs
LOG_FOUT = open(os.path.join(LOG_DIR, str(LOG_DIR) + '.txt'), 'w')  # run log file (truncates any existing content)
LOG_FOUT.write(str(FLAGS) + '\n')  # record all hyper-parameters in the log


def log_string(out_str):
    """Append one line to the run log file (flushed immediately) and echo it to stdout."""
    LOG_FOUT.write('%s\n' % out_str)
    LOG_FOUT.flush()  # make the line visible on disk right away
    print(out_str)


def weight_variable(shape):
    """Create a trainable weight tensor of the given shape,
    initialized from a truncated normal with stddev 0.1."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))


def bias_variable(shape):
    """Create a trainable bias tensor of the given shape, initialized to the constant 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape))


def conv2d(x, W):
    """2-D convolution of x with kernel W: unit stride in every dimension, 'SAME' (zero) padding."""
    unit_stride = [1, 1, 1, 1]
    return tf.nn.conv2d(x, W, strides=unit_stride, padding='SAME')


def max_pool_2x2(x):
    """2x2 max pooling with stride 2 and 'SAME' padding (spatial dims halve, rounding up)."""
    window = [1, 2, 2, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='SAME')


def MSCNN(input_x, input_u, is_training):
    """Multi-scale CNN regression head.

    Two conv + max-pool stages, then a third conv whose output is fused
    element-wise with the second pooling output (both scaled by one shared
    weight tensor), flattened, and passed through two fully-connected layers
    to a single scalar prediction.

    NOTE(review): input_u and is_training are accepted but never used by the
    graph — kept only for signature compatibility with the caller.
    """
    # Stage 1: 3x3 conv, 3 -> 12 channels, then 2x2 max pool.
    kernel1 = weight_variable([3, 3, 3, 12])
    bias1 = bias_variable([12])
    pooled1 = max_pool_2x2(tf.nn.relu(conv2d(input_x, kernel1) + bias1))

    # Stage 2: 5x5 conv, 12 -> 24 channels, then 2x2 max pool.
    kernel2 = weight_variable([5, 5, 12, 24])
    bias2 = bias_variable([24])
    pooled2 = max_pool_2x2(tf.nn.relu(conv2d(pooled1, kernel2) + bias2))

    # Stage 3: 3x3 conv, 24 -> 24 channels, no pooling.
    kernel3 = weight_variable([3, 3, 24, 24])
    bias3 = bias_variable([24])
    conv3 = tf.nn.relu(conv2d(pooled2, kernel3) + bias3)

    # Multi-scale fusion: both scales weighted by the same [8, 8, 24] tensor.
    # (Assumes the spatial size here is 8x8 — i.e. a 30x30 input — TODO confirm.)
    fuse_weight = weight_variable([8, 8, 24])
    fuse_bias = bias_variable([24])
    fused = tf.nn.relu(conv3 * fuse_weight + pooled2 * fuse_weight + fuse_bias)

    # Flatten and regress down to one output value (no activation on FC layers).
    flat = tf.layers.flatten(fused)
    hidden = tf.contrib.layers.fully_connected(flat, 200, activation_fn=None)
    return tf.contrib.layers.fully_connected(hidden, 1, activation_fn=None)


def data_pro():
    # Standalone preprocessing demo: decode one TFR image and resize it to 30x30.
    # NOTE(review): the result is computed but never returned, printed, or
    # stored — presumably a scratch/debug helper (only referenced from the
    # commented-out call at the bottom of the file).
    with tf.Session() as sess:
        image_path = os.path.join(DATA_ROOT, data_path, '1_TFR.jpg')
        image_raw_data = tf.gfile.FastGFile(image_path, 'rb').read()
        image_data = tf.image.decode_jpeg(image_raw_data)  # decode JPEG bytes to a uint8 tensor
        image_data = tf.image.convert_image_dtype(image_data, dtype=tf.float32)  # rescale to float32
        image_data = tf.image.resize_images(image_data, [30, 30], method=0)  # method 0 = bilinear interpolation
        # print(image_data.eval())


def _load_resized_images(sess, dir_name, count):
    """Load images 1_TFR.jpg .. <count>_TFR.jpg from DATA_ROOT/dir_name.

    Each image is decoded, converted to float32 and bilinearly resized to
    30x30. The decode/resize pipeline is built ONCE and fed through a
    placeholder — the original code added fresh graph ops for every image,
    growing the graph without bound.

    Returns a (count, 30, 30, channels) float32 numpy array.
    """
    raw_jpeg = tf.placeholder(tf.string)
    decoded = tf.image.decode_jpeg(raw_jpeg)
    as_float = tf.image.convert_image_dtype(decoded, dtype=tf.float32)
    resized = tf.image.resize_images(as_float, [30, 30], method=0)  # method 0 = bilinear

    images = []
    for i in range(1, count + 1):
        image_path = os.path.join(DATA_ROOT, dir_name, str(i) + '_TFR.jpg')
        raw = tf.gfile.FastGFile(image_path, 'rb').read()
        images.append(sess.run(resized, feed_dict={raw_jpeg: raw}))
        print(i)
    return np.asarray(images, dtype=np.float32)


def _rul_labels(count):
    """Remaining-useful-life labels for a run of <count> samples.

    Samples are taken every 10 s; the RUL counts down count-1, ..., 1, 0 —
    exactly one label per image, shaped (count, 1). (The original code built
    only count-1 labels per run, misaligning images and labels.)
    """
    return np.arange(count - 1, -1, -1, dtype=np.float32).reshape(count, 1)


def main():
    """Build the MSCNN graph, train on bearings 1_1 + 1_2, evaluate on 1_3,
    checkpoint the best test loss and log progress every 5 epochs."""
    with tf.Graph().as_default():
        with tf.device('/gpu:' + str(GPU_INDEX)):
            is_training_pl = tf.placeholder(tf.bool, shape=())
            input_x = tf.placeholder(tf.float32, shape=(None, 30, 30, 3))  # 30x30 RGB TFR images
            input_y = tf.placeholder(tf.float32, shape=(None, 1))          # RUL regression target

            # Build the model and its MSE loss.
            predictions = MSCNN(input_x, input_y, is_training=is_training_pl)
            loss = tf.losses.mean_squared_error(labels=input_y, predictions=predictions)
            tf.summary.scalar('loss(MSE)', loss)

            batch = tf.Variable(0, trainable=False)  # global step counter
            # Honor the --learning_rate flag (was hard-coded to 0.1, ignoring FLAGS).
            optimizer = tf.train.AdamOptimizer(learning_rate=BASE_LEARNING_RATE)
            train_op = optimizer.minimize(loss, global_step=batch)

            saver = tf.train.Saver()

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True  # grow GPU memory on demand instead of grabbing it all
        config.allow_soft_placement = True      # fall back to another device if the requested one is missing
        config.log_device_placement = False     # keep device-placement logging off
        sess = tf.Session(config=config)

        merged = tf.summary.merge_all()  # single fetch for all summaries (TensorBoard)
        train_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'train'), sess.graph)
        sess.run(tf.global_variables_initializer())

        def _count_jpgs(dir_name):
            # Number of samples in one bearing run = number of *.jpg files.
            return len(glob.glob(pathname=os.path.join(DATA_ROOT, dir_name, '*.jpg')))

        train_files_1 = _count_jpgs('PHM12_Bearing/Bearing1_1_TFRs')
        train_files_2 = _count_jpgs('PHM12_Bearing/Bearing1_2_TFRs')
        test_files = _count_jpgs('PHM12_Bearing/Bearing1_3_TFRs')

        # Training set: two bearing runs stacked, with per-run countdown labels.
        train_x = np.vstack((
            _load_resized_images(sess, 'PHM12_Bearing/Bearing1_1_TFRs', train_files_1),
            _load_resized_images(sess, 'PHM12_Bearing/Bearing1_2_TFRs', train_files_2),
        ))
        train_y = np.vstack((_rul_labels(train_files_1), _rul_labels(train_files_2)))

        # Test set: the held-out bearing run.
        test_x = _load_resized_images(sess, 'PHM12_Bearing/Bearing1_3_TFRs', test_files)
        test_y = _rul_labels(test_files)

        start = time.time()
        min_loss = np.inf
        num_train_batches = len(train_x) // BATCH_SIZE
        num_test_batches = len(test_x) // BATCH_SIZE
        for epoch_idx in range(MAX_EPOCH):
            current_loss = None  # defined even if the training set is smaller than one batch
            for batch_idx in range(num_train_batches):
                start_idx = batch_idx * BATCH_SIZE
                end_idx = start_idx + BATCH_SIZE
                summary, step, _, current_loss = sess.run(
                    [merged, batch, train_op, loss],
                    feed_dict={is_training_pl: True,
                               input_x: train_x[start_idx:end_idx],
                               input_y: train_y[start_idx:end_idx]})
            train_writer.add_summary(summary, step)  # one summary per epoch (last batch)

            # Evaluate on the held-out bearing every epoch.
            loss_sum = 0.0
            for test_idx in range(num_test_batches):
                start_idx = test_idx * BATCH_SIZE
                end_idx = start_idx + BATCH_SIZE
                test_loss = sess.run(
                    loss,
                    feed_dict={is_training_pl: False,
                               input_x: test_x[start_idx:end_idx],
                               input_y: test_y[start_idx:end_idx]})
                loss_sum += test_loss
            # Average over the batches actually evaluated (was divided by the
            # fractional len(test_x)/BATCH_SIZE, slightly biasing the mean).
            loss_mean = loss_sum / float(num_test_batches)

            # Checkpoint whenever the test loss reaches a new minimum.
            if loss_mean <= min_loss:
                min_loss = loss_mean
                if write_result:
                    saver.save(sess, os.path.join(LOG_DIR, "MODEL_MSCNN_LOSS.ckpt"))

            if epoch_idx % 5 == 0:
                log_string(
                    "train step: " + str(epoch_idx) + ", loss: " + str(current_loss) + ", test_loss: " + str(loss_mean))

        # Report total training time and the best test loss seen.
        running_time = time.time() - start
        log_string("-----------------------------------------------------------------------")
        log_string("Time Cost : %.5f sec" % running_time)
        log_string("Minest Test Loss：" + str(min_loss))


if __name__ == "__main__":  # script entry point: run training directly
    main()
    # data_pro()
