# Reproducibility: seed numpy's RNG before anything else consumes random state.
from numpy.random import seed

seed(12345)
import os
import tensorflow as tf
os.environ["CUDA_VISIBLE_DEVICES"] = '0'  # pin to GPU 0 (adjust per machine)
config = tf.compat.v1.ConfigProto()
# config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.9  # max fraction of GPU memory to claim
config.gpu_options.allow_growth = True  # allocate GPU memory on demand
# BUG FIX: the original line `tf.config.experimental.set_memory_growth = True`
# ASSIGNED True over the function instead of calling it (a silent no-op that
# also clobbered the API). Call it properly, once per visible GPU; this must
# run before the GPUs are initialized (i.e. before the Session below).
for _gpu in tf.config.experimental.list_physical_devices('GPU'):
    tf.config.experimental.set_memory_growth(_gpu, True)
sess = tf.compat.v1.Session(config=config)

import numpy as np
import skimage
import matplotlib.pyplot as plt
# from keras.models import *
# from keras.layers import *
# from keras.optimizers import *

# from keras.callbacks import TensorBoard
from keras.callbacks import TensorBoard, ModelCheckpoint, LearningRateScheduler, ReduceLROnPlateau
from keras import backend as keras
from keras.optimizers import Adam
from utils import DataGenerator
from unet3 import *
import datetime
tf.random.set_seed(1234)

def main():
    """Script entry point: run the full training pipeline."""
    goTrain()


def goTrain():
    """Train GIE_Net on 3D seismic volumes and plot the learning curves.

    Builds train/validation generators from the on-disk .dat volumes,
    compiles the model, fits for 50 epochs with per-epoch checkpointing
    and TensorBoard logging, saves the final model, and shows the
    accuracy/loss history.
    """
    # Input volume dimensions. If training does not fit in GPU memory,
    # reduce batch_size (1, 2, 4, 8, 16).
    params = {'batch_size': 1,
              'dim': (128, 128, 128),
              'n_channels': 1,
              'shuffle': True}
    # Training image paths
    seismPathT = "./data/train/seis/"
    faultPathT = "./data/train/fault/"
    # Validation image paths
    seismPathV = "./data/validation/seis/"
    faultPathV = "./data/validation/fault/"

    # Number of training and validation samples
    train_ID = range(200)
    valid_ID = range(20)

    # Training data generator
    train_generator = DataGenerator(dpath=seismPathT, fpath=faultPathT,
                                    data_IDs=train_ID, **params)

    # Validation data generator
    valid_generator = DataGenerator(dpath=seismPathV, fpath=faultPathV,
                                    data_IDs=valid_ID, **params)

    # Build the model; spatial dims are None so inference can run on
    # volumes of any size.
    model = GIE_Net(input_size=(None, None, None, 1))
    # FIX: `lr` is the deprecated alias; use `learning_rate`.
    model.compile(optimizer=Adam(learning_rate=1e-4), loss='binary_crossentropy',
                  metrics=['accuracy'])
    model.summary()

    # Checkpoint: save the model after every epoch.
    # FIX: create the output directory first — ModelCheckpoint fails if it
    # does not exist.
    os.makedirs("GIE_Net", exist_ok=True)
    filepath = "GIE_Net/GIE_Net_1_50-{epoch:02d}.hdf5"
    checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', save_best_only=False, save_weights_only=False,
                                 verbose=0, mode='auto', period=1)

    # FIX: the original computed a timestamped log_dir but then logged to a
    # hard-coded './log', so every run overwrote the last. Use a timestamped
    # subdirectory of ./log instead (relative, not machine-specific).
    log_dir = os.path.join("./log", datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
    # FIX: write_images expects a bool, not the int 1.
    tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, write_images=True, histogram_freq=1)
    callbacks_list = [checkpoint, tensorboard_callback]
    print("data prepared, ready to train!")
    # Fit the model. If the GPU cannot handle it, fall back to CPU:
    # with tf.device("/cpu"):
    # history = model.fit_generator(generator=train_generator, validation_data=valid_generator, epochs=50,
    #                               callbacks=callbacks_list, verbose=1)
    history1 = model.fit(x=train_generator, validation_data=valid_generator, epochs=50, callbacks=callbacks_list,
                         verbose=1)
    model.save("GIE_Net/GIE_Net_1_50.hdf5")
    showHistory(history1)


def showHistory(history):
    """Plot the accuracy and loss learning curves from a Keras History.

    Parameters
    ----------
    history : keras.callbacks.History
        Object whose ``history`` dict maps metric names ('accuracy',
        'val_accuracy', 'loss', 'val_loss') to per-epoch value lists.
    """
    # list all data in history
    print(history.history.keys())
    # The two original plotting stanzas were near-duplicates; factored
    # into one helper called twice.
    _plot_metric(history, 'accuracy', 'val_accuracy', 'Model accuracy', 'Accuracy')
    _plot_metric(history, 'loss', 'val_loss', 'Model loss', 'Loss')


def _plot_metric(history, train_key, val_key, title, ylabel):
    """Plot one train/validation metric pair on a fresh figure and show it."""
    fig = plt.figure(figsize=(10, 6))
    plt.plot(history.history[train_key])
    plt.plot(history.history[val_key])
    plt.title(title, fontsize=20)
    plt.ylabel(ylabel, fontsize=20)
    plt.xlabel('Epoch', fontsize=20)
    plt.legend(['train', 'test'], loc='center right', fontsize=20)
    plt.tick_params(axis='both', which='major', labelsize=18)
    plt.tick_params(axis='both', which='minor', labelsize=18)
    plt.show()


class TrainValTensorBoard(TensorBoard):
    """TensorBoard callback that logs training and validation metrics to
    separate subdirectories ('training' / 'validation') under one root, so
    both curves can be overlaid on the same chart, and additionally logs
    the optimizer's learning rate each epoch.
    """

    def __init__(self, log_dir='./log1', **kwargs):
        # Make the original `TensorBoard` log to a subdirectory 'training'
        training_log_dir = os.path.join(log_dir, 'training')
        super(TrainValTensorBoard, self).__init__(training_log_dir, **kwargs)
        # Log the validation metrics to a separate subdirectory
        self.val_log_dir = os.path.join(log_dir, 'validation')

    def set_model(self, model):
        # BUG FIX: the original used tf.summary.create_file_writer, which
        # returns a TF2 writer that has NO `add_summary` method — on_epoch_end
        # below writes TF1-style `tf.compat.v1.Summary` protos via
        # `add_summary`, so it crashed at the end of the first epoch. Use the
        # matching compat.v1 FileWriter instead.
        self.val_writer = tf.compat.v1.summary.FileWriter(self.val_log_dir)
        super(TrainValTensorBoard, self).set_model(model)

    def on_epoch_end(self, epoch, logs=None):
        # Pop the validation logs and handle them separately with
        # `self.val_writer`. Stripping the 'val_' prefix lets the train and
        # validation curves share a tag and plot on the same figure.
        logs = logs or {}
        val_logs = {k.replace('val_', ''): v for k, v in logs.items() if k.startswith('val_')}
        for name, value in val_logs.items():
            summary = tf.compat.v1.Summary()
            summary_value = summary.value.add()
            # NOTE(review): assumes `value` is a numpy scalar (has .item());
            # plain Python floats would need float(value) — confirm with the
            # Keras version in use.
            summary_value.simple_value = value.item()
            summary_value.tag = name
            self.val_writer.add_summary(summary, epoch)
        self.val_writer.flush()
        # Pass only the remaining (training) logs to `TensorBoard.on_epoch_end`,
        # plus the current learning rate.
        logs = {k: v for k, v in logs.items() if not k.startswith('val_')}
        logs.update({'lr': keras.eval(self.model.optimizer.lr)})
        super(TrainValTensorBoard, self).on_epoch_end(epoch, logs)

    def on_train_end(self, logs=None):
        super(TrainValTensorBoard, self).on_train_end(logs)
        self.val_writer.close()


# Run training only when executed as a script (not when imported).
if __name__ == '__main__':
    main()
